{
"source": "jprochazk/conan-center-index",
"score": 2
} |
#### File: entt/3.x.x/conanfile.py
```python
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
class EnttConan(ConanFile):
name = "entt"
description = "Gaming meets modern C++ - a fast and reliable entity-component system (ECS) and much more"
topics = ("conan," "entt", "gaming", "entity", "ecs")
homepage = "https://github.com/skypjack/entt"
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
no_copy_source = True
settings = "os", "compiler", "build_type", "arch"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def configure(self):
minimal_cpp_standard = "17"
if self.settings.compiler.cppstd:
tools.check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"Visual Studio": "15.9",
"gcc": "7",
"clang": "5",
"apple-clang": "10"
}
compiler = str(self.settings.compiler)
if compiler not in minimal_version:
self.output.warn(
"%s recipe lacks information about the %s compiler standard version support" % (self.name, compiler))
self.output.warn(
"%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
return
version = tools.Version(self.settings.compiler.version)
if version < minimal_version[compiler]:
raise ConanInvalidConfiguration(
"%s requires a compiler that supports at least C++%s" % (self.name, minimal_cpp_standard))
def source(self):
tools.get(**self.conan_data["sources"][self.version])
os.rename(self.name + "-" + self.version, self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["USE_LIBCPP"] = False
self._cmake.definitions["USE_ASAN"] = False
self._cmake.definitions["BUILD_TESTING"] = False
self._cmake.definitions["BUILD_DOCS"] = False
self._cmake.configure(
source_folder=self._source_subfolder,
build_folder=self._build_subfolder
)
return self._cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="LICENSE", dst="licenses",
src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "cmake"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
def package_id(self):
self.info.header_only()
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "EnTT"
self.cpp_info.names["cmake_find_package_multi"] = "EnTT"
``` |
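For recipes in this Conan v1 style, a `test_package/` conanfile is the usual way to verify the package is consumable. A minimal sketch, assuming a sibling CMake project that builds an executable named `test_package` (none is shown above):

```python
# Hypothetical test_package/conanfile.py for the recipe above (Conan v1 API).
import os
from conans import ConanFile, CMake, tools

class EnttTestConan(ConanFile):
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake"

    def build(self):
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def test(self):
        if not tools.cross_building(self.settings):
            bin_path = os.path.join("bin", "test_package")
            self.run(bin_path, run_environment=True)
```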
{
"source": "jproddy/lol-stats-online",
"score": 3
} |
#### File: lol-stats-online/lol_online/api.py
```python
from flask import current_app as app
from flask_restful import Api, Resource
from lol_online.db import get_db
from . import table_manipulation
api = Api(app)
class HelloApi(Resource):
def get(self):
return {'data': 'hello!'}
class AllGames(Resource):
def get(self):
db = get_db()
data = db.execute('SELECT * FROM games').fetchall()
return {row['game_id']: {k: v for k, v in dict(row).items() if k != 'game_id'} for row in data}
class AllGamesLimited(Resource):
def get(self, limit):
db = get_db()
data = db.execute('SELECT * FROM games LIMIT ?', [limit]).fetchall()
return {row['game_id']: {k: v for k, v in dict(row).items() if k != 'game_id'} for row in data}
class AllPlayers(Resource):
def get(self):
db = get_db()
data = db.execute('SELECT * FROM players').fetchall()
return {i: dict(row) for i, row in enumerate(data)}
class AllPlayersLimited(Resource):
def get(self, limit):
db = get_db()
data = db.execute('SELECT * FROM players LIMIT ?', [limit]).fetchall()
return {i: dict(row) for i, row in enumerate(data)}
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class PlayerStatsInternal(Resource):
def get(self, username):
return table_manipulation.generate_player_stats(username, True)
class PlayerStatsExternal(Resource):
def get(self, username):
return table_manipulation.generate_player_stats(username, False)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
api.add_resource(HelloApi, '/hello_api')
api.add_resource(AllGames, '/all_games')
api.add_resource(AllGamesLimited, '/all_games/<limit>')
api.add_resource(AllPlayers, '/all_players')
api.add_resource(AllPlayersLimited, '/all_players/<limit>')
api.add_resource(PlayerStatsInternal, '/player_stats_internal/<username>')
api.add_resource(PlayerStatsExternal, '/player_stats_external/<username>')
```
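The routes above are easiest to smoke-test through Flask's test client. A minimal sketch, assuming the package exposes the usual application factory `create_app()` (not shown in this file):

```python
# Hypothetical smoke test for the endpoints registered above; create_app()
# is assumed to be the package's Flask application factory.
from lol_online import create_app

app = create_app()
with app.test_client() as client:
    resp = client.get('/hello_api')
    assert resp.status_code == 200
    assert resp.get_json() == {'data': 'hello!'}
    # the *Limited resources take the row limit as a path segment
    assert client.get('/all_games/10').status_code == 200
```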
#### File: lol-stats-online/lol_online/riot_api.py
```python
import pandas as pd
import time
from tqdm import tqdm
from riotwatcher import LolWatcher, ApiError
from .api_key import API_KEY
REGION = 'na1'
def get_account_id(account_name):
watcher = LolWatcher(API_KEY)
account = watcher.summoner.by_name(REGION, account_name)
return account['accountId']
def get_matchlist(account_id, region=REGION):
''' retrieves list of all matches for the given account id and returns as a dataframe '''
watcher = LolWatcher(API_KEY)
matches = []
i = 0
# queue ids limit the games to 5v5 sr (norms, draft, flex, soloq, clash)
valid_queue_ids = [400, 420, 430, 440, 700]
print('fetching matchlist:')
pbar = tqdm(total=float('inf'), mininterval=0.1)
while True:
try:
match = watcher.match.matchlist_by_account(
region,
account_id,
queue=valid_queue_ids,
begin_index=100*i
)
if match['matches']:
matches.append(match)
i += 1
time.sleep(.1)
pbar.update(len(match['matches']))
else:
break
        except ApiError:
            time.sleep(.1)
all_matches = [m for match in matches for m in match['matches']]
pbar.total = len(all_matches)
pbar.close()
df = pd.DataFrame(all_matches)
df.rename({'timestamp':'creation', 'gameId': 'game_id'}, axis=1, inplace=True)
df.set_index('game_id', drop=False, inplace=True)
df.drop(['season', 'role', 'lane', 'platformId', 'champion'], axis=1, inplace=True)
return df
def get_timelines(game_ids, region=REGION):
''' retrieves detailed reports of all match timelines in the given matchlist and returns as a dataframe '''
watcher = LolWatcher(API_KEY)
timelines = []
game_ids_success = []
failed = []
print('fetching timelines:')
for i, game_id in enumerate(tqdm(game_ids)):
for _ in range(3):
try:
timelines.append(watcher.match.timeline_by_match(region, game_id))
game_ids_success.append(game_id)
break
            except ApiError:
time.sleep(1.5)
else:
failed.append(game_id)
time.sleep(1.5)
if failed:
print('game ids failed:', failed)
df_tl = pd.DataFrame(timelines, index=game_ids_success)
df_tl.index.rename('game_id', inplace=True)
df_tl.sort_index(inplace=True)
return df_tl
def get_forfeits(df, region=REGION):
'''return a series containing if each game in df was forfeit or finished normally'''
df_tl = get_timelines(df.game_id, region)
df_tl['winner'] = df.winner
return df_tl.apply(lambda x: extract_forfeit_from_frames(x.frames, x.winner), axis=1)
def extract_forfeit_from_frames(frames, winner):
'''uses timeline frames to determine if a given game was forfeit or finished normally'''
nexus_turrets = {1748: False, 2177: False, 12611: False, 13052: False}
for frame in frames:
for event in frame['events']:
if event['type'] == 'BUILDING_KILL' and event['towerType'] == 'NEXUS_TURRET':
nexus_turrets[event['position']['x']] = True
blue_nexus_turrets_destroyed = nexus_turrets[1748] and nexus_turrets[2177]
red_nexus_turrets_destroyed = nexus_turrets[12611] and nexus_turrets[13052]
    forfeit = not ((red_nexus_turrets_destroyed and winner == 100) or (blue_nexus_turrets_destroyed and winner == 200))
return int(forfeit)
def get_matches(df, region=REGION):
'''collects games from riot api'''
watcher = LolWatcher(API_KEY)
matches = []
game_ids_success = []
failed = []
print('fetching matches:')
for i, game_id in enumerate(tqdm(df.game_id)):
for _ in range(3):
try:
matches.append(watcher.match.by_id(region, game_id))
game_ids_success.append(game_id)
break
            except ApiError:
time.sleep(1.5)
else:
failed.append(game_id)
time.sleep(1.5)
if failed:
print('game ids failed:', failed)
df_m = pd.DataFrame(matches)
df_m.rename(columns={'gameId': 'game_id'}, inplace=True)
df_m.set_index('game_id', drop=False, inplace=True)
return df_m
def filter_remakes(df):
'''returns tuple (full-length games, remakes). cutoff set to 300 s'''
    if 'duration' in df.columns:
        remake_mask = df.duration > 300
    elif 'gameDuration' in df.columns:
        remake_mask = df.gameDuration > 300
    else:
        raise ValueError("df has neither a 'duration' nor a 'gameDuration' column")
    return df[remake_mask], df[~remake_mask]
``` |
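Pieced together, the functions above form a fetch pipeline. A sketch of the intended flow, assuming a valid key in `api_key.py` (it makes live Riot API calls, so it is slow and rate-limited); 'SummonerName' is a placeholder:

```python
# Sketch of the full pipeline: summoner name -> matchlist -> match details
# -> remake filtering.
from lol_online import riot_api

account_id = riot_api.get_account_id('SummonerName')
matchlist = riot_api.get_matchlist(account_id)    # one row per game
matches = riot_api.get_matches(matchlist)         # full match details
full_games, remakes = riot_api.filter_remakes(matches)
print(len(full_games), 'full games,', len(remakes), 'remakes')
```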
{
"source": "jproffitt/django-react-templatetags",
"score": 2
} |
#### File: django-react-templatetags/django_react_templatetags/context_processors.py
```python
import warnings
def react_context_processor(request):
"""Expose a global list of react components to be processed"""
warnings.warn(
"react_context_processor is no longer required.", DeprecationWarning
)
return {
'REACT_COMPONENTS': [],
}
``` |
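Even though the processor is deprecated, the registration mechanism it relies on is the standard Django `TEMPLATES` setting. For reference, this is roughly how a project would have wired it up in `settings.py`:

```python
# Standard Django wiring for a template context processor (settings.py).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                # deprecated, shown here only to illustrate registration:
                'django_react_templatetags.context_processors.react_context_processor',
            ],
        },
    },
]
```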
{
"source": "jprof/jrnl",
"score": 3
} |
#### File: jrnl/plugins/text_exporter.py
```python
from __future__ import absolute_import, unicode_literals
import codecs
import os
from ..util import u, slugify, ERROR_COLOR, RESET_COLOR
class TextExporter(object):
"""This Exporter can convert entries and journals into text files."""
names = ["text", "txt"]
extension = "txt"
@classmethod
def export_entry(cls, entry):
"""Returns a unicode representation of a single entry."""
return entry.__unicode__()
@classmethod
def export_journal(cls, journal):
"""Returns a unicode representation of an entire journal."""
return "\n".join(cls.export_entry(entry) for entry in journal)
@classmethod
def write_file(cls, journal, path):
"""Exports a journal into a single file."""
try:
with codecs.open(path, "w", "utf-8") as f:
f.write(cls.export_journal(journal))
return "[Journal exported to {0}]".format(path)
except IOError as e:
return "[{2}ERROR{3}: {0} {1}]".format(e.filename, e.strerror, ERROR_COLOR, RESET_COLOR)
@classmethod
def make_filename(cls, entry):
return entry.date.strftime("%Y-%m-%d_{0}.{1}".format(slugify(u(entry.title)), cls.extension))
@classmethod
def write_files(cls, journal, path):
"""Exports a journal into individual files for each entry."""
for entry in journal.entries:
try:
full_path = os.path.join(path, cls.make_filename(entry))
with codecs.open(full_path, "w", "utf-8") as f:
f.write(cls.export_entry(entry))
except IOError as e:
return "[{2}ERROR{3}: {0} {1}]".format(e.filename, e.strerror, ERROR_COLOR, RESET_COLOR)
return "[Journal exported to {0}]".format(path)
@classmethod
def export(cls, journal, output=None):
"""Exports to individual files if output is an existing path, or into
a single file if output is a file name, or returns the exporter's
representation as unicode if output is None."""
if output and os.path.isdir(output): # multiple files
return cls.write_files(journal, output)
elif output: # single file
return cls.write_file(journal, output)
else:
return cls.export_journal(journal)
``` |
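`export()` dispatches on the type of `output`: `None` returns a string, an existing directory writes one file per entry, and anything else is treated as a single file name. A self-contained sketch using hypothetical stand-in classes (jrnl's real `Journal`/`Entry` construction is not shown in this file):

```python
import datetime
from jrnl.plugins.text_exporter import TextExporter

class FakeEntry(object):
    """Hypothetical stand-in with the attributes TextExporter touches."""
    def __init__(self, date, title, body):
        self.date, self.title, self.body = date, title, body
    def __unicode__(self):
        return u"{} {}\n{}".format(self.date.isoformat(), self.title, self.body)

class FakeJournal(list):
    @property
    def entries(self):
        return list(self)

journal = FakeJournal([FakeEntry(datetime.date(2020, 1, 1), "First entry", "Hello.")])
print(TextExporter.export(journal))                 # output=None -> unicode dump
print(TextExporter.export(journal, "journal.txt"))  # file name -> single file
```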
{
"source": "JPRolfe/AutoClicker",
"score": 2
} |
#### File: JPRolfe/AutoClicker/CurrentWorking.py
```python
from tkinter import *
from tkinter import messagebox
import time
import pyautogui
import win32api, win32con
import datetime
import secrets
from pynput import keyboard
from pynput.keyboard import Key, Listener, KeyCode
from multiprocessing import Process, Queue, Value
# Collect events until released
s_x = None
s_y = None
e_x = None
e_y = None
millisecond_lower = None
millisecond_higher = None
running = False
direction = None
rawKey = None
finalKey = None
globalKey = None
finiteReset = Queue()
ClickCounter = Queue()
finiteReset.put(False)
beginningVariable = True
lastOption = None
counterVariable = Value('i', 0)
class Application():
def __init__(self, master):
self.master = master
self.rect = None
self.x = self.y = 0
self.start_x = None
self.start_y = None
self.curX = None
self.curY = None
self.switch = False
#root.configure(background = 'red')
#root.attributes("-transparentcolor","red")
#root.attributes("-transparent", "blue")
root.geometry('900x480+1700+800') # set new geometry
root.title('Random Mouse Clicker')
self.menu_frame = Frame(master)
self.menu_frame.pack(expand=YES, fill=BOTH)
self.menu_frame.columnconfigure((0,1), weight=0, pad=10)
self.bottommainframe = Frame(master)
self.bottommainframe.pack(side="bottom", expand=YES, fill=BOTH)
self.topmainframe = Frame(master)
self.topmainframe.pack(side="top", expand=YES, fill=BOTH)
self.leftframe = Frame(self.topmainframe)
self.leftframe.pack(side="left", expand=YES, fill=BOTH)
self.rightframe = Frame(self.topmainframe)
self.rightframe.pack(side="right", expand=YES, fill=BOTH)
self.framebuttons = Frame(self.rightframe)
self.framebuttons.pack(side="right", expand=YES, fill=BOTH)
self.labelframe_area = LabelFrame(self.framebuttons, highlightthickness=0, text="Set Area", )
self.labelframe_area.pack(side="top", padx=20, expand=True, fill=BOTH)
self.labelframe_click = LabelFrame(self.leftframe, highlightthickness=0, text="Autoclick")
self.labelframe_click.pack(padx=20, expand=YES, fill=BOTH)
self.labelframe_keybind = LabelFrame(self.framebuttons, highlightthickness=0, text="Set Keybind")
self.labelframe_keybind.pack(side="bottom", padx=20, pady=20, expand=YES, fill=BOTH)
self.labelframe_delay = LabelFrame(self.leftframe, highlightthickness=0, text="Set Delay")
self.labelframe_delay.pack(expand=YES, fill=BOTH, pady=20, padx=20)
self.setupforsetup = Frame(self.bottommainframe)
self.setupforsetup.pack(side="left", expand=YES, fill=BOTH)
self.labelframe_setup = LabelFrame(self.setupforsetup, highlightthickness=0, text="Current Setup", )
self.labelframe_setup.pack(padx=20,fill=BOTH)
self.setupbox1 = Frame(self.labelframe_setup)
self.setupbox1.pack(side="left")
self.setupbox2 = Frame(self.labelframe_setup)
self.setupbox2.pack(side="left", padx=3)
self.setupbox3 = Frame(self.labelframe_setup)
self.setupbox3.pack(side="left", padx=3, pady=10)
self.keybindtext = StringVar()
self.keybindtext.set("None")
self.keybindDir = Label(self.setupbox1, textvariable=self.keybindtext, bg="white")
self.keybindDir.pack(side="bottom", fill=X)
self.keybindlabel = Label(self.setupbox1, text="Current Keybind Allocated", height=1)
self.keybindlabel.pack(side="top")
self.delaytext = StringVar()
self.delaytext.set("None")
self.delayDir = Label(self.setupbox2, textvariable=self.delaytext, bg="white")
self.delayDir.pack(side="bottom", fill=X)
self.delaylabel = Label(self.setupbox2, text="Current Delay Allocated", height=1)
self.delaylabel.pack(side="top")
self.clickrvar = IntVar()
self.clickrvar.set(0)
        self.clickrtext = StringVar()
#self.clickrtext.set("None")
#print(str(self.clickrtext.value))
        self.clickrDir = Label(self.setupbox3, textvariable=self.clickrvar, bg="white")
self.clickrDir.pack(side="bottom", fill=X)
self.clickrlabel = Label(self.setupbox3, text="Clicks Remaining", height=1)
self.clickrlabel.pack(side="top")
self.frameMinButton = Frame(self.bottommainframe)
self.frameMinButton.pack(side="right", fill=BOTH, expand=YES)
        self.minimizebutton = Button(self.frameMinButton, text="Minimize", command=self.minim, background='lightgrey')
self.minimizebutton.pack(pady=10, fill=Y, expand=YES)
self.snipButton = Button(self.labelframe_area, text="Set Click Area", command=self.createScreenCanvas)
self.snipButton.pack(pady=20)
self.labeltext1 = StringVar()
self.labeltext1.set("Lower Limit Value")
self.labelDir = Label(self.labelframe_click, textvariable=self.labeltext1, height=1)
self.labelDir.grid(row=0,column=0)
self.labeltext2 = StringVar()
self.labeltext2.set("Upper Limit Value")
self.labelDir = Label(self.labelframe_click, textvariable=self.labeltext2, height=2)
self.labelDir.grid(row=1,column=0)
self.rangetxtbelow = Entry(self.labelframe_click, width=10)
self.rangetxtbelow.bind("<KeyRelease>", self.updateDelaySetup)
self.rangetxtbelow.bind("<FocusOut>", self.checkDelayLower)
#self.rangetxtbelow.pack(expand=YES)
self.clicked = StringVar()
self.clicked.set("Milliseconds")
self.secondscale = OptionMenu(self.labelframe_click, self.clicked, "Seconds", "Milliseconds", command=self.updateDelaySetup)
self.myButton = Button(self.labelframe_keybind, text="Assign", command=self.assignKey)
self.myButton.grid(row=0, column=1, padx=2)
self.myButton = Button(self.labelframe_keybind, text="Clear", command=self.clearKey)
self.myButton.grid(row=0, column=2, padx=2)
self.rangetextabove = Entry(self.labelframe_click, width=10)
self.rangetextabove.bind("<KeyRelease>", self.updateDelaySetup)
self.rangetextabove.bind("<FocusOut>", self.checkDelayUpper)
#self.rangetextabove.pack(expand=YES)
self.rangetextabove.grid(row=1, column=2)
self.rangetxtbelow.grid(row=0, column=2)
self.secondscale.grid(row=0, column=4)
self.ClickDelayLabelFrame = LabelFrame(self.labelframe_delay, text="Click Option")
self.ClickDelayLabelFrame.pack(padx=10, side="left", fill=X)
self.option = StringVar()
self.option2 = StringVar()
self.endlessclicking = Radiobutton(self.ClickDelayLabelFrame, command=self.enableRange, text="Endless Clicking", value="endless", var=self.option)
self.finiteclicking = Radiobutton(self.ClickDelayLabelFrame, command=self.enableFiniteRange, text= "Finite Clicking", value="finite", var=self.option)
self.option.set('endless')
self.numberofclicks = Entry(self.ClickDelayLabelFrame, width=10, state='disabled')
self.numberofclicks.bind("<KeyRelease>", self.updateClickSetup)
self.numberofclicks.grid(row=2, column=8, padx=15)
self.PressReleaseLabel = LabelFrame(self.labelframe_delay, text="Press & Release Click Delay")
self.PressReleaseLabel.pack(padx=10, side="left", fill=X)
self.PressReleaseDelayOn = Radiobutton(self.PressReleaseLabel, command=self.enableDelayOn, text="On", value="On", var=self.option2)
self.PressReleaseDelayOff = Radiobutton(self.PressReleaseLabel, command=self.enableDelayOff, text= "Off", value="Off", var=self.option2)
self.option2.set('On')
self.clickReleaseDelay = Entry(self.PressReleaseLabel, width=10, state='disabled')
self.clickReleaseDelay.bind("<KeyRelease>", self.updateClickDelaySetup)
self.clickReleaseDelay.grid(row=2, column=8)
self.PressReleaseDelayOn.grid(row = 2, column=1)
self.PressReleaseDelayOff.grid(row=1, column=1)
self.finiteclicking.grid(row = 2, column=6)
self.endlessclicking.grid(row=1, column=6)
self.keybindentry = Entry(self.labelframe_keybind, width=10)
self.keybindentry.bind("<KeyRelease>", self.clicker)
self.keybindentry.grid(row=0, column=0)
self.keybindentry.insert(END, "None")
self.master_screen = Toplevel(root)
self.master_screen.withdraw()
self.master_screen.attributes("-transparent", "blue")
self.picture_frame = Frame(self.master_screen, background = "blue")
self.picture_frame.pack(fill=BOTH, expand=YES)
root.bind_all("<1>", lambda event:event.widget.focus_set())
self.key = self.keybindentry.get()
    def enableDelayOn(self):
        pass
    def enableDelayOff(self):
        pass
    def updateClickDelaySetup(self, key):
        pass
def updateClickSetup(self, key):
try:
int(self.numberofclicks.get())
        except ValueError:
messagebox.showerror("Non-Integer Type Error", "Please input a Positive Integer")
return 0
counterVariable.value = int(self.numberofclicks.get())
#global testingInt
#testingInt = int(self.numberofclicks.get())
print(counterVariable.value)
#self.clickrtext.set(counterVariable)
return 1
def checkDelayLower(self, key):
if self.rangetxtbelow.get() != "":
print(self.rangetxtbelow.get())
try:
int(self.rangetxtbelow.get())
            except ValueError:
messagebox.showerror("Non-Integer Type Error", "Please input a Positive Integer")
return
if int(self.rangetxtbelow.get()) <= 0:
messagebox.showerror("Negative/Zero Type Error", "Please input a Positive Integer")
def checkDelayUpper(self, key):
if self.rangetextabove.get() != "":
if self.rangetxtbelow.get() != "":
                try:
                    if int(self.rangetxtbelow.get()) >= int(self.rangetextabove.get()):
                        messagebox.showerror("Upper Equals Lower Error", "Please input an Integer greater than the Lower Limit Value")
                except ValueError:
                    pass  # non-integer input is reported by the other validators
def updateDelaySetup(self, key):
print(self.rangetextabove.get())
tempVariable = "None"
if self.clicked.get() == 'Milliseconds':
tempVariable = "ms"
else:
tempVariable = "s"
if self.rangetxtbelow.get() == "" and self.rangetextabove.get() == "":
self.delaytext.set("? < x < ? {}".format(tempVariable))
elif self.rangetxtbelow.get() == "":
self.delaytext.set("? < x < {} {}".format(self.rangetextabove.get(), tempVariable))
elif self.rangetextabove.get() == "":
self.delaytext.set("{} < x < ? {}".format(self.rangetxtbelow.get(), tempVariable))
else:
self.delaytext.set("{} < x < {} {}".format(self.rangetxtbelow.get(), self.rangetextabove.get(), tempVariable))
def minim(self):
root.wm_state("iconic")
def enableRange(self):
self.numberofclicks.config(state='disabled')
return
def enableFiniteRange(self):
self.numberofclicks.config(state='normal')
return
def assignKey(self):
print(rawKey)
global finalKey
self.keybindtext.set(str(rawKey).replace("'", ""))
finalKey = KeyCode(char=rawKey)
def clearKey(self):
global rawKey
rawKey = None
self.keybindentry.delete(0, END)
self.keybindentry.insert(END, "None")
self.keybindtext.set("None")
global finalKey
finalKey = None
def clicker(self, event):
self.keybindentry.delete(0, END)
self.keybindentry.insert(END, event.keysym)
global rawKey
rawKey = globalKey
#return str(event.keysym)
def update_delay(self):
try:
global millisecond_lower
millisecond_lower = int(self.rangetxtbelow.get())
global millisecond_higher
millisecond_higher = int(self.rangetextabove.get())
        except ValueError:
#messagebox.showerror("Integer Error", "Please enter an Integer")
return 0
return 1
def clicking(self):
        return 1 if running else 0
def on_activate_h(self):
print('<ctrl>+<alt>+h pressed')
def takeBoundedScreenShot(self, x1, y1, x2, y2):
im = pyautogui.screenshot(region=(x1, y1, x2, y2))
x = datetime.datetime.now()
fileName = x.strftime("%f")
#im.save("snips/" + fileName + ".png")
def createScreenCanvas(self):
self.master_screen.deiconify()
root.withdraw()
self.screenCanvas = Canvas(self.picture_frame, cursor="cross", bg="grey11")
self.screenCanvas.pack(fill=BOTH, expand=YES)
self.screenCanvas.bind("<ButtonPress-1>", self.on_button_press)
self.screenCanvas.bind("<B1-Motion>", self.on_move_press)
self.screenCanvas.bind("<ButtonRelease-1>", self.on_button_release)
self.master_screen.attributes('-fullscreen', True)
self.master_screen.attributes('-alpha', .3)
self.master_screen.lift()
self.master_screen.attributes("-topmost", True)
def on_button_release(self, event):
#self.recPosition()
global direction
if self.start_x <= self.curX and self.start_y <= self.curY:
direction = "rd"
print("right down")
self.takeBoundedScreenShot(self.start_x, self.start_y, self.curX - self.start_x, self.curY - self.start_y)
elif self.start_x >= self.curX and self.start_y <= self.curY:
direction = "ld"
print("left down")
self.takeBoundedScreenShot(self.curX, self.start_y, self.start_x - self.curX, self.curY - self.start_y)
elif self.start_x <= self.curX and self.start_y >= self.curY:
direction = "ru"
print("right up")
self.takeBoundedScreenShot(self.start_x, self.curY, self.curX - self.start_x, self.start_y - self.curY)
elif self.start_x >= self.curX and self.start_y >= self.curY:
direction = "lu"
print("left up")
self.takeBoundedScreenShot(self.curX, self.curY, self.start_x - self.curX, self.start_y - self.curY)
global e_x
e_x = self.curX
global e_y
e_y = self.curY
print("Screenshot Chosen end {} {}".format(e_x, e_y))
self.exitScreenshotMode()
return event
def exitScreenshotMode(self):
print("Screenshot mode exited")
self.screenCanvas.destroy()
self.master_screen.withdraw()
root.deiconify()
def exit_application(self):
print("Application exit")
listener.stop()
root.quit()
def on_button_press(self, event):
# save mouse drag start position
self.start_x = self.screenCanvas.canvasx(event.x)
self.start_y = self.screenCanvas.canvasy(event.y)
global s_x
s_x = self.start_x
global s_y
s_y = self.start_y
print("Screenshot Chosen begin {} {}".format(s_x, s_y))
self.rect = self.screenCanvas.create_rectangle(self.x, self.y, 1, 1, outline='red', width=3, fill="blue")
def on_move_press(self, event):
self.curX, self.curY = (event.x, event.y)
# expand rectangle as you drag the mouse
self.screenCanvas.coords(self.rect, self.start_x, self.start_y, self.curX, self.curY)
def recPosition(self):
print(self.start_x)
print(self.start_y)
print(self.curX)
print(self.curY)
def clickMouse(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower, direction):
#print("{}, {}, {}, {}, {}, {}".format(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower))
try:
if direction == "rd":
x_coord = int(s_x+secrets.randbelow(int(e_x) - int(s_x)))
y_coord = int(s_y+secrets.randbelow(int(e_y) - int(s_y)))
elif direction == "ru":
x_coord = int(s_x+secrets.randbelow(int(e_x) - int(s_x)))
y_coord = int(e_y+secrets.randbelow(int(s_y) - int(e_y)))
elif direction == "ld":
x_coord = int(e_x+secrets.randbelow(int(s_x) - int(e_x)))
y_coord = int(s_y+secrets.randbelow(int(e_y) - int(s_y)))
elif direction == "lu":
x_coord = int(e_x+secrets.randbelow(int(s_x) - int(e_x)))
y_coord = int(e_y+secrets.randbelow(int(s_y) - int(e_y)))
else:
messagebox.showerror("Fatal Error", "Report to Joey if this happens")
return 0
    except (TypeError, ValueError):
messagebox.showerror("None Error", "Select an Area to click from")
messagebox.showerror("Log Box", "sx {} sy {} ex {} ey {}".format(s_x, s_y, e_x, e_y))
return 0
try:
delay = millisecond_lower + secrets.randbelow(millisecond_higher - millisecond_lower)
    except (TypeError, ValueError):
messagebox.showerror("Non-Integer Type Error", "Please input a proper Delay Time")
return 0
delay = delay*0.001
win32api.SetCursorPos((x_coord,y_coord))
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,x_coord,y_coord,0,0)
click_delay = (30 + int(secrets.randbelow(40))) * 0.001
time.sleep(click_delay)
print(click_delay)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,x_coord,y_coord,0,0)
time.sleep(delay)
def updateClickCounter():
pass
return
def on_press(key):
global beginningVariable
global running
global globalKey
global lastOption
#global testingInt
#testingInt = testingInt - 1
globalKey = key
print(app.option.get())
print(finalKey)
print("key {}".format(running))
if KeyCode(char=key) == finalKey:
if beginningVariable == True:
if(app.option.get() == 'endless'):
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
elif app.clicking() == 1:
print(processes)
for p in processes:
proc_stop(p)
processes.remove(p)
print(processes)
switch()
x_temp, y_temp = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x_temp, y_temp,0,0)
beginningVariable = False
finiteReset.get()
lastOption = 'endless'
else:
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
finiteReset.get()
beginningVariable = False
lastOption = 'finite'
else:
if lastOption == 'endless':
if app.option.get() == 'endless':
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
elif app.clicking() == 1:
print(processes)
for p in processes:
proc_stop(p)
processes.remove(p)
print(processes)
switch()
x_temp, y_temp = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x_temp, y_temp,0,0)
else:
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
if finiteReset.empty() != True:
finiteReset.get()
elif app.clicking() == 1:
print(processes)
for p in processes:
proc_stop(p)
processes.remove(p)
print(processes)
switch()
x_temp, y_temp = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x_temp, y_temp,0,0)
lastOption = 'finite'
elif lastOption == 'finite':
if app.option.get() == 'endless':
print("ending!")
if finiteReset.empty() != True:
if finiteReset.get() == True:
switch()
for p in processes:
proc_stop(p)
processes.remove(p)
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
#finiteReset.get()
elif app.clicking() == 1:
print(processes)
for p in processes:
proc_stop(p)
processes.remove(p)
print(processes)
switch()
x_temp, y_temp = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x_temp, y_temp,0,0)
lastOption = 'endless'
else:
print("testing")
if finiteReset.empty() != True:
if finiteReset.get() == True:
switch()
for p in processes:
proc_stop(p)
processes.remove(p)
finiteReset.put(False)
print("testing2")
if app.clicking() == 0:
app.update_delay()
p = proc_start()
p.start()
processes.append(p)
switch()
print(processes)
if finiteReset.empty() != True:
finiteReset.get()
elif app.clicking() == 1:
print(processes)
for p in processes:
proc_stop(p)
processes.remove(p)
print(processes)
switch()
x_temp, y_temp = win32api.GetCursorPos()
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x_temp, y_temp,0,0)
lastOption = 'finite'
def on_closing():
listener.stop()
try:
for p in processes:
proc_stop(p)
except:
pass
root.destroy()
return
def switch():
global running
if running == False:
running = True
else:
running = False
def work(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower, direction, option, limit, finiteReset, counterVariable):
global running
if option == 'endless':
finiteReset.put(False)
print("hi!")
while True:
if clickMouse(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower, direction) == 0:
return 0
elif option == 'finite':
finiteReset.put(False)
try:
limit = int(limit)
        except ValueError:
messagebox.showerror("Limit Integer Error", "Enter an integer in the limit box")
finiteReset.get()
finiteReset.put(True)
return 0
i = 0
while i < limit:
if clickMouse(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower, direction) == 0:
return 0
counterVariable.value = counterVariable.value - 1
print(counterVariable.value)
i += 1
finiteReset.get()
finiteReset.put(True)
def proc_start():
p_to_start = Process(target=work, args=(s_x, s_y, e_x, e_y, millisecond_higher, millisecond_lower, direction, app.option.get(), app.numberofclicks.get(), finiteReset, counterVariable))
return p_to_start
def proc_stop(p_to_stop):
p_to_stop.terminate()
if __name__ == '__main__':
listener = keyboard.Listener(on_press=on_press)
listener.start()
processes = []
root = Tk()
app = Application(root)
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
``` |
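The core randomization in `clickMouse()` is easy to lose in the Tk plumbing: it samples a uniform point inside the selected rectangle and a uniform delay in the configured millisecond range. A self-contained sketch of just that sampling logic (no Windows APIs involved):

```python
# The same sampling scheme clickMouse() uses for the "rd" (right-down) case:
# a uniform point in the rectangle and a uniform delay in [lower, upper) ms.
import secrets

def sample_click(s_x, s_y, e_x, e_y, ms_lower, ms_higher):
    x = s_x + secrets.randbelow(e_x - s_x)
    y = s_y + secrets.randbelow(e_y - s_y)
    delay_s = (ms_lower + secrets.randbelow(ms_higher - ms_lower)) * 0.001
    return x, y, delay_s

print(sample_click(100, 100, 300, 200, 50, 150))  # e.g. (217, 143, 0.094)
```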
{
"source": "jpromanonet/codeChallenges",
"score": 3
} |
#### File: 77_Day/chillingWith/chillingWith.py
```python
# --------------------------------------------------------------------------------------------------------------
# Calling libraries and frameworks
import random
import os
import datetime
import shelve
import sys
# Declaring global variables
## User input parameters, DB (shelve file) handles, and front-end doc construction variables.
## Movies parameters
movieTitle = ''
releaseYear = ''
orderInSaga = ''
pendingMovie = ''
## Series parameters
serieTitle = ''
numberOfSeasons = ''
startedYear = ''
numberOfChapters = ''
pendingSerie = ''
## DB Files
moviesFile = ''
seriesFile = ''
## FrontEnd Files
finalHtmlFile = ''
# Declaring functions
## Movies Functions
def addMovie(title, releaseDate, sagaNumber, pendingToWatch):
moviesFile = shelve.open('moviesDB')
movieTitle = title
moviesFile[movieTitle] = [title, releaseDate, sagaNumber, pendingToWatch]
moviesFile.close()
def deleteMovie(title):
movieTitle = title
moviesFile = shelve.open('moviesDB')
del moviesFile[movieTitle]
moviesFile.close()
## Series Functions
def addSeries(title, seasons, releaseDate, chapters, pendingToWatch):
seriesFile = shelve.open('seriesDB')
serieTitle = title
seriesFile[serieTitle] = [title, seasons, releaseDate, chapters, pendingToWatch]
seriesFile.close()
def deleteSeries(title):
serieTitle = title
seriesFile = shelve.open('seriesDB')
del seriesFile[serieTitle]
seriesFile.close()
# Program logic
print('Insert title: ')
movieTitle = input()
print("Insert Release Date: ")
release = input()
print('''Insert order in the movie saga (if it's a solo movie, leave it blank)''')
orderInSaga = input()
print('Pending to watch? y/n ')
toWatch = input()
addMovie(movieTitle, release, orderInSaga, toWatch)
``` |
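Since the "DB" is a shelve file, entries can be read back like a dict. A small sketch of inspecting `moviesDB` after a few `addMovie()` calls:

```python
# Reading the shelve file written by addMovie() above; each value is the
# [title, releaseDate, sagaNumber, pendingToWatch] list it stored.
import shelve

with shelve.open('moviesDB') as movies:
    for key, (title, release, saga_order, pending) in movies.items():
        status = 'pending' if pending == 'y' else 'watched'
        print('{} ({}) - {}'.format(title, release, status))
```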
{
"source": "jproppe/pka",
"score": 3
} |
#### File: pka/pka/main.py
```python
import xlrd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import BayesianRidge
from sklearn.preprocessing import PolynomialFeatures
class pKa:
def __init__(self, path_to_data='./training_data.xlsx'):
'''
Load data from path (default: './training_data.xlsx') and specify variables that are being
used throughout the procedure. Display the underlying database.
'''
        self.data = pd.read_excel(path_to_data)
display(self.data)
self.x = np.array(self.data['pKa_theo']).flatten() # independent variable
self.y = np.array(self.data['pKa_exp']).flatten() # dependent variable
if len(self.x) != len(self.y):
raise Exception("Number of instances in pKa_exp and pKa_theo is required to be identical.")
self.N = len(self.x) # number of data points
self.X = PolynomialFeatures(1).fit_transform(self.x.reshape(-1, 1)) # add intercept term
self.X2 = PolynomialFeatures(2).fit_transform(self.x.reshape(-1, 1)) # ... and quadratic term
# define <x_grid> and <X_grid> (equivalent to <x> and <X>) for plotting purposes
delta = np.max(self.x) - np.min(self.x)
self.x_grid = np.linspace(np.min(self.x) - .05 * delta, np.max(self.x) + .05 * delta, 250)
self.X_grid = PolynomialFeatures(1).fit_transform(self.x_grid.reshape(-1, 1))
self.rng = np.random.RandomState() # random number generator (local to the class)
#------------------------------------------------------------------------------------------------------#
def get_coefs(self, seed=None):
'''
Return coefficients (<model.coef_>) of a weighted linear regression model (<model>).
Weights (<weight>) are obtained on the basis of Bayesian bootstrapping.
If <x>-dependent variance (<var>) is available (from heteroscedastic regression), adjust weights.
'''
if not hasattr(self, 'var'):
self.var = 1.
        if seed is not None: # sample-specific seed
            self.rng.seed(seed)
weight = np.diff(np.concatenate(([0.], np.sort(self.rng.uniform(0., 1., self.N-1)), [1.])))
model = LinearRegression(fit_intercept=False, normalize=True).fit(self.X, self.y, weight / self.var)
return model.coef_
#------------------------------------------------------------------------------------------------------#
def bootstrap(self):
'''
Draw 1000 bootstrap samples and perform weighted linear regression.
Collect regression coefficients (<coefs>) and determine the ensemble mean (<coefs_mean>)
and covariance (<coefs_cov>).
Approximate <y> on the basis of the ensemble of regression models (predictions <f>).
'''
self.coefs = []
for b in range(1000):
self.coefs.append(self.get_coefs(seed=b))
self.coefs = np.array(self.coefs)
self.coefs_mean = np.mean(self.coefs, axis=0).reshape(-1, 1)
self.coefs_cov = np.cov(self.coefs.T)
self.f = self.X.dot(self.coefs_mean).flatten()
# necessary if heteroscedastic regression has not yet been performed
if not hasattr(self, 'subcoefs'):
self.subcoefs = np.array([self.N / (self.N - 2) * np.mean((self.y - self.f)**2), 0., 0.])
#------------------------------------------------------------------------------------------------------#
def predict(self, x_query):
'''
Make a prediction (<f>) including uncertainty (<u>, 95% confidence interval (CI))
based on <x_query>.
'''
x_query = np.array(x_query)
X_query = PolynomialFeatures(1).fit_transform(x_query.reshape(-1, 1))
X_query2 = PolynomialFeatures(2).fit_transform(x_query.reshape(-1, 1))
f = X_query.dot(self.coefs_mean).flatten()
u = 1.96 * np.sqrt(X_query2.dot(self.subcoefs).flatten() + np.diag(X_query.dot(self.coefs_cov.dot(X_query.T))))
return f, u
#------------------------------------------------------------------------------------------------------#
def plot_bootstrap_results(self, show_ensemble=True):
'''
Plot the results of the bootstrapping procedure. If <show_ensemble> is True, all regression lines
will be plotted.
'''
if show_ensemble is True:
for b in range(1000):
if b == 0:
label_ = 'result for $b$th sample'
else:
label_ = None
plt.plot(self.x_grid,
self.X_grid.dot(self.coefs[b,:].reshape(-1, 1)),
color='#75bbfd',
linewidth=.5,
label=label_
)
f, u = self.predict(self.x_grid)
plt.plot(self.x_grid, f, 'k-', label='regression line')
plt.plot(self.x, self.y, 'k.', label='training data')
plt.fill_between(self.x_grid, (f + u), (f - u), facecolor='red', alpha=0.2, label='uncertainty (95% CI)')
plt.xlabel(r'p$K_a$ (theo)', fontsize=12)
plt.ylabel(r'p$K_a$ (exp)', fontsize=12)
plt.legend()
#------------------------------------------------------------------------------------------------------#
def query(self, x_query):
'''
Make a prediction (<f>) including uncertainty (<u>, 95% confidence interval (CI)) for a
user-specific query (<x_query>). Print statistics.
'''
x_query = np.array(x_query).flatten()
if len(x_query) != 1:
raise Exception("Multiple queries were made, but only one at a time is possible at the moment.")
self.plot_bootstrap_results(show_ensemble=False)
f, u = self.predict(x_query)
plt.errorbar(x_query, f, u, color='red', mfc='black', capsize=3, marker='o', label='queried prediction')
print('Prediction = ' + str(format(f.item(), '.3f')))
print('Uncertainty (95% CI) = ' + str(format(u.item(), '.3f')))
plt.legend()
#------------------------------------------------------------------------------------------------------#
def fit_variance(self):
'''
Heteroscedastic regression. Determine <var> as the <x>-dependent variance and <subcoefs> as
the coefficients of this additional regression model.
'''
model = BayesianRidge(fit_intercept=False, normalize=True).fit(self.X2, (self.y - self.f)**2)
self.var = model.predict(self.X2).flatten()
self.subcoefs = model.coef_
#------------------------------------------------------------------------------------------------------#
def check_query(self, query):
'''
        Check whether a user-specified query is a valid number or not.
'''
try:
float(query)
return True
except ValueError:
return False
#------------------------------------------------------------------------------------------------------#
def run(self):
'''
        The key method of the pKa class. Perform one run of homoscedastic regression (bootstrapped)
followed by three runs of heteroscedastic regression (bootstrapped). Print statistics.
Allow users to make individual queries.
'''
if hasattr(self, 'var'):
del self.var
if hasattr(self, 'subcoefs'):
del self.subcoefs
self.bootstrap()
for i in range(3):
self.fit_variance()
self.bootstrap()
self.plot_bootstrap_results()
plt.show()
print('===============================================')
print('SUMMARY OF HETEROSCEDASTIC BOOTSTRAP REGRESSION')
print('intercept = ' + str(format(np.mean(self.coefs[:,0]), '.3f')) \
+ ' +/- ' + str(format(1.96 * np.std(self.coefs[:,0]), '.3f')) + ' (95% confidence)')
print('slope = ' + str(format(np.mean(self.coefs[:,1]), '.3f')) \
+ ' +/- ' + str(format(1.96 * np.std(self.coefs[:,1]), '.3f')) + ' (95% confidence)')
print('===============================================\n')
querying = True
while querying:
print('Enter any non-digit character to stop the procedure.')
query = input('Enter pKa value: ')
querying = self.check_query(query)
if querying:
self.query(float(query))
plt.show()
``` |
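A minimal usage sketch, assuming a `training_data.xlsx` with `pKa_theo` and `pKa_exp` columns sits in the working directory (the `display()` call suggests the class is written for a notebook):

```python
from pka.main import pKa

model = pKa('./training_data.xlsx')
model.run()                   # bootstrap, heteroscedastic refits, then interactive queries

# or, non-interactively:
model.bootstrap()
f, u = model.predict([5.0])   # prediction and 95% CI half-width at pKa_theo = 5
print(f.item(), '+/-', u.item())
```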
{
"source": "jproudlo/PyModel",
"score": 2
} |
#### File: samples/populations/filter3_lc.py
```python
import populations
def filter3():
return len(populations.population) <= 3
populations.statefilter = filter3
```
#### File: Socket/fsmpy/synchronous_graph.py
```python
def send_return(): pass
def send_call(): pass
def recv_call(): pass
def recv_return(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'synchronous': 0},
1 : {'synchronous': 1},
2 : {'synchronous': 2},
3 : {'synchronous': 3},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (send_call, (), None), 1),
(1, (send_return, (), None), 2),
(2, (recv_call, (), None), 3),
(3, (recv_return, (), None), 0),
)
```
#### File: StackResult/fsmpy/Stack3FSM.py
```python
def Push(): pass
def Pop(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'Stack': {'stack': []}},
1 : {'Stack': {'stack': [2]}},
2 : {'Stack': {'stack': [1]}},
3 : {'Stack': {'stack': [2, 2]}},
4 : {'Stack': {'stack': [1, 2]}},
5 : {'Stack': {'stack': [2, 1]}},
6 : {'Stack': {'stack': [1, 1]}},
7 : {'Stack': {'stack': [2, 2, 2]}},
8 : {'Stack': {'stack': [1, 2, 2]}},
9 : {'Stack': {'stack': [2, 1, 2]}},
10 : {'Stack': {'stack': [1, 1, 2]}},
11 : {'Stack': {'stack': [2, 2, 1]}},
12 : {'Stack': {'stack': [1, 2, 1]}},
13 : {'Stack': {'stack': [2, 1, 1]}},
14 : {'Stack': {'stack': [1, 1, 1]}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Push, (2,), None), 1),
(0, (Push, (1,), None), 2),
(1, (Pop, (), 2), 0),
(1, (Push, (2,), None), 3),
(1, (Push, (1,), None), 4),
(2, (Pop, (), 1), 0),
(2, (Push, (2,), None), 5),
(2, (Push, (1,), None), 6),
(3, (Pop, (), 2), 1),
(3, (Push, (2,), None), 7),
(3, (Push, (1,), None), 8),
(4, (Pop, (), 1), 1),
(4, (Push, (2,), None), 9),
(4, (Push, (1,), None), 10),
(5, (Pop, (), 2), 2),
(5, (Push, (2,), None), 11),
(5, (Push, (1,), None), 12),
(6, (Pop, (), 1), 2),
(6, (Push, (2,), None), 13),
(6, (Push, (1,), None), 14),
(7, (Pop, (), 2), 3),
(8, (Pop, (), 1), 3),
(9, (Pop, (), 2), 4),
(10, (Pop, (), 1), 4),
(11, (Pop, (), 2), 5),
(12, (Pop, (), 1), 5),
(13, (Pop, (), 2), 6),
(14, (Pop, (), 1), 6),
)
```
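These FSM modules are plain data, so a path can be replayed directly off `graph`. A small sketch, appended to the module above, that follows named actions from `initial` and checks the end state (the first matching transition wins when an action is enabled with several arguments):

```python
# Replay a sequence of action names through the graph tuples above.
def run_path(graph, start, names):
    state = start
    for (src, (action, args, result), dst) in [t for n in names for t in graph]:
        pass  # see loop below; this line is intentionally not used
    state = start
    for name in names:
        for (src, (action, args, result), dst) in graph:
            if src == state and action.__name__ == name:
                print('{} --{}{} -> {} (returns {})'.format(src, name, args, dst, result))
                state = dst
                break
        else:
            raise ValueError('no {} transition from state {}'.format(name, state))
    return state

assert run_path(graph, initial, ['Push', 'Push', 'Pop', 'Pop']) in accepting
```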
#### File: samples/StackResult/StackDepthTen.py
```python
import Stack
def StackDepthTen():
    return len(Stack.stack) <= 10
Stack.StateFilter = StackDepthTen
```
#### File: samples/tracemultiplexer/tracemultiplexer.py
```python
from copy import copy
## Parameters
# This simple program has only two threads, with only one API call in each
program = (( 'listfiles', ), # thread 0
( 'openfile', )) # thread 1
threads = list(range(len(program))) # one element of program for each thread
unsynchronized = False # False: use tracelock, True: ignore tracelock
### State
# tracecapture state
pc = list() # program counter for each thread
phase = list() # phase of each thread in tracecapture
log = list() # contents of tracelog written by all threads
# file system state
files = list() # filenames in filesystem
listing = list() # listfiles return value, FIXME ret should be in tracecapture
### Safety condition
# phases where a thread can write to the log
writing = ('start','finish')
def writing_threads():
"""
list of threads that can write to the log
"""
return [ t for t in threads if phase[t] in writing ]
def state_invariant():
"""
At most one thread can write to the log
"""
return len(writing_threads()) <= 1
### Other necessary functions
# run is allowed to stop
def accepting():
return all([ phase[t] == 'done' for t in threads ])
# reset before another run
def reset():
    global pc, phase, log, files  # without 'files' here, the reset below only bound a local
    pc = [ 0 for thread in program ]
    phase = [ 'ready' for thread in program ]
    log = []
    files = []
### Initialize
reset()
### Actions
def start_enabled(thread):
return (phase[thread] == 'ready'
and (not writing_threads() # lock is free
or unsynchronized)) # ignore lock - might corrupt file
def start(thread):
phase[thread] = 'start' # acquire lock
# write log, if it might be corrupted write 'XXX' at the end
if state_invariant():
log.append((thread, program[thread][pc[thread]], 'start'))
else:
log.append((thread, program[thread][pc[thread]], 'start', 'XXX'))
def call_enabled(thread):
return phase[thread] == 'start' # holding lock
def call(thread):
global listing # we reassign whole list, we don't just update it
phase[thread] = 'call' # release lock, execute call
action = program[thread][pc[thread]]
    # for now, handle each action in *program* as a special case inline here
if action == 'openfile':
files.append('file0') # only works if openfiles just called once
if action == 'listfiles':
listing = copy(files) # must copy now because open may change files
def finish_enabled(thread):
return (phase[thread] == 'call'
and (not writing_threads() # lock is free
or unsynchronized)) # ignore lock - might corrupt file
def finish(thread):
phase[thread] = 'finish' # acquire lock
action = program[thread][pc[thread]]
# for now, handle each action in *program* as a special case inline here
if action == 'openfile':
ret = files[-1] # most recently appended
if action == 'listfiles':
        ret = listing # snapshot taken earlier by the listfiles call
# write log, if it might be corrupted write 'XXX' at the end
if state_invariant():
log.append((thread, action, 'finish', ret))
else:
log.append((thread, action, 'finish', ret, 'XXX'))
def exit_enabled(thread):
return phase[thread] == 'finish' # holding lock
# For now, handle exit as a special case, assign phase 'done'.
# If the simulated threads had more than one action, here we
# would advance to the next action and reset phase to 'start'.
def exit(thread):
phase[thread] = 'done' # release lock, indicate done
### Metadata
state = ('pc', 'phase', 'log', 'files', 'listing')
actions = (start, call, finish, exit)
enablers = {start:(start_enabled,), call:(call_enabled,),
finish:(finish_enabled,), exit:(exit_enabled,)}
domains = { start: { 'thread': threads },
call: { 'thread': threads },
finish: { 'thread': threads },
exit: { 'thread': threads }}
```
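A model module like this is normally driven by PyModel's tools, but the metadata at the bottom is enough to run it by hand. A hypothetical driver loop, appended to the module, that fires random enabled actions until the run is accepting (with the default synchronized setting, some action is always enabled until both threads are done):

```python
# Hypothetical stand-alone driver: pick any enabled (action, thread) pair
# until accepting() holds, then print the captured trace log.
import random

reset()
while not accepting():
    enabled = [(a, t) for a in actions for t in threads if enablers[a][0](t)]
    action, thread = random.choice(enabled)
    action(thread)
for record in log:
    print(record)
```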
#### File: WebApplication/fsmpy/OneUserNoIntScenarioFSM.py
```python
def ReadInt(): pass
def Logout(): pass
def Initialize(): pass
def Login(): pass
def UpdateInt(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'OneUserNoIntScenario': 0, 'WebModel': {'userToInt': {}, 'mode': 'Initializing', 'usersLoggedIn': []}},
1 : {'OneUserNoIntScenario': 0, 'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': []}},
2 : {'OneUserNoIntScenario': 1, 'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh']}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Initialize, (), None), 1),
(1, (Login, ('VinniPuhh', 'Correct'), 'Success'), 2),
(2, (Logout, ('VinniPuhh',), None), 1),
)
```
#### File: WebApplication/fsmpy/WebModelFSM.py
```python
def Initialize(): pass
def ReadInt(): pass
def Login(): pass
def Logout(): pass
def UpdateInt(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'WebModel': {'userToInt': {}, 'mode': 'Initializing', 'usersLoggedIn': []}},
1 : {'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': []}},
2 : {'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm']}},
3 : {'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh']}},
4 : {'WebModel': {'userToInt': {'OleBrumm': 1}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm']}},
5 : {'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm', 'VinniPuhh']}},
6 : {'WebModel': {'userToInt': {'OleBrumm': 2}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm']}},
7 : {'WebModel': {'userToInt': {}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh', 'OleBrumm']}},
8 : {'WebModel': {'userToInt': {'VinniPuhh': 2}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh']}},
9 : {'WebModel': {'userToInt': {'VinniPuhh': 1}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh']}},
10 : {'WebModel': {'userToInt': {'OleBrumm': 1}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm', 'VinniPuhh']}},
11 : {'WebModel': {'userToInt': {'OleBrumm': 1}, 'mode': 'Running', 'usersLoggedIn': []}},
12 : {'WebModel': {'userToInt': {'VinniPuhh': 2}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm', 'VinniPuhh']}},
13 : {'WebModel': {'userToInt': {'VinniPuhh': 1}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm', 'VinniPuhh']}},
14 : {'WebModel': {'userToInt': {'OleBrumm': 2}, 'mode': 'Running', 'usersLoggedIn': ['OleBrumm', 'VinniPuhh']}},
15 : {'WebModel': {'userToInt': {'OleBrumm': 2}, 'mode': 'Running', 'usersLoggedIn': []}},
16 : {'WebModel': {'userToInt': {'OleBrumm': 1}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh', 'OleBrumm']}},
17 : {'WebModel': {'userToInt': {'VinniPuhh': 2}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh', 'OleBrumm']}},
18 : {'WebModel': {'userToInt': {'VinniPuhh': 1}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh', 'OleBrumm']}},
19 : {'WebModel': {'userToInt': {'OleBrumm': 2}, 'mode': 'Running', 'usersLoggedIn': ['VinniPuhh', 'OleBrumm']}},
20 : {'WebModel': {'userToInt': {'VinniPuhh': 2}, 'mode': 'Running', 'usersLoggedIn': []}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 11, 15, 20]
unsafe = []
frontier = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Initialize, (), None), 1),
(1, (Login, ('OleBrumm', 'Incorrect'), 'Failure'), 1),
(1, (Login, ('VinniPuhh', 'Incorrect'), 'Failure'), 1),
(1, (Login, ('OleBrumm', 'Correct'), 'Success'), 2),
(1, (Login, ('VinniPuhh', 'Correct'), 'Success'), 3),
(2, (UpdateInt, ('OleBrumm', 1), None), 4),
(2, (Login, ('VinniPuhh', 'Correct'), 'Success'), 5),
(2, (Login, ('VinniPuhh', 'Incorrect'), 'Failure'), 2),
(2, (Logout, ('OleBrumm',), None), 1),
(2, (ReadInt, ('OleBrumm',), 0), 2),
(2, (UpdateInt, ('OleBrumm', 2), None), 6),
(3, (Logout, ('VinniPuhh',), None), 1),
(3, (Login, ('OleBrumm', 'Correct'), 'Success'), 7),
(3, (ReadInt, ('VinniPuhh',), 0), 3),
(3, (UpdateInt, ('VinniPuhh', 2), None), 8),
(3, (Login, ('OleBrumm', 'Incorrect'), 'Failure'), 3),
(3, (UpdateInt, ('VinniPuhh', 1), None), 9),
(4, (UpdateInt, ('OleBrumm', 1), None), 4),
(4, (Login, ('VinniPuhh', 'Correct'), 'Success'), 10),
(4, (Login, ('VinniPuhh', 'Incorrect'), 'Failure'), 4),
(4, (ReadInt, ('OleBrumm',), 1), 4),
(4, (Logout, ('OleBrumm',), None), 11),
(4, (UpdateInt, ('OleBrumm', 2), None), 6),
(5, (Logout, ('VinniPuhh',), None), 2),
(5, (UpdateInt, ('OleBrumm', 1), None), 10),
(5, (ReadInt, ('VinniPuhh',), 0), 5),
(5, (UpdateInt, ('VinniPuhh', 2), None), 12),
(5, (UpdateInt, ('VinniPuhh', 1), None), 13),
(5, (Logout, ('OleBrumm',), None), 3),
(5, (ReadInt, ('OleBrumm',), 0), 5),
(5, (UpdateInt, ('OleBrumm', 2), None), 14),
(6, (UpdateInt, ('OleBrumm', 1), None), 4),
(6, (Login, ('VinniPuhh', 'Correct'), 'Success'), 14),
(6, (ReadInt, ('OleBrumm',), 2), 6),
(6, (Login, ('VinniPuhh', 'Incorrect'), 'Failure'), 6),
(6, (Logout, ('OleBrumm',), None), 15),
(6, (UpdateInt, ('OleBrumm', 2), None), 6),
(7, (Logout, ('VinniPuhh',), None), 2),
(7, (UpdateInt, ('OleBrumm', 1), None), 16),
(7, (ReadInt, ('VinniPuhh',), 0), 7),
(7, (UpdateInt, ('VinniPuhh', 2), None), 17),
(7, (UpdateInt, ('VinniPuhh', 1), None), 18),
(7, (Logout, ('OleBrumm',), None), 3),
(7, (ReadInt, ('OleBrumm',), 0), 7),
(7, (UpdateInt, ('OleBrumm', 2), None), 19),
(8, (Logout, ('VinniPuhh',), None), 20),
(8, (Login, ('OleBrumm', 'Correct'), 'Success'), 17),
(8, (UpdateInt, ('VinniPuhh', 2), None), 8),
(8, (Login, ('OleBrumm', 'Incorrect'), 'Failure'), 8),
(8, (UpdateInt, ('VinniPuhh', 1), None), 9),
)
```
#### File: samples/WebApplication/webapp.py
```python
import pprint
import urllib.parse
# page templates appear at the end of this file
# configuration
password = { 'user1':'<PASSWORD>', 'user2':'<PASSWORD>' }
# data state
integers = dict() # user to int
strings = dict() # user to str
sessions = dict() # cookie to user, assume each user has at most one session
next_cookie = 0
def application(environ, start_response):
global next_cookie
# print environ_template % pprint.pformat(environ) # DEBUG, voluminous!
response_headers = [] # add headers below
cookie = environ.get('HTTP_COOKIE') # might be None
# show login page
if (environ['PATH_INFO'] == '/webapp.py'
and environ['REQUEST_METHOD'] == 'GET'
and cookie not in sessions): # cookie might be None
response_body = login_page
response_headers += [
('Set-Cookie','PYSESSID=%s; path=/' % next_cookie)]
next_cookie += 1
status = '200 OK'
# log in, if successful show data form page
elif (environ['PATH_INFO'] == '/webapp.py'
and environ['REQUEST_METHOD'] == 'POST'):
wd = environ['wsgi.input']
method = environ['REQUEST_METHOD']
length = int(environ['CONTENT_LENGTH'])
request_body = wd.read(length).decode()
vars = urllib.parse.parse_qs(request_body)
user = vars['username'][0] # vars[x] are lists, get first item
passwd = vars['password'][0]
if user in password and password[user] == passwd:
sessions[cookie] = user
if not user in strings:
strings[user] = ''
            # CORRECT CODE commented out
# if not user in integers:
# integers[user] = 0
# BUG follows, should be guarded by if ... like strings
integers[user] = 0 # BUG, always overwrites data from last session
# PHP version sends redirect back to doStuff instead of this response_body
#response_body = dostuff_template % (integers[user],
# strings[user])
response_headers += [('Location','webapp.py')]
status = "302 Found"
response_body = ''
else:
response_body = login_failure_page
status = '200 OK'
# submit data in form page
elif (environ['PATH_INFO'] == '/webapp.py'
and environ['REQUEST_METHOD'] == 'GET'
and cookie in sessions):
user = sessions[cookie]
vars = urllib.parse.parse_qs(environ['QUERY_STRING'])
if 'num' in vars:
integers[user] = str(vars['num'][0]) # vars[x] are lists, 1st item
if 'str' in vars:
strings[user] = vars['str'][0]
response_body = dostuff_template % (integers[user],
strings[user])
status = '200 OK'
# log out
elif environ['PATH_INFO'] == '/logout.py':
if cookie in sessions:
del sessions[cookie]
response_body = '' # blank page, like original NModel version
status = '200 OK'
pass
# unknown page
elif environ['PATH_INFO'] not in ('/webapp.py', '/logout.py'):
response_body = p404_page
status = '404 Not Found'
# nonsense: doStuff REQUEST_METHOD not GET or POST, or ... ?
else:
raise ValueError # send 500 Server Error
# response
response_headers += [('Content-Type', 'text/html'),
('Content-Length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode()]
environ_template = """environ is
%s
"""
p404_page = """<html>
<head>
<title>404 Not Found</title>
</head>
<body>
404 Not found
</body>
</html>
"""
login_page = """<html>
<head>
<title>LoginPage</title>
</head>
<body>
<form method="POST" action="webapp.py">
Username: <input type="text" name="username" size="20">
Password: <input type="password" name="password" size="20">
<input type="submit" value="Submit" name="login">
</form>
</body>
</html>
"""
login_failure_page = """
<head>
<title>Login Failure</title>
</head>
<body>
Incorrect login name or password. Please try again.
</body>
</html>
"""
# usage: dostuff_template % (integers[user], strings[user])
dostuff_template = """
<html>
<head>
<title>DoStuff</title>
</head>
<body>
Number: %s<br/>
String: %s
<form name="number" method="GET" action="webapp.py">
Number: <input type="text" name="num" size="2">
<input type="submit" value="Submit" name="inputNumber">
</form>
<form name="string" method="GET" action="webapp.py">
String: <input type="text" name="str" size="20">
<input type="submit" value="Submit" name="inputString">
</form>
<a href="logout.py">Log out</a>
</body>
</html>
"""
``` |
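`application` is a plain WSGI callable, so the standard library development server is enough to poke at it locally:

```python
# Serve the WSGI app above for local testing (assumes this file is
# importable as webapp); then browse to http://localhost:8080/webapp.py
from wsgiref.simple_server import make_server
from webapp import application

with make_server('localhost', 8080, application) as httpd:
    print('Serving on http://localhost:8080/webapp.py')
    httpd.serve_forever()
```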
{
"source": "jprouty/mint-amazon-tagger",
"score": 3
} |
#### File: mint-amazon-tagger/mintamazontagger/args.py
```python
import argparse
import datetime
import os
TAGGER_BASE_PATH = os.path.join(os.path.expanduser("~"), 'MintAmazonTagger')
def get_name_to_help_dict(parser):
return dict([(a.dest, a.help) for a in parser._actions])
def define_common_args(parser):
"""Parseargs shared between both CLI & GUI programs."""
# Amazon creds:
parser.add_argument(
'--amazon_email', default=None,
help=('Amazon e-mail. If not provided, you will be '
'prompted for it.'))
parser.add_argument(
'--amazon_password', default=None,
help=('Amazon password. If not provided, you will be '
'prompted for it.'))
parser.add_argument(
'--order_history_start_date',
type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d').date(),
default=datetime.date.today() - datetime.timedelta(days=90),
help=('The start date for fetching Amazon order history. Defaults to '
'90 days ago from today. Format: YYYY-MM-DD'))
parser.add_argument(
'--order_history_end_date',
type=lambda s: datetime.datetime.strptime(s, '%Y-%m-%d').date(),
default=datetime.date.today(),
help=('The end date for fetching Amazon order history. Defaults to '
'today. Format: YYYY-MM-DD'))
parser.add_argument(
'--order_history_timeout',
type=int,
default=180,
help=('The amount of time (in seconds) to wait for order retrieval '
'from Amazon before considering the process to have timed out.'))
default_report_location = os.path.join(
TAGGER_BASE_PATH, 'Amazon Order Reports')
parser.add_argument(
'--report_download_location', type=str,
default=default_report_location,
help='Where to place the downloaded reports.')
# Amazon Input, as CSV file:
parser.add_argument(
'--items_csv', type=argparse.FileType('r'),
help=('The "Items" Order History Report from Amazon. If not present, '
'will try to fetch order history for you. See --amazon_email.'))
parser.add_argument(
'--orders_csv', type=argparse.FileType('r'),
help='The "Orders and Shipments" Order History Report from Amazon')
parser.add_argument(
'--refunds_csv', type=argparse.FileType('r'),
help='The "Refunds" Order History Report from Amazon. '
'This is optional.')
# Mint creds:
parser.add_argument(
'--mint_email', default=None,
help=('Mint e-mail address for login. If not provided here, will be '
'prompted for user.'))
parser.add_argument(
        '--mint_password', default=None,
help=('Mint password for login. If not provided here, will be '
'prompted for.'))
parser.add_argument(
'--mint_mfa_preferred_method',
default='email',
choices=['sms', 'email', 'soft-token'],
        help='The preferred Mint MFA method (2-factor auth codes).')
parser.add_argument(
'--mfa_soft_token',
default=None,
help='The MFA soft-token to pass to oathtool.')
parser.add_argument(
'--mint_login_timeout',
default=60*5,
help='The number of seconds to wait attempting to log into Mint.')
parser.add_argument(
'--mint_wait_for_sync',
action='store_true',
default=False,
help=('Wait for Mint accounts to sync, up to 5 minutes. By '
'default, do not wait for accounts to sync with the backing '
'financial institutions.'))
parser.add_argument(
'--mint_user_will_login',
action='store_true',
default=False,
help='If set, let the user log in on their own.')
parser.add_argument(
'--mint_intuit_account',
default=None,
help=('The intuit account to select if multiple are associated with '
'--mint_email.'))
# Mint API options:
default_session_path = os.path.join(
TAGGER_BASE_PATH, '.mintapi2020', 'session')
parser.add_argument(
'--session-path', nargs='?',
default=default_session_path,
        help=('Directory to save browser session, including cookies. Use to '
              'prevent repeated MFA prompts. Defaults to a directory under '
              '~/MintAmazonTagger. Set to None to use a temporary profile.'))
parser.add_argument(
'--headless',
action='store_true',
default=False,
help='Whether to execute chromedriver with no visible window.')
# Prefix customization:
parser.add_argument(
'--description_prefix_override', type=str,
help=('The prefix to use when updating the description for each Mint '
'transaction. By default, the \'Website\' value from Amazon '
'Items/Orders csv is used. If a string is provided, use '
'this instead for all matched transactions. If given, this is '
'used in conjunction with amazon_domains to detect if a '
'transaction has already been tagged by this tool.'))
parser.add_argument(
'--description_return_prefix_override', type=str,
help=('The prefix to use when updating the description for each Mint '
'refund. By default, the \'Website\' value from Amazon '
'Items/Orders csv is used with refund appended (e.g. '
'\'Amazon.com Refund: ...\'. If a string is provided here, use '
'this instead for all matched refunds. If given, this is '
'used in conjunction with amazon_domains to detect if a '
'refund has already been tagged by this tool.'))
parser.add_argument(
'--amazon_domains', type=str,
# From: https://en.wikipedia.org/wiki/Amazon_(company)#Website
default=('amazon.com,amazon.cn,amazon.in,amazon.co.jp,amazon.com.sg,'
'amazon.com.tr,amazon.fr,amazon.de,amazon.it,amazon.nl,'
'amazon.es,amazon.co.uk,amazon.ca,amazon.com.mx,'
'amazon.com.au,amazon.com.br'),
help=('A list of all valid Amazon domains/websites. These should '
'match the website column from Items/Orders and is used to '
'detect if a transaction has already been tagged by this tool.'))
# To itemize or not to itemize; that is the question:
parser.add_argument(
'--verbose_itemize', action='store_true',
help=('Itemize everything, instead of the default behavior, which is '
'to not itemize out shipping/promos/etc if '
'there is only one item per Mint transaction. Will also remove '
'free shipping.'))
parser.add_argument(
'--no_itemize', action='store_true',
help=('Do not split Mint transactions into individual items with '
'attempted categorization.'))
parser.add_argument(
'--num_updates', type=int,
default=0,
help=('Only send the first N updates to Mint (or print N updates at '
'dry run). If not present, all updates are sent or printed.'))
parser.add_argument(
'--retag_changed', action='store_true',
help=('For transactions that have been previously tagged by this '
'script, override any edits (like adjusting the category). This '
'feature works by looking for "Amazon.com: " at the start of a '
'transaction. If the user changes the description, then the '
'tagger won\'t know to leave it alone.'))
# Tagging options:
parser.add_argument(
'--no_tag_categories', action='store_true',
help=('Do not update Mint categories. This is useful as '
'Amazon doesn\'t provide the best categorization and it is '
'pretty common user behavior to manually change the categories. '
'This flag prevents tagger from wiping out that user work.'))
parser.add_argument(
'--do_not_predict_categories', action='store_true',
help=('Do not attempt to predict custom category tagging based on any '
'tagging overrides. By default (no arg) tagger will attempt to '
'find items that you have manually changed categories for.'))
parser.add_argument(
'--max_days_between_payment_and_shipping', type=int,
default=3,
help=('How many days are allowed to pass between when Amazon has '
'shipped an order and when the payment has posted to your '
'bank account (as per Mint\'s view).'))
parser.add_argument(
'--mint_input_merchant_filter', type=str,
default='amazon,amzn',
help=('Only consider Mint transactions that have one of these strings '
'in the merchant field. Case-insensitive comma-separated.'))
parser.add_argument(
'--mint_input_include_mmerchant', action='store_true',
help=('Consider using the mmerchant field when determining if '
'a transaction is an Amazon purchase. This can be necessary '
'when your bank renames transactions to "Debit card payment". '
'Mint sometimes auto-recovers these into "Amazon", and flipping '
'this flag will help match these. To know if you should use it, '
'find a transaction in the Mint tool, and click on the details. '
'Look for "Appears on your BANK ACCOUNT NAME statement as NOT '
'USEFUL NAME on DATE".'))
parser.add_argument(
'--mint_input_include_merchant', action='store_true',
help=('Consider using the merchant field when determining if '
'a transaction is an Amazon purchase. This is similar to '
'--mint_input_include_mmerchant but also includes any user '
'edits to the transaction name.'))
parser.add_argument(
'--mint_input_categories_filter', type=str,
help=('Only consider Mint transactions that match one of '
'the given categories here. Comma separated list of Mint '
'categories.'))
parser.add_argument(
'--save_pickle_backup', action='store_true',
default=False,
help=('Saves a backup of your Mint transactions to a python "Pickle" '
'file, just in case anything goes wrong or for rapid '
'development so you don\'t have to download from Mint every '
'time the tool is run. Off by default to prevent storing '
'sensitive information locally without a user knowing it.'))
parser.add_argument(
'--pickled_epoch', type=int,
help=('Do not fetch categories or transactions from Mint. Use this '
'pickled epoch instead. If coupled with --dry_run, no '
'connection to Mint is established.'))
default_pickle_path = os.path.join(TAGGER_BASE_PATH, 'Mint Backup')
parser.add_argument(
'--mint_pickle_location', type=str,
default=default_pickle_path,
help='Where to store the fetched Mint pickles (for backup).')
def define_gui_args(parser):
define_common_args(parser)
# TODO: Clean up and remove.
parser.add_argument(
'--prompt_retag',
default=False,
action='store_false',
help=('Unsupported for gui; but must be defined to false.'))
def define_cli_args(parser):
define_common_args(parser)
# Debugging/testing.
parser.add_argument(
'--dry_run', action='store_true',
help=('Do not modify Mint transaction; instead print the proposed '
'changes to console.'))
parser.add_argument(
'--skip_dry_print', action='store_true',
help=('Do not print dry run results (useful for development).'))
parser.add_argument(
'-V', '--version', action='store_true',
help='Shows the app version and quits.')
# Retag transactions that have already been tagged previously:
parser.add_argument(
'--prompt_retag', action='store_true',
help=('For transactions that have been previously tagged by this '
'script, override any edits (like adjusting the category) but '
'only after confirming each change. More gentle than '
'--retag_changed'))
parser.add_argument(
'--print_unmatched', action='store_true',
help=('At completion, print unmatched orders to help manual tagging.'))
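# --- Editor's sketch (not part of the original module) -----------------------
# Minimal wiring showing how these definitions are typically consumed by a
# CLI entry point; the argv values below are hypothetical.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Tag Mint transactions with Amazon order details.')
    define_cli_args(parser)
    args = parser.parse_args(['--dry_run', '--num_updates', '5'])
    print(args.dry_run, args.num_updates)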
```
#### File: mint-amazon-tagger/mintamazontagger/orderhistory.py
```python
import getpass
import logging
import os
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import Select
from selenium.webdriver.support.ui import WebDriverWait
from mintamazontagger.my_progress import no_progress_factory
from mintamazontagger.webdriver import get_element_by_id, get_element_by_xpath
logger = logging.getLogger(__name__)
ORDER_HISTORY_URL_VIA_SWITCH_ACCOUNT_LOGIN = (
'https://www.amazon.com/gp/navigation/redirector.html/ref=sign-in-redirect'
'?ie=UTF8&associationHandle=usflex¤tPageURL='
'https%3A%2F%2Fwww.amazon.com%2Fgp%2Fyourstore%2Fhome%3Fie%3DUTF8%26'
'ref_%3Dnav_youraccount_switchacct&pageType=&switchAccount=picker&'
'yshURL=https%3A%2F%2Fwww.amazon.com%2Fgp%2Fb2b%2Freports')
ORDER_HISTORY_REPORT_URL = 'https://www.amazon.com/gp/b2b/reports'
def fetch_order_history(args, webdriver_factory,
progress_factory=no_progress_factory):
email = get_email(args.amazon_email)
name = email.split('@')[0]
if args.items_csv and args.orders_csv:
return True
start_date = args.order_history_start_date
end_date = args.order_history_end_date
report_shortnames = ['Items', 'Orders', 'Refunds']
report_names = ['{} {} from {:%d %b %Y} to {:%d %b %Y}'.format(
name, t, start_date, end_date)
for t in report_shortnames]
report_types = ['ITEMS', 'SHIPMENTS', 'REFUNDS']
report_paths = [os.path.join(args.report_download_location, name + '.csv')
for name in report_names]
os.makedirs(args.report_download_location, exist_ok=True)
# Be lazy with getting the driver, as if no fetching is needed, then it's
# all good.
webdriver = None
for report_shortname, report_type, report_name, report_path in zip(
report_shortnames, report_types, report_names, report_paths):
if os.path.exists(report_path):
# Report has already been fetched! Woot
continue
# Report is not here. Go get it.
if not webdriver:
login_progress = progress_factory(
'Signing into Amazon.com to request order reports.', 0)
webdriver = webdriver_factory()
nav_to_amazon_and_login(webdriver, email, args.amazon_password)
login_progress.finish()
request_progress = progress_factory(
'Requesting {} report '.format(report_shortname), 0)
request_report(webdriver, report_name, report_type,
start_date, end_date)
request_progress.finish()
processing_progress = progress_factory(
'Waiting for {} report to be ready '.format(report_shortname), 0)
try:
wait_cond = EC.presence_of_element_located(
(By.XPATH, get_report_download_link_xpath(report_name)))
WebDriverWait(webdriver, args.order_history_timeout).until(
wait_cond)
processing_progress.finish()
except TimeoutException:
processing_progress.finish()
logger.critical("Cannot find download link after a minute!")
return False
download_progress = progress_factory(
'Downloading {} report '.format(report_shortname), 0)
download_report(webdriver, report_name, report_path)
download_progress.finish()
args.items_csv = open(report_paths[0], 'r', encoding='utf-8')
args.orders_csv = open(report_paths[1], 'r', encoding='utf-8')
args.refunds_csv = open(report_paths[2], 'r', encoding='utf-8')
return True
def get_email(email):
if not email:
email = input('Amazon email: ')
if not email:
logger.error('Empty Amazon email.')
exit(1)
return email
def get_password(password):
if not password:
        password = getpass.getpass('Amazon password: ')
if not password:
logger.error('Empty Amazon password.')
exit(1)
return password
def nav_to_amazon_and_login(webdriver, email, password):
logger.info('Starting login flow for Amazon.com')
webdriver.get(ORDER_HISTORY_URL_VIA_SWITCH_ACCOUNT_LOGIN)
webdriver.implicitly_wait(2)
# Go straight to the account switcher, and look for the given email.
# If present, click on it! Otherwise, click on "Add account".
desired_account_element = get_element_by_xpath(
webdriver,
"//div[contains(text(), '{}')]".format(email))
if desired_account_element:
desired_account_element.click()
webdriver.implicitly_wait(2)
# It's possible this account has already authed recently. If so, the
# next block will be skipped and the login is complete!
if not get_element_by_id(webdriver, 'report-confirm'):
webdriver.find_element_by_id('ap_password').send_keys(
get_password(password))
webdriver.find_element_by_name('rememberMe').click()
webdriver.find_element_by_id('signInSubmit').submit()
else:
# Cannot find the desired account in the switch. Log in via Add Account
webdriver.find_element_by_xpath(
'//div[text()="Add account"]').click()
webdriver.implicitly_wait(2)
webdriver.find_element_by_id('ap_email').send_keys(email)
# Login flow sometimes asks just for the email, then a
# continue button, then password.
if get_element_by_id(webdriver, 'continue'):
webdriver.find_element_by_id('continue').click()
webdriver.implicitly_wait(2)
webdriver.find_element_by_id('ap_password').send_keys(
get_password(password))
webdriver.find_element_by_name('rememberMe').click()
webdriver.find_element_by_id('signInSubmit').submit()
webdriver.implicitly_wait(2)
if not get_element_by_id(webdriver, 'report-confirm'):
logger.warning('Having trouble logging into Amazon. Please see the '
'browser and complete login within the next 5 minutes. '
'This script will continue automatically on success. '
'You may need to manually navigate to: {}'.format(
ORDER_HISTORY_REPORT_URL))
if get_element_by_id(webdriver, 'auth-mfa-otpcode'):
logger.warning('Hint: Looks like an auth challenge! Maybe check '
'your email')
try:
wait_cond = EC.presence_of_element_located((By.ID, 'report-confirm'))
WebDriverWait(webdriver, 60 * 5).until(wait_cond)
except TimeoutException:
logger.critical('Cannot complete Amazon login!')
return False
return True
def request_report(webdriver, report_name, report_type, start_date, end_date):
try:
# Do not request the report again if it's already available for
# download.
webdriver.find_element_by_xpath(
get_report_download_link_xpath(report_name))
return
except NoSuchElementException:
pass
Select(webdriver.find_element_by_id(
'report-type')).select_by_value(report_type)
Select(webdriver.find_element_by_id(
'report-month-start')).select_by_value(str(start_date.month))
Select(webdriver.find_element_by_id(
'report-day-start')).select_by_value(str(start_date.day))
Select(webdriver.find_element_by_id(
'report-year-start')).select_by_value(str(start_date.year))
Select(webdriver.find_element_by_id(
'report-month-end')).select_by_value(str(end_date.month))
Select(webdriver.find_element_by_id(
'report-day-end')).select_by_value(str(end_date.day))
Select(webdriver.find_element_by_id(
'report-year-end')).select_by_value(str(end_date.year))
webdriver.find_element_by_id('report-name').send_keys(report_name)
# Submit will not work as the input type is an image (nice Amazon)
webdriver.find_element_by_id('report-confirm').click()
def get_report_download_link_xpath(report_name):
return "//td[contains(text(), '{}')]/..//td/a[text()='Download']".format(
report_name)
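# Editor's note (not in the original): for a hypothetical report_name of
# 'Items jane from 01 Jan 2021 to 31 Mar 2021', the helper above produces:
#   //td[contains(text(), 'Items jane from 01 Jan 2021 to 31 Mar 2021')]
#   /..//td/a[text()='Download']
# i.e. the 'Download' anchor in the table row that mentions the report.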
def download_report(webdriver, report_name, report_path):
# 1. Find the report download link
report_url = None
try:
download_link = webdriver.find_element_by_xpath(
get_report_download_link_xpath(report_name))
report_url = download_link.get_attribute('href')
except NoSuchElementException:
logger.critical('Could not find the download link!')
exit(1)
# 2. Download the report to the AMZN Reports directory
response = webdriver.request('GET', report_url, allow_redirects=True)
response.raise_for_status()
with open(report_path, 'w', encoding='utf-8') as fh:
fh.write(response.text)
``` |
{
"source": "jprouty/safewayClipClip",
"score": 2
} |
#### File: safewayClipClip/safewayclipclip/args.py
```python
import os
BASE_PATH = os.path.join(os.path.expanduser("~"), 'SafewayClipClip')
def get_name_to_help_dict(parser):
return dict([(a.dest, a.help) for a in parser._actions])
def define_common_args(parser):
"""Parseargs shared between both CLI & GUI programs."""
# Amazon creds:
parser.add_argument(
'--safeway_username', default=None,
help=('Safeway username, either an e-mail or phone. If not provided, '
'you will be prompted for it.'))
parser.add_argument(
'--safeway_user_will_login',
action='store_true',
default=False,
help='If set, let the user log in on their own.')
default_session_path = os.path.join(BASE_PATH, 'ChromeSession')
parser.add_argument(
'--session-path', nargs='?',
default=default_session_path,
help=('Directory to save browser session, including cookies. Use to '
'prevent repeated MFA prompts. Defaults to a directory in your '
'home dir. Set to None to use a temporary profile.'))
parser.add_argument(
'--headless',
action='store_true',
default=False,
help='Whether to execute chromedriver with no visible window.')
parser.add_argument(
'-V', '--version', action='store_true',
help='Shows the app version and quits.')
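# --- Editor's sketch (not part of the original module) -----------------------
# Hypothetical wiring for a CLI entry point using the shared definitions:
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Safeway coupon clipper.')
    define_common_args(parser)
    args = parser.parse_args(['--headless'])
    print(args.headless, args.session_path)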
```
#### File: safewayClipClip/safewayclipclip/webdriver.py
```python
import io
import logging
import os
import psutil
import re
import requests
import subprocess
from sys import platform
import zipfile
from selenium.common.exceptions import (
InvalidArgumentException, NoSuchElementException)
from selenium.webdriver import ChromeOptions
from seleniumrequests import Chrome
logger = logging.getLogger(__name__)
def get_webdriver(headless=False, session_path=None):
chrome_options = ChromeOptions()
if headless:
chrome_options.add_argument('headless')
chrome_options.add_argument('no-sandbox')
chrome_options.add_argument('disable-dev-shm-usage')
chrome_options.add_argument('disable-gpu')
if session_path is not None:
chrome_options.add_argument("user-data-dir=" + session_path)
home_dir = os.path.expanduser("~")
try:
return Chrome(options=chrome_options,
executable_path=get_stable_chrome_driver(home_dir))
except InvalidArgumentException as e:
if 'user data directory is already in use' not in e.msg:
logger.warning('reraising selenium exception')
raise e
        logger.warning(
'Found existing webdriver from previous run, attempting to kill')
for proc in psutil.process_iter():
try:
if not proc.children():
continue
if not any(
[session_path in param for param in proc.cmdline()]):
continue
logger.info(
'Attempting to terminate process id {}'.format(proc.pid))
proc.terminate()
except (psutil.NoSuchProcess, psutil.AccessDenied,
psutil.ZombieProcess):
pass
return Chrome(options=chrome_options,
executable_path=get_stable_chrome_driver(home_dir))
def is_visible(element):
return element and element.is_displayed()
def get_element_by_id(driver, id):
try:
return driver.find_element_by_id(id)
except NoSuchElementException:
pass
return None
def get_element_by_name(driver, name):
try:
return driver.find_element_by_name(name)
except NoSuchElementException:
pass
return None
def get_element_by_xpath(driver, xpath):
try:
return driver.find_element_by_xpath(xpath)
except NoSuchElementException:
pass
return None
def get_element_by_link_text(driver, link_text):
try:
return driver.find_element_by_link_text(link_text)
except NoSuchElementException:
pass
return None
def get_elements_by_class_name(driver, class_name):
try:
return driver.find_elements_by_class_name(class_name)
except NoSuchElementException:
pass
return None
CHROME_DRIVER_BASE_URL = 'https://chromedriver.storage.googleapis.com/'
CHROME_DRIVER_DOWNLOAD_PATH = '{version}/chromedriver_{arch}.zip'
CHROME_DRIVER_LATEST_RELEASE = 'LATEST_RELEASE'
CHROME_ZIP_TYPES = {
'linux': 'linux64',
'linux2': 'linux64',
'darwin': 'mac64',
'win32': 'win32',
'win64': 'win32'
}
version_pattern = re.compile(
"(?P<version>(?P<major>\\d+)\\.(?P<minor>\\d+)\\."
"(?P<build>\\d+)\\.(?P<patch>\\d+))")
def get_chrome_driver_url(version, arch):
return CHROME_DRIVER_BASE_URL + CHROME_DRIVER_DOWNLOAD_PATH.format(
version=version, arch=CHROME_ZIP_TYPES.get(arch))
def get_chrome_driver_major_version_from_executable(local_executable_path):
# Note; --version works on windows as well.
# check_output fails if running from a thread without a console on win10.
# To protect against this use explicit pipes for STDIN/STDERR.
# See: https://github.com/pyinstaller/pyinstaller/issues/3392
with open(os.devnull, 'wb') as devnull:
version = subprocess.check_output(
[local_executable_path, '--version'],
stderr=devnull,
stdin=devnull)
version_match = version_pattern.search(version.decode())
if not version_match:
return None
return version_match.groupdict()['major']
def get_latest_chrome_driver_version():
"""Returns the version of the latest stable chromedriver release."""
latest_url = CHROME_DRIVER_BASE_URL + CHROME_DRIVER_LATEST_RELEASE
latest_request = requests.get(latest_url)
if latest_request.status_code != 200:
raise RuntimeError(
'Error finding the latest chromedriver at {}, status = {}'.format(
latest_url, latest_request.status_code))
return latest_request.text
def get_stable_chrome_driver(download_directory=os.getcwd()):
chromedriver_name = 'chromedriver'
if platform in ['win32', 'win64']:
chromedriver_name += '.exe'
local_executable_path = os.path.join(download_directory, chromedriver_name)
latest_chrome_driver_version = get_latest_chrome_driver_version()
version_match = version_pattern.match(latest_chrome_driver_version)
latest_major_version = None
if not version_match:
logger.error("Cannot parse latest chrome driver string: {}".format(
latest_chrome_driver_version))
else:
latest_major_version = version_match.groupdict()['major']
if os.path.exists(local_executable_path):
major_version = get_chrome_driver_major_version_from_executable(
local_executable_path)
if major_version == latest_major_version or not latest_major_version:
# Use the existing chrome driver, as it's already the latest
# version or the latest version cannot be determined at the moment.
return local_executable_path
logger.info('Removing old version {} of Chromedriver'.format(
major_version))
os.remove(local_executable_path)
if not latest_chrome_driver_version:
logger.critical(
'No local chrome driver found and cannot parse the latest chrome '
'driver on the internet. Please double check your internet '
'connection, then ask for assistance on the github project.')
return None
logger.info('Downloading version {} of Chromedriver'.format(
latest_chrome_driver_version))
zip_file_url = get_chrome_driver_url(
latest_chrome_driver_version, platform)
request = requests.get(zip_file_url)
if request.status_code != 200:
raise RuntimeError(
'Error finding chromedriver at {}, status = {}'.format(
zip_file_url, request.status_code))
zip_file = zipfile.ZipFile(io.BytesIO(request.content))
zip_file.extractall(path=download_directory)
os.chmod(local_executable_path, 0o755)
return local_executable_path
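# --- Editor's sketch (not part of the original module) -----------------------
# Locate (or download) a chromedriver matching the latest stable release and
# report where it lives. Requires network access on the first run; the path
# printed is machine-specific.
if __name__ == '__main__':
    driver_path = get_stable_chrome_driver()
    print('Using chromedriver at:', driver_path)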
``` |
{
"source": "jprov410/mqds",
"score": 3
} |
#### File: run/analysis/mqds_class.py
```python
import os
import glob
import numpy as np
__all__ = ['LinearResponse','ThirdOrderResponse','DensityMatrix','SpectralDensity']
class DensityMatrix(object):
"""
class for mqds density matrix data
"""
def __init__(self, method):
"""
takes reduced density matrix method as input
e.g.) pldm, sqc, ipldm
"""
prefix = method + '.'
self.time = None
size = int( np.sqrt( len( glob.glob( prefix+'*' ) ) ) )
for filename in os.listdir('.'):
if filename.startswith(prefix):
if self.time is None:
with open(filename, 'r') as f:
self.time = []
for line in f:
self.time.append( float( line.split()[0] ) )
break
self.time = np.array( self.time )
self.rho = np.empty([size,size,len(self.time)], dtype=complex)
for filename in os.listdir('.'):
if filename.startswith(prefix):
suffix = filename.split('.')[1]
index1, index2 = suffix.split('-')
index1, index2 = int(index1), int(index2)
self.rho[index1-1,index2-1] = self.matrix_element( filename )
self.shape = self.rho.shape
def matrix_element(self, filename):
"""
Takes filename as an argument to retrieve data for each density matrix element
"""
real, imag = [], []
with open(filename,'r') as f:
for line in f:
real.append( float( line.split()[1] ) )
imag.append( float( line.split()[2] ) )
real = np.array( real )
imag = np.array( imag )
return real + 1j * imag
def __getitem__(self, i):
return self.rho[i]
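# --- Editor's note (not part of the original module) --------------------------
# The constructor above infers the system size from files named
# '<method>.<i>-<j>' (e.g. 'pldm.1-1'), each holding columns
# time, Re[rho_ij], Im[rho_ij]. Illustrative use:
#   rho = DensityMatrix('pldm')
#   ground_population = rho[0, 0].real   # rho_11(t) via __getitem__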
"""
Response is the superclass for LinearResponse and ThirdOrderResponse
"""
class Response(object):
"""
Superclass for response functions calculated with mqds
"""
    def __init__(self):
self.type = 'Response Function'
def wrange(self, wmin=0.0, wmax=500.0, wpts=250):
"""
defines frequency range over which to Fourier transform the
linear response function
"""
w = []
for i in range(0, wpts+1):
w.append( float( i * (wmax - wmin) / wpts + wmin ) )
w = np.array( w )
return w
class LinearResponse(Response):
"""
class for mqds linear response function data
"""
def __init__(self, method):
"""
takes method used to compute the response function
as an argument
"""
infile = method + '_linrespfunc.out'
self.time, self.real, self.imag = [], [], []
with open(infile, 'r') as f:
for line in f:
self.time.append( float( line.split()[0] ) )
self.real.append( float( line.split()[1] ) )
self.imag.append( float( line.split()[2] ) )
self.time = np.array( self.time )
self.real = np.array( self.real )
self.imag = np.array( self.imag )
class ThirdOrderResponse(Response):
"""
class for mqds nonlinear response function data
"""
    def __init__(self, method='pldm', signal='rephasing'):
        """
        takes the method and signal type used to compute the response
        function as arguments
        """
        prefix = method + '_nonlin'
        # NOTE: 'infile' was undefined in the original; the naming below is an
        # assumption mirroring LinearResponse's '<method>_linrespfunc.out'.
        infile = '{}_{}_respfunc.out'.format(prefix, signal)
        self.time1, self.time2, self.time3 = [], [], []
        self.real, self.imag = [], []
        with open(infile, 'r') as f:
for line in f:
self.time1.append( float( line.split()[0] ) )
self.time2.append( float( line.split()[1] ) )
self.time3.append( float( line.split()[2] ) )
self.real.append( float( line.split()[3] ) )
self.imag.append( float( line.split()[4] ) )
self.time1 = np.array( self.time1 )
self.time2 = np.array( self.time2 )
self.time3 = np.array( self.time3 )
self.real = np.array( self.real )
self.imag = np.array( self.imag )
class SpectralDensity(object):
"""
class for spectral densities for the bath
    types - obo, ohmic (lognorm is not yet implemented)
    """
    def __init__(self, type='obo', wmin=0.0, wmax=2000.0, wpts=2000, reorg=20.0, wc=50.0):
self.type = type
sdtypes = ['obo','ohmic']
if self.type in sdtypes:
self.freq = self.omega(wmin, wmax, wpts)
self.sd = self.buildsd(reorg, wc)
else:
print('only obo (overdamped brownian oscillator) and ohmic')
def omega(self, wmin, wmax, wpts):
"""
defines frequency range over which to Fourier transform the
linear response function
"""
w = []
for i in range(0, wpts+1):
w.append( float( i * (wmax - wmin) / wpts + wmin ) )
w = np.array( w )
return w
def buildsd(self, reorg, wc):
"""
function that builds the spectral density
"""
if self.type == 'obo':
return self.obo_sd(reorg,wc)
elif self.type == 'ohmic':
return self.ohmic_sd(reorg,wc)
else:
print('There is no spectral density function for ' + self.type)
def obo_sd(self, reorg, wc):
"""
overdamped brownian oscillator spectral density function
"""
obo = []
for w in self.freq:
obo.append( 2.0 * reorg * ( (w/wc) / (1.0 + (w/wc)**2) ) )
obo = np.array(obo)
return obo
def ohmic_sd(self, reorg, wc):
"""
        ohmic spectral density function
"""
ohmic = []
for w in self.freq:
ohmic.append( np.pi * reorg * w / wc * np.exp(-w/wc) )
ohmic = np.array(ohmic)
return ohmic
def info(self):
print('type = ' + self.type)
print('wrange = ' + str(self.freq[0]) + ' to ' + str(self.freq[-1]) + ' cm-1 (dw = ' + str(self.freq[1] - self.freq[0]) + ')')
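# --- Editor's sketch (not part of the original module) -----------------------
# Build and plot an overdamped Brownian oscillator spectral density with the
# defaults above (reorganization energy 20 cm-1, cutoff frequency 50 cm-1).
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    sd = SpectralDensity(type='obo', reorg=20.0, wc=50.0)
    sd.info()
    plt.plot(sd.freq, sd.sd)
    plt.xlabel('frequency (cm-1)')
    plt.ylabel('J(w)')
    plt.show()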
``` |
{
"source": "jprovaznik/os-cloud-management",
"score": 2
} |
#### File: utils/tests/test_environment.py
```python
import fixtures
import mock
import testtools
from os_cloud_management.cmd.utils import environment
from os_cloud_management import exception
from os_cloud_management.tests import base
class CMDEnviromentTest(base.TestCase):
def setUp(self):
super(CMDEnviromentTest, self).setUp()
for key in ('OS_AUTH_URL', 'OS_PASSWORD', 'OS_TENANT_NAME',
'OS_USERNAME', 'OS_CACERT'):
fixture = fixtures.EnvironmentVariable(key)
self.useFixture(fixture)
@mock.patch.dict('os.environ', {})
def test_ensure_environment_missing_all(self):
message = ("OS_AUTH_URL, OS_PASSWORD, OS_TENANT_NAME, OS_USERNAME "
"environment variables are required to be set.")
with testtools.ExpectedException(exception.MissingEnvironment,
message):
environment._ensure()
@mock.patch.dict('os.environ', {'OS_PASSWORD': 'a', 'OS_AUTH_URL': 'a',
'OS_TENANT_NAME': 'a'})
def test_ensure_environment_missing_username(self):
message = "OS_USERNAME environment variable is required to be set."
with testtools.ExpectedException(exception.MissingEnvironment,
message):
environment._ensure()
@mock.patch.dict('os.environ', {'OS_PASSWORD': 'a', 'OS_AUTH_URL': 'a',
'OS_TENANT_NAME': 'a', 'OS_USERNAME': 'a'})
def test_ensure_environment_missing_none(self):
self.assertIs(None, environment._ensure())
``` |
{
"source": "jprovaznik/tripleo-common",
"score": 2
} |
#### File: cmd/utils/_clients.py
```python
import logging
import os
from tripleo_common.utils import clients
LOG = logging.getLogger(__name__)
def _get_client_args():
return (os.environ["OS_USERNAME"],
os.environ["OS_PASSWORD"],
os.environ["OS_TENANT_NAME"],
os.environ["OS_AUTH_URL"],
os.environ.get("OS_CACERT"))
def get_heat_client():
return clients.get_heat_client(*_get_client_args())
def get_tuskar_client():
return clients.get_tuskar_client(*_get_client_args())
```
#### File: tripleo-common/tripleo_common/scales.py
```python
import json
import logging
import time
import libutils
from tuskarclient.common import utils as tuskarutils
LOG = logging.getLogger(__name__)
class ScaleManager:
def __init__(self, tuskarclient, heatclient, plan_id=None, stack_id=None):
self.tuskarclient = tuskarclient
self.heatclient = heatclient
self.stack_id = stack_id
self.plan = tuskarutils.find_resource(self.tuskarclient.plans, plan_id)
def scaleup(self, role, num):
param_name = "{0}::count".format(role)
self.plan = self.tuskarclient.plans.patch(
self.plan.uuid, [{'name': '{0}::count'.format(role),
'value': num}])
params = libutils.heat_params_from_templates(
self.tuskarclient.plans.templates(self.plan.uuid))
stack = self.heatclient.stacks.update(self.stack_id, **params)
``` |
{
"source": "jproyo/diefpy",
"score": 3
} |
#### File: diefpy/diefpy/dief.py
```python
import numpy as np
import matplotlib.pyplot as plt
def dieft(inputtrace, inputtest, t=-1.0):
"""
This function computes the dief@t metric.
:param inputtrace: Dataframe with the answer trace. Attributes of the dataframe: test, approach, answer, time.
:type inputtrace: numpy.ndarray
:param inputtest: Specifies the specific test to analyze from the answer trace.
:type inputtest: str
:param t: Point in time to compute dieft. By default, the function computes the minimum of the execution time among the approaches in the answer trace.
:type t: float
:return: Dataframe with the dief@t values for each approach. Attributes of the dataframe: test, approach, dieft.
:rtype: numpy.ndarray
"""
# Initialize output structure.
df = np.empty(shape=0, dtype=[('test', basestring),
('approach', basestring),
('dieft', float)])
# Obtain test and approaches to compare.
results = inputtrace[inputtrace['test'] == inputtest]
approaches = set(results['approach'])
# Obtain t per approach.
if t == -1:
n = []
for a in approaches:
x = results[results['approach'] == a]
if len(x) == 1 and x['answer'] == 0:
n.append(x[x['answer'] == 0]['time'][0])
else:
n.append(x[x['answer'] == len(x)]['time'][0])
t = max(n)
# Compute dieft per approach.
for a in approaches:
dief = 0
subtrace = results[(results['approach'] == a) & (results['time'] <= t)]
com = np.array([(inputtest, a, len(subtrace), t)],
dtype=[('test', basestring),
('approach', basestring),
('answer', int),
('time', float)])
if len(subtrace) == 1 and subtrace['answer'] == 0:
pass
else:
subtrace = np.concatenate((subtrace, com), axis=0)
if len(subtrace) > 1:
dief = np.trapz(subtrace['answer'], subtrace['time'])
res = np.array([(inputtest, a, dief)],
dtype=[('test', basestring),
('approach', basestring),
('dieft', float)])
df = np.append(df, res, axis=0)
return df
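# --- Editor's sketch (not part of the original module) -----------------------
# dief@t is the area under an approach's answer trace up to time t (hence the
# np.trapz above). Assuming a CSV 'traces.csv' with header
# test,approach,answer,time, usage would be:
#   traces = load_trace('traces.csv')   # hypothetical file
#   print(dieft(traces, 'Q1'))          # t defaults to min completion time
#   print(dieft(traces, 'Q1', t=10.0))  # dief@t at t = 10 time units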
def diefk(inputtrace, inputtest, k=-1):
"""
This function computes the dief@k metric at a given k (number of answers).
:param inputtrace: Dataframe with the answer trace. Attributes of the dataframe: test, approach, answer, time.
:type inputtrace: numpy.ndarray
:param inputtest: Specifies the specific test to analyze from the answer trace.
:type inputtest: str
:param k: Number of answers to compute diefk. By default, the function computes the minimum of the total number of answers produced by the approaches.
:type k: int
:return: Dataframe with the dief@k values for each approach. Attributes of the dataframe: test, approach, diefk.
:rtype: numpy.ndarray
"""
# Initialize output structure.
df = np.empty(shape=0, dtype=[('test', basestring),
('approach', basestring),
('diefk', float)])
# Obtain test and approaches to compare.
results = inputtrace[inputtrace['test'] == inputtest]
    approaches = set(results['approach'])
# Obtain k per approach.
if k == -1:
n = []
for a in approaches:
x = results[results['approach'] == a]
n.append(len(x))
k = min(n)
# Compute diefk per approach.
for a in approaches:
dief = 0
subtrace = results[(results['approach'] == a) & (results['answer'] <= k)]
if len(subtrace) > 1:
dief = np.trapz(subtrace['answer'], subtrace['time'])
res = np.array([(inputtest, a, dief)],
dtype=[('test', basestring),
('approach', basestring),
('diefk', float)])
df = np.append(df, res, axis=0)
return df
def diefk2(inputtrace, inputtest, kp=-1.0):
"""
This function computes the dief@k metric at a given kp (percentage of answers).
:param inputtrace: Dataframe with the answer trace. Attributes of the dataframe: test, approach, answer, time.
:type inputtrace: numpy.ndarray
:param inputtest: Specifies the specific test to analyze from the answer trace.
:type inputtest: str
    :param kp: Ratio of answers to compute diefk (kp in [0.0;1.0]). By default and when kp=1.0, this function behaves the same as diefk. It computes the kp portion of the minimum number of answers produced by the approaches.
:type kp: float
:return: Dataframe with the dief@k values for each approach. Attributes of the dataframe: test, approach, diefk.
:rtype: numpy.ndarray
"""
# Initialize output structure.
df = np.empty(shape=0, dtype=[('test', basestring), ('approach', basestring), ('diefk', float)])
# Obtain test and approaches to compare.
results = inputtrace[inputtrace['test'] == inputtest]
    approaches = set(results['approach'])
# Obtain k per approach.
n = []
for a in approaches:
x = results[results['approach'] == a]
n.append(len(x))
k = min(n)
if kp > -1:
k = k * kp
# Compute diefk.
df = diefk(inputtrace, inputtest, k)
return df
def plot_answer_trace(inputtrace, inputtest):
"""
This function plots the answer trace of a given test.
:param inputtrace: Dataframe with the answer trace. Attributes of the dataframe: test, approach, answer, time.
:type inputtrace: numpy.ndarray
:param inputtest: Specifies the specific test to analyze from the answer trace.
:type inputtest: str
:return: Plot of the answer traces of each approach when evaluating the input test.
:rtype: matplotlib.pyplot.plot
"""
# Obtain test and approaches to compare.
results = inputtrace[inputtrace['test'] == inputtest]
    approaches = set(results['approach'])
# Generate plot.
for a in approaches:
subtrace = results[results['approach'] == a]
plt.plot(subtrace['time'], subtrace['answer'], label=a, marker='o', markeredgewidth=0.0, linestyle='None')
plt.xlabel('Time')
plt.ylabel('# Answers Produced')
plt.legend(loc='upper left')
plt.show()
return plt
def load_trace(filename):
"""
This function reads answer traces from a CSV file.
:param filename: Path to the CSV file that contains the answer traces. Attributes of the file specified in the header: test, approach, answer, time.
:type filename: str
:return: Dataframe with the answer trace. Attributes of the dataframe: test, approach, answer, time.
:rtype: numpy.ndarray
"""
# Conversion of columns to datatypes
d = {'test': basestring,
'approach': basestring,
'answer': int,
'time': float}
# Loading data.
df = np.genfromtxt(filename, delimiter=',', names=True, dtype=[basestring, basestring, basestring, basestring])
# Converting data to appropriate datatypes.
dtype_new = []
for elem in df.dtype.names:
dtype_new.append((elem, d[elem]))
df = np.asarray(df, dtype=dtype_new)
# Return dataframe in order.
return df[['test', 'approach', 'answer', 'time']]
``` |
{
"source": "jproyo/upc-miri-tfm",
"score": 2
} |
#### File: experiments/diepfy/experiments.py
```python
import diefpy.dief as diefpy
def plot_experiments():
root_path = '/Users/juan/Projects/upc/upc-miri-tfm/data/experiments'
# folders = ["moreno_crime","dbpedia","opsahl-ucforum","wang-amazon"]
folders = ["wang-amazon"]
colors = ["#4287f5","#19E67C","#1A211E","#244A4F","#0fC4DB","#8F2E73","#B36224","#ECC30B","#D56062"]
for f in folders:
traces = diefpy.load_trace(f'{root_path}/{f}/results/results.csv')
diefpy.plot_answer_trace(traces, f, colors).show()
metrics = diefpy.load_metrics(f"{root_path}/{f}/results/metrics.csv")
exp1 = diefpy.experiment1(traces, metrics)
diefpy.plotExperiment1Test(exp1, f, colors).show()
def to_scenario_id(name):
scenarios = {
'vertex-lower-low': 'VL-L',
'vertex-lower-medium': 'VL-M',
'vertex-lower-high': 'VL-H',
'vertex-upper-low': 'VU-L',
'vertex-upper-medium': 'VU-M',
'vertex-upper-high': 'VU-H',
'edge-low': 'E-L',
'edge-medium': 'E-M',
'edge-high': 'E-H',
}
return scenarios[name]
def dief_t_k():
root_path = '/Users/juan/Projects/upc/upc-miri-tfm/data/experiments'
folders = ["moreno_crime","dbpedia","opsahl-ucforum","wang-amazon"]
#folders = ["wang-amazon"]
colors = ["#4287f5","#19E67C","#1A211E","#244A4F","#0fC4DB","#8F2E73","#B36224","#ECC30B","#D56062"]
for f in folders:
traces = diefpy.load_trace(f'{root_path}/{f}/results/results.csv')
print("dief@t", f)
for (_, s, n) in diefpy.dieft(traces, f):
print('&', to_scenario_id(s), '&', '$'+"{:.2E}".format(round(n,2))+'$', '\\\\')
print('')
for f in folders:
traces = diefpy.load_trace(f'{root_path}/{f}/results/results.csv')
print("dief@k", f)
for (_, s, n) in diefpy.diefk(traces, f):
print('&', to_scenario_id(s), '&', '$'+"{:.2E}".format(round(n,2))+'$', '\\\\')
dief_t_k()
# plot_experiments()
``` |
{
"source": "jprsurendra/assignment",
"score": 3
} |
#### File: apis/candidate/serializers.py
```python
from rest_framework import serializers
from apis.candidate.models import Candidate
from apis.common.serializers import CitySerializer
class CandidateSerializer(serializers.ModelSerializer):
city_id = serializers.IntegerField()
class Meta:
model = Candidate
fields = ['candidate_name', 'address', 'city_id', 'owner_info', 'employee_size' ]
class CandidateReadSerializer(serializers.ModelSerializer):
candidate_name = serializers.SerializerMethodField()
city = CitySerializer()
class Meta:
model = Candidate
fields = '__all__'
def get_candidate_name(self, obj):
if obj.candidate_name:
return obj.candidate_name.title()
return None
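# --- Editor's note (not part of the original module) --------------------------
# Illustrative read-side output (field values hypothetical):
#   CandidateReadSerializer(candidate).data
#   -> {'id': 1, 'candidate_name': 'Acme Corp', 'city': {...},
#       'address': '...', 'owner_info': '...', 'employee_size': 10}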
``` |
{
"source": "jprsurendra/core_python",
"score": 4
} |
#### File: core/sorting/main.py
```python
print("Always executed")
class Student:
def __init__(self, name, age):
self.__name = name
self.__age = age
'''
# When you try to sort the list of Student's object, At a minimum, you should specify __eq__ and __lt__ operations.
Then just use sorted(<<list_of_objects>>) or <<list_of_objects>>.sort()
Otherwise Exception will occur:
TypeError: '<' not supported between instances of 'Student' and 'Student'
'''
def __eq__(self, other):
return ( self.__class__ == other.__class__ and
                 self.__name == other.__name and self.__age == other.__age )
def __lt__(self, other):
return self.__age < other.__age
'''
def __ne__(self, other):
return self.__age != other.__age
def __gt__(self, other):
return self.__age > other.__age
def __le__(self, other):
return self.__age <= other.__age
def __ge__(self, other):
return self.__age >= other.__age
'''
def __hash__(self):
return hash((self.__name, self.__age))
def __str__(self):
return "Student: {name:%s, age:%s}"%(self.__name,self.__age)
def __repr__(self):
return f"Student<<{self.__hash__()}>>(name:{self.__name}, age:{self.__age})"
# return f"Student<<hash(self)>>(name:{self.__name}, age:{self.__age})"
#invoke __str__ and __repr__
# print(str(D1))
# print(repr(D1))
if __name__ == "__main__":
print("Executed when invoked directly")
lst= []
lst.append(Student("Rohit", 10))
lst.append(Student("Surendra", 11))
lst.append(Student("Manish", 8))
lst.append(Student("Pooja", 6))
lst.append(Student("Bhuwan", 25))
print(lst)
lst2 = sorted(lst)
print(lst2)
lst.sort()
print(lst)
else:
print("Executed when imported")
'''
Output:
Always executed
Executed when invoked directly
[Student<<-5241922802715972405>>(name:AAAA, age:10), Student<<-3132639429598342108>>(name:AAAA, age:11), Student<<-6856489529845274464>>(name:AAAA, age:8), Student<<-4072698873990208395>>(name:AAAA, age:6), Student<<6374970296428362712>>(name:AAAA, age:25)]
[Student<<-4072698873990208395>>(name:AAAA, age:6), Student<<-6856489529845274464>>(name:AAAA, age:8), Student<<-5241922802715972405>>(name:AAAA, age:10), Student<<-3132639429598342108>>(name:AAAA, age:11), Student<<6374970296428362712>>(name:AAAA, age:25)]
None
'''
```
#### File: core_python/oops/aggregation_and_composition.py
```python
class Salary:
def __init__(self, pay):
self.pay = pay
def get_total(self):
return (self.pay * 12)
'''
Example of Aggregation in Python
Aggregation is a weak form of composition. If you delete the container object, the content objects can live without the container object.
'''
class Employee:
def __init__(self, pay, bonus):
self.pay = pay
self.bonus = bonus
def annual_salary(self):
return "Total: " + str(self.pay.get_total() + self.bonus)
'''
Example of Composition in Python
In composition, one of the classes is composed of one or more instances of other classes. In other words, one class is the container and the other class is the content; if you delete the container object then all of its content objects are also deleted.
'''
class ComposedEmployee:
def __init__(self, pay, bonus):
self.pay = pay
self.bonus = bonus
self.obj_salary = Salary(self.pay)
def annual_salary(self):
return "Total: " + str(self.obj_salary.get_total() + self.bonus)
def main_aggregation():
obj_sal = Salary(600)
obj_emp = Employee(obj_sal, 500)
print(obj_emp.annual_salary())
def main_composition():
obj_emp = ComposedEmployee(600, 500)
print(obj_emp.annual_salary())
if __name__ == "__main__":
main_aggregation()
main_composition()
```
#### File: core_python/oops/magic_methods.py
```python
class IntegerNumber:
def __init__(self, num=0):
if type(num)== int:
self.__num = num
else:
self.__num = 0
raise ValueError("Argument 'num' must be an Integer Number")
def __str__(self):
return f'<<IntegerNumber>> value is {str(self.__num)}'
def __repr__(self):
return f'<<IntegerNumber>>(value is {str(self.__num)})'
'''
Magic Method used for overload Binary Operator Example
-------------- --------------------------------- -------
__add__ + object.__add__(self, other)
__sub__ - object.__sub__(self, other)
__mul__ * object.__mul__(self, other)
__floordiv__ // object.__floordiv__(self, other)
__truediv__ / object.__truediv__(self, other)
__mod__ % object.__mod__(self, other)
__pow__ ** object.__pow__(self, other[, modulo])
__lshift__ << object.__lshift__(self, other)
__rshift__ >> object.__rshift__(self, other)
__and__ & object.__and__(self, other)
__xor__ ^ object.__xor__(self, other)
__or__( | object.__or__(self, other)
'''
def __add__(self, other):
if type(other) == IntegerNumber:
return IntegerNumber(self.__num + other.__num)
elif type(other) == int:
return IntegerNumber(self.__num + other)
else:
raise ValueError("Argument must be an object of class IntegerNumber or int")
'''
Extended Assignments Operator Magic Method
+= object.__iadd__(self, other)
-= object.__isub__(self, other)
*= object.__imul__(self, other)
/= object.__idiv__(self, other)
//= object.__ifloordiv__(self, other)
%= object.__imod__(self, other)
**= object.__ipow__(self, other[, modulo])
<<= object.__ilshift__(self, other)
>>= object.__irshift__(self, other)
&= object.__iand__(self, other)
^= object.__ixor__(self, other)
|= object.__ior__(self, other)
'''
def __iadd__(self, other):
if type(other) == IntegerNumber:
self.__num = self.__num + other.__num
return self
elif type(other) == int:
self.__num = self.__num + other
return self
else:
raise ValueError("Argument must be an object of class IntegerNumber or int")
'''
Unary Operators Operator Magic Method
- object.__neg__(self)
+ object.__pos__(self)
abs() object.__abs__(self)
~ object.__invert__(self)
complex() object.__complex__(self)
int() object.__int__(self)
long() object.__long__(self)
float() object.__float__(self)
oct() object.__oct__(self)
    hex()       object.__hex__(self)
'''
def __int__(self):
return int(self.__num)
'''
Comparison Operators Magic Method
< object.__lt__(self, other)
<= object.__le__(self, other)
== object.__eq__(self, other)
!= object.__ne__(self, other)
>= object.__ge__(self, other)
> object.__gt__(self, other)
'''
def __eq__(self, other):
if type(other) == IntegerNumber:
return True if self.__num == other.__num else False
elif type(other) == int:
return True if self.__num == other else False
else:
return False
def main():
num1 = IntegerNumber(10)
print(num1)
num2 = IntegerNumber(20)
print(num2)
num3 = num1 + num2
print(num3)
num4 = int(num1)
print(num4)
num3 +=num4
print(num3)
if(num4 == num1):
print("num1 and num4 are same value")
else:
print("num1 and num4 are not same value")
if __name__ == "__main__":
main()
``` |
{
"source": "jprsurendra/CowinSlotAvailability",
"score": 3
} |
#### File: slot_search/slot_search/CowinSlotAvailability.py
```python
import requests
import json
import datetime
import sched, time
from datetime import date
from home.models import District, Center, Slot
class CowinSlotAvailability:
    def __init__(self, chk_district=506, min_age_limit=18, chk_date=None):
self.chk_district = chk_district
self.min_age_limit = min_age_limit if min_age_limit==18 or min_age_limit==45 else 18
if chk_date:
self.chk_date = chk_date
else:
today = date.today()
self.chk_date = today.strftime("%d-%m-%Y") # DD-MM-YYYY
self.task = sched.scheduler(time.time, time.sleep)
def get_db_center(self, json_center):
center_id = json_center.get('center_id', None)
try:
center = Center.objects.get(center_id=center_id)
return center
except Center.DoesNotExist:
dict_center={
'district_id': self.district.id,
'center_id': json_center.get('center_id', None),
'center_name': json_center.get('name', None),
'address': json_center.get('address', None),
'block_name': json_center.get('block_name', None),
'pincode': json_center.get('pincode', None),
'state_name': json_center.get('state_name', None),
'fee_type': json_center.get('fee_type', None),
}
center= Center.objects.create(**dict_center)
return center
def check_availability(self, sc):
# chk_date = "20-05-2021"
# chk_district = 506 # "Jaipur II"
# min_age_limit = 18
browser_header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
URL = "https://cdn-api.co-vin.in/api/v2/appointment/sessions/public/calendarByDistrict?district_id={dist}&date={cdate}".format(dist = self.chk_district, cdate = self.chk_date)
availabile_centers = []
if(self.chk_date == '' or self.chk_district == ''):
raise ValueError("Input Parameter missing")
format = "%d-%m-%Y"
try:
datetime.datetime.strptime(self.chk_date, format)
        except ValueError:
raise ValueError("Incorrect date format, should be DD-MM-YYYY")
try:
self.district = District.objects.get(id=self.chk_district)
except District.DoesNotExist:
raise ValueError("Incorrect district")
print("===============================================================================")
now = datetime.datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
print(" .... Search started ..... ", dt_string)
response = requests.get(URL, headers=browser_header)
        resp_data = json.loads(response.text) if response.ok else {}
        if 'centers' in resp_data:
            resp_json = resp_data['centers']
for api_center in resp_json:
db_center = self.get_db_center(json_center=api_center)
for api_session in api_center['sessions']:
db_slot = {}
center = {}
if(api_session['available_capacity'] > 0 and api_session['min_age_limit'] == self.min_age_limit and api_session['date'] == self.chk_date):
db_slot = {
'center_id': db_center.id,
'min_age_limit': api_session.get('min_age_limit', None),
'available_capacity':api_session.get('available_capacity', None),
'available_capacity_dose1':api_session.get('available_capacity_dose1', None),
'available_capacity_dose2':api_session.get('available_capacity_dose2', None),
'date': api_session.get('date', None),
'vaccine': api_session.get('vaccine', None),
'created_on': now
}
Slot.objects.create(**db_slot)
center['slots'] = api_session['slots']
center['date'] = api_session['date']
center['capacity'] = api_session['available_capacity']
center['cid'] = api_center['center_id']
center['name'] = api_center['name']
center['address'] = api_center['address']
if bool(center):
availabile_centers.append(center)
print("---------------------------------------------------------------")
print("slots: ", str(','.join(center['slots'])))
print("date: ", str(center['date']))
print("capacity: ", str(center['capacity']))
print("cid: ", str(center['cid']))
print("name: ", str(center['name']))
print("address: ", str(center['address']))
# return True if len(availabile_centers)>0 else False
if availabile_centers and len(availabile_centers)>0:
return True
else:
self.task.enter(120, 1, self.check_availability, (sc,))
def do_start(self):
self.task.enter(5, 1, self.check_availability, (self.task,))
self.task.run()
# if __name__ == "__main__":
# slot_finder = CowinSlotAvailability(chk_date = "22-05-2021") # chk_district=571
# slot_finder.do_start()
```
#### File: slot_search/slot_search/views.py
```python
from home.models import District
import threading
class CowinSlotAvailabilityThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
from slot_search.CowinSlotAvailability import CowinSlotAvailability
slot_finder = CowinSlotAvailability(chk_date="22-05-2021") # chk_district=571
slot_finder.do_start()
def load_on_startup():
pass
# thread1 = CowinSlotAvailabilityThread()
# thread1.start()
# districts=[
# {
# "id": 507,
# "district_name": "Ajmer"},
# {
# "id": 512,
# "district_name": "Alwar"},
# {
# "id": 519,
# "district_name": "Banswara"},
# {
# "id": 516,
# "district_name": "Baran"},
# {
# "id": 528,
# "district_name": "Barmer"},
# {
# "id": 508,
# "district_name": "Bharatpur"},
# {
# "id": 523,
# "district_name": "Bhilwara"},
# {
# "id": 501,
# "district_name": "Bikaner"},
# {
# "id": 514,
# "district_name": "Bundi"},
# {
# "id": 521,
# "district_name": "Chittorgarh"},
# {
# "id": 530,
# "district_name": "Churu"},
# {
# "id": 511,
# "district_name": "Dausa"},
# {
# "id": 524,
# "district_name": "Dholpur"},
# {
# "id": 520,
# "district_name": "Dungarpur"},
# {
# "id": 517,
# "district_name": "Hanumangarh"},
# {
# "id": 505,
# "district_name": "Jaipur I"},
# {
# "id": 506,
# "district_name": "Jaipur II"},
# {
# "id": 527,
# "district_name": "Jaisalmer"},
# {
# "id": 533,
# "district_name": "Jalore"},
# {
# "id": 515,
# "district_name": "Jhalawar"},
# {
# "id": 510,
# "district_name": "Jhunjhunu"},
# {
# "id": 502,
# "district_name": "Jodhpur"},
# {
# "id": 525,
# "district_name": "Karauli"},
# {
# "id": 503,
# "district_name": "Kota"},
# {
# "id": 532,
# "district_name": "Nagaur"},
# {
# "id": 529,
# "district_name": "Pali"},
# {
# "id": 522,
# "district_name": "Pratapgarh"},
# {
# "id": 518,
# "district_name": "Rajsamand"},
# {
# "id": 534,
# "district_name": "Sawai Madhopur"},
# {
# "id": 513,
# "district_name": "Sikar"},
# {
# "id": 531,
# "district_name": "Sirohi"},
# {
# "id": 509,
# "district_name": "Sri Ganganagar"},
# {
# "id": 526,
# "district_name": "Tonk"},
# {
# "id": 504,
# "district_name": "Udaipur"}]
# save_district_list = []
# for district in districts:
# save_district_list.append(District(**district))
# District.objects.bulk_create(save_district_list)
``` |
{
"source": "jprsurendra/DjangoProject",
"score": 3
} |
#### File: apis/noteapi/models.py
```python
from django.db import models
# Create your models here.
class Note(models.Model):
title = models.CharField(max_length=50)
text = models.CharField(max_length=255)
class Meta:
db_table = 'tbl_notes'
def __str__(self):
return self.title
``` |
{
"source": "jprsurendra/EmployeeProjectPanel",
"score": 3
} |
#### File: djxml/build/rst_converter.py
```python
import os
import re
import subprocess
import distutils.errors
import distutils.spawn
class PandocRSTConverter(object):
replacements = (
# Remove lists with internal links (effectively, the TOC in a README).
# We remove the TOC because the anchors in a github markdown file
# are broken when the rst is displayed on pypi
(re.compile(ur'(?ms)^\- ((?:.(?!\n[\n\-]))*?\`__\n)'), u''),
# (shorten link terminator from two underscores to one): `__ => `_
(re.compile(ur'\`__'), u'`_'),
# Replace, for example:
#
# code sample:
#
# ::
#
# def example(): pass
#
# with:
#
# code sample::
#
# def example(): pass
(re.compile(ur'(?ms)(\:)\n\n\:(\:\n\n)'), ur'\1\2'),
# replace 3+ line breaks with 2
(re.compile(ur'\n\n\n+'), u'\n\n'),
# Remove syntax highlighting hints, which don't work on pypi
(re.compile(ur'(?m)\.\. code\:\: .*$'), u'::'),
)
from_format = 'markdown'
def convert(self, path_):
path = path_ if path_[0] == '/' else os.path.join(
os.path.dirname(__file__), '..', '..', path_)
if not os.path.exists(path):
raise distutils.errors.DistutilsSetupError("File '%s' does not exist" % path_)
pandoc_path = distutils.spawn.find_executable("pandoc")
if pandoc_path is None:
raise distutils.errors.DistutilsSetupError(
"pandoc must be installed and in PATH to convert markdown to rst")
rst = subprocess.check_output([
pandoc_path,
"-f", self.from_format,
"-t", "rst",
path])
rst = self.replace_header_chars(rst)
for regex, replacement in self.replacements:
rst = regex.sub(replacement, rst)
return rst
header_char_map = (
('=', '#'),
('-', '='),
('^', '-'),
("'", '.'),
)
def replace_header_chars(self, rst_string):
"""Replace the default header chars with more sensible ones"""
for from_char, to_char in self.header_char_map:
def replace(matchobj):
return to_char * len(matchobj.group(0))
regex = r'(?m)^%(from)s%(from)s+$' % {'from': re.escape(from_char), }
rst_string = re.sub(regex, replace, rst_string)
return rst_string
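    # Editor's note (not in the original): pandoc underlines a top-level
    # title with '='; the map above promotes it to '#', e.g.
    #     Title            Title
    #     =====    -->     #####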
def replace_section(self, rst, section_name, replacement, remove_header=False):
if not len(replacement):
replacement = u"\n"
elif replacement[-1] != u"\n":
replacement = u"%s\n" % replacement
if remove_header:
replacement = u"%s\n" % replacement
else:
replacement = u"\\1\n%s\n" % replacement
regex = (ur"""(?msx)
(\n
%(section_name)s\n
([%(header_chars)s])\2[^\n]+\n
).*?\n
(?=(?:
^[^\n]+\n
\2\2\2
|
\Z
))
""") % {
'section_name': re.escape(section_name),
'header_chars': re.escape('-#=.'),
}
return re.sub(regex, replacement, rst)
```
#### File: djxml/tests/test_advanced_example.py
```python
import os
from doctest import Example
from lxml import etree
from lxml.doctestcompare import LXMLOutputChecker
from django import test
from .xmlmodels import AtomFeed, AtomEntry
class TestAdvancedExample(test.TestCase):
@classmethod
def setUpClass(cls):
super(TestAdvancedExample, cls).setUpClass()
cls.example = AtomFeed.create_from_file(
os.path.join(os.path.dirname(__file__), 'data', 'atom_feed.xml'))
def assertXmlEqual(self, got, want):
checker = LXMLOutputChecker()
if not checker.check_output(want, got, 0):
message = checker.output_difference(Example("", want), got, 0)
raise AssertionError(message)
def test_feed_title(self):
self.assertEqual(self.example.title, "Example Feed")
def test_feed_entry_title(self):
self.assertIsInstance(self.example.entries[0], AtomEntry)
self.assertEqual(self.example.entries[0].title, "An example entry")
def test_transform_to_rss(self):
expected = "\n".join([
'<rss version="2.0">',
' <channel><description>Example Feed</description>',
'',
' <link>http://example.org/</link>',
' <pubDate>Thu, 05 Jul 2012 18:30:02Z</pubDate>',
'',
' <item>',
'',
' <link>http://example.org/2003/12/13/atom03</link>',
' <guid>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</guid>',
' <pubDate>Thu, 05 Jul 2012 18:30:02Z</pubDate>',
' <description><div>Some text.</div></description>',
' </item>',
'</channel>',
'</rss>\n'])
self.assertXmlEqual(expected, etree.tounicode(self.example.transform_to_rss()))
``` |
{
"source": "jprsurendra/layered_architecture_in_django",
"score": 2
} |
#### File: common/utilities/generic_configuration.py
```python
from apis.common.models import GenericSystemSettings
from django.conf import settings
class GenericConfiguration:
__instance = None
def __init__(self):
""" Virtually private constructor. """
        if GenericConfiguration.__instance is not None:
raise Exception("This class is a singleton!")
else:
self.system_settings_dict = dict(GenericSystemSettings.objects.values_list('prop_key', 'prop_value'))
GenericConfiguration.__instance = self
@staticmethod
def get_instance():
""" Static access method. """
        if GenericConfiguration.__instance is None:
GenericConfiguration()
return GenericConfiguration.__instance
def refresh_system_settings_dict(self):
self.system_settings_dict = dict(GenericSystemSettings.objects.values_list('prop_key', 'prop_value'))
def fetch_value(self, key):
if key in self.system_settings_dict:
return self.system_settings_dict[key]
return eval('settings.'+ key)
```
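A short usage sketch of the singleton above (the key name is hypothetical; fetch_value falls back to Django settings when the key is absent from the GenericSystemSettings table):
```python
# Hypothetical usage; 'DEFAULT_PAGE_SIZE' is an example key, not from the original code.
config = GenericConfiguration.get_instance()
page_size = config.fetch_value('DEFAULT_PAGE_SIZE')  # DB value if present, else settings.DEFAULT_PAGE_SIZE
config.refresh_system_settings_dict()  # re-read after GenericSystemSettings rows change
```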
#### File: components/base/base_manager.py
```python
import logging
import traceback
import uuid
from django.apps import apps
from django.db.models import Subquery
from django.core.paginator import Paginator
from apis.common.utils import find_in_dic_or_list, Base64EncodeDecode
from django.db import connection, transaction
class BaseManager(object):
def __init__(self):
pass
@staticmethod
def getManagerName():
        raise NotImplementedError()
    def getErrorInfo(self, errors_info = {}, error_code='UNKNOWN_ERROR', description='An unknown error has occurred, please contact customer care', error_no=1):
err_no=0
if errors_info and isinstance(errors_info, dict):
if not 'errors' in errors_info:
errors_info['errors'] = []
if not 'error_no' in errors_info:
errors_info['error_no'] = 0
else:
errors_info = {'errors': [], 'error_no':0}
errors = errors_info['errors']
err_no = int(errors_info['error_no'])
if error_code:
if not description:
description = " "
errors.append({'error_code': error_code, 'description': description})
errors_info['errors'] = errors
if err_no<error_no:
errors_info['error_no'] = error_no
return errors_info
def server_side_validation(self, params_pk, params=None, **kwargs):
return True
    def retrive(self, pk, params=None, **kwargs):
        raise NotImplementedError()
    def fetch(self, params=None, **kwargs): # def fetch(self, *args, **kwargs):
        raise NotImplementedError()
    def exists(self, params=None, **kwargs): # def exists(self, *args, **kwargs):
        raise NotImplementedError()
    def save_or_update(self, params=None, *args, **kwargs):
        raise NotImplementedError()
    def create(self, params=None, **kwargs):
        raise NotImplementedError()
    def update(self, pk, params=None, **kwargs):
        raise NotImplementedError()
    def delete(self, pk, params):
        raise NotImplementedError()
def get_additional_params(self, params = {}, request_params=None, **kwargs):
return params
    '''
    Description: This is a utility method that combines all categories of parameters.
    Parameters:
        1. request_params (Dict): contains different types of data, e.g. 'query_params', 'request_params', 'logged_in_user'.
    Returns: Dict combining all categories of parameters
    Exception: None
    '''
def get_all_request_params(self, request_params=None, **kwargs):
params = {}
# request_params = request_params.copy() if request_params else {}
for key in ['query_params', 'request_params', 'logged_in_user', 'additional_parameters']:
if key in request_params and request_params[key]:
if isinstance(request_params[key], dict):
params.update(request_params[key])
else:
params[key] = request_params[key]
return self.get_additional_params(params=params, request_params=request_params, **kwargs)
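    # Illustration (hypothetical values): given
    #   request_params = {'query_params': {'page': 1}, 'request_params': {'name': 'abc'},
    #                     'logged_in_user': user_obj}
    # the merged result is {'page': 1, 'name': 'abc', 'logged_in_user': user_obj}:
    # dict values are flattened in, non-dict values are kept under their own key.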
class BaseModelManager(BaseManager):
    '''
    Description: Common functionality for all Managers at object-initialization time.
                 This method also assigns common_manager, where you can put common business-logic
                 or database-related code that does not belong to a specific Manager.
    Parameters:
        app_name (String): name of the app
        model_name (String): name of the Model in the app
    Returns: None
    Exception: None
    '''
def __init__(self, app_name, model_name):
self.context = {}
if app_name==None or model_name == None:
self.Model = None
else:
self.Model = apps.get_model(app_name, model_name)
try:
from apis.common.managers import CommonManager
if not isinstance(self, CommonManager):
from apis.components.factories.managers_factory import ManagersFactory
self.common_manager = ManagersFactory.get_instance().get_common_manager()
else:
self.common_manager = None
except Exception as e:
self.common_manager = None
logging.info("Path apis/common/components/base_manager.py Class: BaseModelManager Method: __init__ Error: %s"%(str(e)))
logging.info(traceback.format_exc())
    @staticmethod
    def get_first_occurrence(dictionary: dict, key: str):
        # Depth-first search for the first occurrence of `key` in a nested dict.
        if key in dictionary:
            return dictionary[key]
        for v in dictionary.values():
            if isinstance(v, dict):
                value_found = BaseModelManager.get_first_occurrence(v, key)
                if value_found is not None:
                    if isinstance(value_found, list):
                        return value_found[0]
                    return value_found
        return None
def get_dic_item(self, obj_dic, key, default_value = None, do_trim=False):
dic_item = obj_dic.get(key, default_value)
if do_trim:
dic_item = dic_item.strip() if dic_item else dic_item
return dic_item
    '''
    Description: Implementing this method in every Manager is mandatory. It returns the unique
                 name of the Manager, which is used to register/retrieve the Manager in the Manager-Factory.
    Parameters: None
    Returns: (String) unique name of the Manager
    Exception: None
    '''
    @staticmethod
    def get_manager_name():
        raise NotImplementedError()
def is_int(self, val):
try:
if type(val) == int:
return True
else:
if val.is_integer():
return True
else:
return False
except Exception as e:
return False
def execute_raw_query(self, raw_query, list_of_column_index = None):
with connection.cursor() as cursor:
cursor.execute(raw_query)
exclude_sql = cursor.fetchall()
# if list_of_column_index == None:
# pass
# elif type(list_of_column_index) is list or type(list_of_column_index) is tuple:
# list_of_column_values = []
# if len(exclude_sql)>1:
# for row in exclude_sql:
# list_of_column_values.append([row[k] for k in list_of_column_index])
# else:
# row = exclude_sql
# list_of_column_values.append([row[k] for k in list_of_column_index])
# return list_of_column_values
# elif self.is_int(list_of_column_index): # Fetch only first column
# if len(exclude_sql)>1:
# list_of_column_values = [int(k[list_of_column_index]) for k in exclude_sql]
# else:
# list_of_column_values =[exclude_sql[0][list_of_column_index]]
# return list_of_column_values
        if list_of_column_index: # Fetch only first column (as a primary key)
if len(exclude_sql)>1:
list_of_column_values = [int(k[0]) for k in exclude_sql]
else:
list_of_column_values =[exclude_sql[0][0]]
return list_of_column_values
return exclude_sql
    '''
    Description: This method returns all fields of the Model bound to this Manager.
    Parameters: None
    Returns: List<String>
    Exception: None
    '''
def model_fields(self):
model_fields = []
for field in self.Model._meta.fields:
model_fields.append(field.name)
return model_fields
    '''
    Description: This method returns a native SELECT query ("SELECT * FROM table_name")
                 for the Model bound to this Manager.
    Parameters: None
    Returns: String
    Exception: None
    '''
def get_native_select_query(self, fields=None, where_close = ""):
table_name = self.Model._meta.db_table
if fields:
return "SELECT " + fields + " FROM " + table_name + " " + where_close
else:
return "SELECT * FROM " + table_name + " " + where_close
    '''
    Description: This method executes a raw SELECT query and returns the fetched rows.
    Parameters: None
    Returns: list of row tuples, or None
    Exception: None
    '''
def execute_native_query(self, raw_query ):
result = None
try:
with connection.cursor() as cursor:
                cursor.execute(raw_query)
result = cursor.fetchall()
return result
except self.Model.DoesNotExist:
return None
    '''
    Description: This method executes a raw UPDATE/DML query and commits the transaction.
    Parameters: None
    Returns: None
    Exception: None
    '''
def execute_update_native_query(self, raw_query):
result = None
try:
with connection.cursor() as cursor:
cursor.execute(raw_query)
connection.commit()
return result
except self.Model.DoesNotExist:
return None
def execute_raw_query_with_model(self, raw_query, raw_count_query=None, app_name=None, model_name=None, RowSqlModel=None):
if RowSqlModel:
LocalModel = RowSqlModel
else:
LocalModel = self.get_model(app_name, model_name)
return LocalModel.objects.raw(raw_query, raw_count_query)
    '''
    Description: This method returns the primary-key field name of the Model bound to this Manager.
    Parameters: None
    Returns: String
    Exception: None
    '''
def get_pk_field_name(self):
pk_field_name = self.Model._meta.pk.name
return pk_field_name
def get_model(self, app_name=None, model_name=None):
if app_name == None and model_name == None:
LocalModel = self.Model
elif app_name == None or model_name == None:
raise ValueError("Invalid arguments (app_name and model_name)")
else:
LocalModel = apps.get_model(app_name, model_name)
return LocalModel
def get_model_fields(self, app_name=None, model_name=None, obj_entity_model=None):
if obj_entity_model:
LocalModel = obj_entity_model
else:
LocalModel = self.get_model(app_name, model_name)
return [f.name for f in LocalModel._meta.get_fields()]
def get_manager_param_dict(self, **kwargs):
request_params = { }
if kwargs:
for key, value in kwargs.items():
if value:
request_params[key] = value
v_params = {}
v_params['request_params'] = request_params
return v_params
    '''
    Description: This method parses a nested data-dict that will be used in a Serializer.
    Parameters:
        kwargs (**): data to parse
    Returns:
        1. data-dict of the outer object
        2. data-dict of the inner (nested) objects
    Exception: None
    '''
def parse_nested_obj_dic(self, **kwargs):
service_data = {}
nested_obj_dic = {}
for key, value in kwargs.items():
if isinstance(value, dict):
nested_obj_dic[key] = kwargs[key]
else:
service_data[key] = kwargs[key]
return service_data, nested_obj_dic
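    # Illustration (hypothetical data): parse_nested_obj_dic(name='abc', address={'city': 'x'})
    # returns ({'name': 'abc'}, {'address': {'city': 'x'}}) -- scalars vs nested dicts.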
    '''
    Description: This method returns a Manager object based on the provided search_key.
                 Remember that, generally, this search_key is the name of a nested field in the Serializer.
    Parameters: None
    Returns: object of Manager
    Exception: None
    '''
def get_manager(self, search_key):
from apis.components.factories.managers_factory import ManagersFactory
manager = ManagersFactory.get_instance().get_manager(managerName=search_key)
return manager
    '''
    Description: This is a service-method used for retrieving data from the database based on different criteria.
    Parameters:
        1. id_value (int): if a value is provided, an id-based database query is executed.
        2. params (Dict): the Controller can share additional data in this dictionary.
        3. kwargs (**): additional parameters for searching data
    Returns: a single object, or None (if not found in the database)
    Exception: None
    '''
def retrieve(self, id_value=None, params=None, **kwargs):
try:
if id_value:
obj = self.Model.objects.filter(id = id_value)
return obj[0] if obj else None
else:
obj = self.Model.objects.get(**kwargs)
return obj
except self.Model.DoesNotExist:
return None
    '''
    Description: This service-method is similar to "retrieve" but may return a list of one or more results.
    Parameters:
        1. params (Dict): the Controller can share additional data in this dictionary.
        2. kwargs (**): additional parameters for searching data
    Returns: list of objects
    Exception: None
    '''
def fetch(self, params=None, **kwargs): # def fetch(self, *args, **kwargs):
lst_obj = self.Model.objects.filter(**kwargs)
return lst_obj
# queryset = self.Model.objects.filter(**kwargs)
# return self.queryset_sorting(queryset, query_params=params, method='list', **kwargs)
    '''
    Description: This service-method checks whether matching data exists in the database.
    Parameters:
        1. params (Dict): the Controller can share additional data in this dictionary.
        2. kwargs (**): additional parameters for searching data
    Returns: True or False
    Exception: None
    '''
def exists(self, params=None, **kwargs): # def exists(self, *args, **kwargs):
return self.Model.objects.filter(**kwargs).exists()
def event_before_save(self, params=None, *args, **kwargs):
return params, args, kwargs
    '''
    Description: This service-method creates or updates the Model.
    Parameters:
        1. params (Dict): the Controller can share additional data in this dictionary.
        2. args (*): the Controller can share an additional list of parameters.
        3. kwargs (**): additional parameters for searching data
    Returns: object of Model
    Exception: None
    '''
def save_or_update(self, params=None, *args, **kwargs):
service_data, nested_obj_dic = self.parse_nested_obj_dic(**kwargs)
if nested_obj_dic:
for key, value in nested_obj_dic.items():
manager = self.get_manager(key)
inner_obj = manager.save(params=params, **value)
service_data[key] = inner_obj
params, args, updated_service_data = self.event_before_save(params, *args, **service_data)
obj = self.save(params, *args, **updated_service_data)
else:
params, args, updated_kwargs = self.event_before_save(params, *args, **kwargs)
if 'id' in updated_kwargs:
rows_updated = self.update(updated_kwargs['id'], params, *args, **updated_kwargs)
obj = self.Model(**updated_kwargs)
else:
obj = self.create(params, *args, **updated_kwargs)
return obj
def event_before_create(self, params=None, *args, **kwargs):
return params, args, kwargs
    '''
    Description: This service-method creates/inserts the Model into the database.
    Parameters:
        1. params (Dict): the Controller can share additional data in this dictionary.
        2. kwargs (**): additional parameters for searching data
    Returns: object of Model
    Exception: None
    '''
def create(self, params=None, *args, **kwargs):
service_data, nested_obj_dic = self.parse_nested_obj_dic(**kwargs)
if nested_obj_dic:
for key, value in nested_obj_dic.items():
manager = self.get_manager(key)
inner_obj = manager.create(params=params, **value)
service_data[key] = inner_obj
params, args, updated_service_data = self.event_before_create(params, *args, **service_data)
obj = self.Model.objects.create(**updated_service_data)
else:
params, args, updated_kwargs = self.event_before_create(params, *args, **kwargs)
obj = self.Model.objects.create(**updated_kwargs)
return obj
def event_before_update(self, pk, params=None, *args, **kwargs):
return params, args, kwargs
    '''
    Description: This service-method updates the Model in the database.
    Parameters:
        1. pk (int): primary-key value, used to retrieve the Model object from the database.
        2. params (Dict): the Controller can share additional data in this dictionary.
        3. kwargs (**): key-value data for updating values in the database
    Returns: number of rows updated in the database
    Exception: None
    '''
def update(self, pk, params=None, *args, **kwargs):
params, args, updated_kwargs = self.event_before_update(pk, params, *args, **kwargs)
# self.Model(**updated_kwargs)
updated_rows = self.Model.objects.filter(id = pk).update(**updated_kwargs)
# updated_rows = self.Model.objects.get(id=pk).update(**updated_kwargs)
return updated_rows
def update_object(self, pk, params=None, *args, **kwargs):
params, args, updated_kwargs = self.event_before_update(pk, params, *args, **kwargs)
# self.Model(**updated_kwargs)
updated_rows = self.Model.objects.filter(id = pk).update(**updated_kwargs)
return self.Model.objects.get(id = pk), updated_rows
    '''
    Description: This service-method deletes Model objects from the database.
    Parameters:
        1. id_value (int): primary-key value, used to select the Model object for deletion.
        2. params (Dict): the Controller can share additional data in this dictionary.
        3. kwargs (**): additional parameters for selecting Model objects for deletion.
    Returns: number of rows deleted from the database
    Exception: None
    '''
def delete(self, id_value=None, params=None, **kwargs):
if id_value:
return self.Model.objects.filter(id = id_value).delete()
elif kwargs:
return self.Model.objects.filter(**kwargs).delete()
    '''
    Description: This utility-method adds include/exclude filters on a queryset without a Manager.
    Parameters:
        1. db_model (object): the Model class.
        2. include_params (Dict): include-filter parameters.
        3. exclude_params (Dict): exclude-filter parameters.
        4. kwargs (**): additional parameters for selecting Model objects from the database.
    Returns: filtered queryset
    Exception: None
    '''
def filter_on_model(self, db_model, include_params={}, exclude_params={}, **kwargs):
if include_params:
lst_queryset = db_model.objects.filter(**include_params)
elif kwargs:
lst_queryset = db_model.objects.filter(**kwargs)
else:
lst_queryset = db_model.objects.all()
if exclude_params:
lst_queryset = lst_queryset.exclude(**exclude_params)
return lst_queryset
    '''
    Description: This utility-method adds include/exclude filters on the Model bound to this Manager.
    Parameters:
        1. include_params (Dict): include-filter parameters.
        2. exclude_params (Dict): exclude-filter parameters.
        3. kwargs (**): additional parameters for selecting Model objects from the database.
    Returns: filtered queryset
    Exception: None
    '''
def filter(self, include_params={}, exclude_params={}, **kwargs):
return self.filter_on_model( db_model=self.Model, include_params=include_params, exclude_params=exclude_params, **kwargs)
    '''
    Description: This is a demo method showing how to add a filter (named 'xyz'); replace 'xyz' with your filter name.
    Parameters:
        1. value (String): value to filter with.
        2. queryset: the filter is applied to this queryset
        3. request_params (Dict): the Controller/Manager can share additional data in this dictionary.
    Returns: queryset after applying the filter
    Exception: None
    '''
def filter_xyz(self, value, queryset, request_params=None):
return queryset
    '''
    Description: This is called before all filters; you can treat it as the first filter method.
    Parameters:
        1. queryset: the filters are applied to this queryset
        2. request_params (Dict): the Controller/Manager can share additional data in this dictionary.
    Returns: queryset after applying the filter
    Exception: None
    '''
def filter_startfiltering(self, queryset, request_params=None):
if request_params:
filter_session_key = uuid.uuid1()
request_params['filter_session_key'] = filter_session_key
self.context[filter_session_key] = {}
return queryset
    '''
    Description: This is called after all filters; you can treat it as the last filter method.
    Parameters:
        1. queryset: the filters are applied to this queryset
        2. request_params (Dict): the Controller/Manager can share additional data in this dictionary.
    Returns: queryset after applying the filter
    Exception: None
    '''
def filter_endfiltering(self, queryset, request_params=None):
return queryset
    '''
    Description: This is called after all filters; you can handle sorting in this method.
    Parameters:
        1. queryset: sorting is applied to this queryset
        2. query_params (Dict): you can provide order_by fields in this dictionary.
        3. method (String): identifies the service method from which this was auto-called.
        4. kwargs (**): additional parameters for selecting Model objects from the database. (Not used till now)
    Returns: queryset
    Exception: None
    '''
    def queryset_sorting(self, queryset, query_params=None, method='list', **kwargs):
        if 'order_by' in query_params:
            # order_by() replaces any previous ordering, so pass all fields in a single call
            queryset = queryset.order_by(*query_params['order_by'].split(','))
        return queryset
def event_before_filter(self, name, value, queryset, request_params=None):
if name not in ['order_by', 'service_method', 'fields', 'page', 'page_size', 'logged_in_user', 'filter_session_key']:
return queryset
else:
return None
#def filter_domain_name(self, value, queryset, request_params=None):
def clear_filter_session(self, query_params):
try:
if 'filter_session_key' in query_params:
filter_session_key = query_params['filter_session_key']
if filter_session_key in self.context:
del (self.context[filter_session_key])
except Exception as e:
pass
def apply_filters(self, queryset, query_params=None, method='list', select_related_fields=[], **kwargs):
filter_dict = {}
try:
queryset = self.filter_startfiltering(queryset, request_params=query_params)
if query_params:
for query_param_key in query_params.keys() :
q = self.event_before_filter(name=query_param_key, value=query_params[query_param_key], queryset=queryset, request_params=query_params)
if q : #query_param_key not in ['order_by', 'service_method', 'page', 'page_size', 'logged_in_user']:
queryset = q
if hasattr(self.__class__, 'filter_%s' % (query_param_key)) and \
callable(getattr(self.__class__, 'filter_%s' % (query_param_key))):
queryset = getattr(self, 'filter_%s' % (query_param_key))(query_params[query_param_key], queryset, query_params)
# else:
# filter_dict[query_param_key] = query_params[query_param_key]
if filter_dict:
queryset = queryset.filter(**filter_dict)
queryset = self.filter_endfiltering(queryset, request_params=query_params)
queryset = self.queryset_sorting(queryset, query_params=query_params, method=method, **kwargs)
self.clear_filter_session(query_params)
return queryset
except Exception as e:
self.clear_filter_session(query_params)
e.args = (e.args if e.args else tuple()) + ('Error in filters',)
raise # re-raise current exception
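    # Subclasses add filters by defining methods named filter_<param>; apply_filters()
    # dispatches on the query-param name. Hypothetical example:
    #   def filter_status(self, value, queryset, request_params=None):
    #       return queryset.filter(status=value)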
    '''
    Description: This is a utility method containing the actual implementation of the filters.
    Parameters:
        1. query_params (Dict): you can provide order_by fields in this dictionary.
        2. method (String): identifies the service method from which this was auto-called.
        3. select_related_fields (List): parameters for Model.objects.select_related()
        4. kwargs (**): additional parameters for selecting Model objects from the database. (Not used till now)
    Returns: queryset
    Exception: None
    '''
def get_queryset(self, query_params=None, method='list', select_related_fields=[], **kwargs):
if select_related_fields:
            queryset = self.Model.objects.select_related(*select_related_fields)
else:
queryset = self.Model.objects.all()
return self.apply_filters(queryset, query_params, method, select_related_fields, **kwargs)
def log_sql(self, queryset, msg=' ==> ', params=None, **kwargs):
try:
msg_query = ""
if 'service_method' in params:
msg_query = msg_query + 'params[service_method]: %s' % (params['service_method'])
if 'service_method' in kwargs:
msg_query = msg_query + ' kwargs[service_method]: %s' % (kwargs['service_method'])
sql_query = str(queryset.query)
sql_parts = sql_query.split("FROM")
del (sql_parts[0])
sql_query = "FROM".join(sql_parts)
# sql_query = sql_query.split("FROM")[1]
if msg:
msg_query = msg_query + ' '+ msg + ' SELECT * FROM ' + sql_query
else:
msg_query = msg_query + ' ==> SELECT * FROM ' + sql_query
logging.info(msg_query)
except Exception as e:
pass
    '''
    Description: This is a service-method used for retrieving a list of data (with pagination info)
                 from the database based on different criteria.
    Parameters:
        1. params (Dict): the Controller can share additional data in this dictionary.
        2. kwargs (**): additional parameters for searching data
    Returns: list of objects
    Exception: None
    '''
def list_by_queryset(self, queryset, query_params):
if 'page_size' in query_params or 'page' in query_params :
items_per_page = int(query_params.get('page_size', 10))
current_page = int(query_params.get('page', 1))
paginator = Paginator(queryset, items_per_page)
page = paginator.page(current_page)
objects = page.object_list # len(paginator(current_page).object_list
total_count = paginator.count #(items_per_page * current_page + 1) + len(objects) #paginator(current_page).object_list)
previous_url = False
next_url = False
if (current_page * items_per_page) < total_count:
next_url = True
if current_page > 1:
previous_url = True
result= {
'page_info' : { 'num_pages': paginator.num_pages,
'start_count': items_per_page * (current_page-1)+1 ,
# start_value = (int(query_params.get('page')) * page_size) - page_size
'end_count': (items_per_page * (current_page-1))+1 + len(objects), #(items_per_page * current_page + 1) + len(paginator(current_page).object_list)
# end_value = start_value + page_size
'current_page': current_page,
'items_per_page': items_per_page,
'next_url': next_url,
'previous_url':previous_url
},
'data' : objects, # result_data = queryset[start_value:end_value]
'count': total_count,
'pagination': True
}
else:
result = { 'count': len(queryset), 'data': queryset, 'pagination': False }
# data = [model_to_dict(model_row) for model_row in queryset]
return result
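    # Illustration (hypothetical call): list_by_queryset(qs, {'page': '2', 'page_size': '10'})
    # returns {'page_info': {...}, 'data': <rows 11..20>, 'count': <total>, 'pagination': True};
    # without 'page'/'page_size' it returns the whole queryset with 'pagination': False.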
def get_raw_query_params(self, params=None, **kwargs):
dict_params = { 'raw_query': None,
'raw_count_query': None,
'app_name': None,
'model_name': None,
'fields': None }
return dict_params
def row_query_filters(self, raw_query_params, params=None, **kwargs):
return ""
def build_raw_sql(self, raw_query_params, params=None, **kwargs):
return raw_query_params.get('raw_query', None)
def build_raw_sql_with_filters(self, raw_query_params, params=None, **kwargs):
base_raw_query = self.build_raw_sql(raw_query_params=raw_query_params, params=params, **kwargs)
if base_raw_query:
base_raw_query = base_raw_query + " " + self.row_query_filters(raw_query_params=raw_query_params, params=params, **kwargs)
# print("build_raw_sql_with_filters --> raw_query: ", base_raw_query)
return base_raw_query
'''
Please do not delete it, This is a Sample override function "build_raw_sql_model_info"
def build_raw_sql_model_info(self, raw_query_params, params=None, **kwargs):
sql_cols = "id, field_1, field_2, field_3,"
raw_query_params['fields'] = [field.strip() for field in sql_cols.split(",")]
raw_query_params['app_name'] = 'quotationapis'
raw_query_params['model_name'] = "QuotationListingModel"
# raw_query_params['Model'] = self.get_model(raw_query_params['app_name'], raw_query_params['model_name'])
# raw_query_params['fields'] = self.get_model_fields(obj_entity_model=raw_query_params['Model'])
return raw_query_params
'''
def build_count_raw_sql(self, raw_query_params, base_raw_query=None, params=None, **kwargs):
return raw_query_params.get('raw_count_query', None)
def get_row_query_model_info(self, raw_query_params, params=None, **kwargs):
tmp_raw_query_params = dict(raw_query_params)
        if kwargs.get('model_name', None):
            tmp_raw_query_params['model_name'] = kwargs.get('model_name')
        elif tmp_raw_query_params.get('model_name', None) is None:
            tmp_raw_query_params['model_name'] = self.Model._meta.model_name
        if kwargs.get('app_name', None):
            tmp_raw_query_params['app_name'] = kwargs.get('app_name')
        elif tmp_raw_query_params.get('app_name', None) is None:
            tmp_raw_query_params['app_name'] = self.Model._meta.app_label
LocalModel = apps.get_model(tmp_raw_query_params['app_name'], tmp_raw_query_params['model_name'])
tmp_raw_query_params['fields'] = self.get_model_fields(obj_entity_model=LocalModel)
tmp_raw_query_params['Model']= LocalModel
return tmp_raw_query_params
def set_order_by_in_row_query(self, raw_query_params, base_raw_query, sql_cols, params=None, **kwargs):
raw_query_params['raw_query'] = 'SELECT %s FROM (%s) AS t ORDER BY id DESC ' % ( sql_cols, raw_query_params['raw_query'])
raw_query_params['raw_count_query'] = self.build_count_raw_sql(raw_query_params, base_raw_query=base_raw_query, params=params, **kwargs)
return raw_query_params
def get_queryset_by_row_query(self, params=None, **kwargs):
raw_query_params = self.get_raw_query_params(params=params, **kwargs)
base_raw_query = self.build_raw_sql_with_filters(raw_query_params=raw_query_params, params=params, **kwargs)
raw_query_params['raw_query'] = base_raw_query
if kwargs.get('use_row_query', False) or base_raw_query:
raw_query_params['raw_query'] = base_raw_query
if hasattr(self.__class__, 'build_raw_sql_model_info' ) and callable(getattr(self.__class__, 'build_raw_sql_model_info' )):
raw_query_params = getattr(self, 'build_raw_sql_model_info' )(raw_query_params, params, **kwargs)
if raw_query_params.get('Model', None)==None:
LocalModel = apps.get_model(raw_query_params['app_name'], raw_query_params['model_name'])
raw_query_params['Model'] = LocalModel
else:
                raw_query_params = self.get_row_query_model_info(raw_query_params, params=params, **kwargs)
LocalModel = raw_query_params.pop('Model', None)
sql_cols = ','.join(raw_query_params['fields'])
# raw_query_params['raw_query'] = 'SELECT %s FROM (%s) AS t ORDER BY id DESC ' % (sql_cols, raw_query_params['raw_query'])
# raw_query_params['raw_count_query'] = self.build_count_raw_sql(raw_query_params, base_raw_query=base_raw_query, params=params, **kwargs)
raw_query_params = self.set_order_by_in_row_query(raw_query_params, base_raw_query, sql_cols, params=params, **kwargs)
if raw_query_params['raw_count_query'] == None:
raw_query_params['raw_count_query'] = 'SELECT count(*) FROM (%s) AS t ' % (base_raw_query)
# print("Final Row SQL :: ", raw_query_params.get('raw_query', None))
queryset = self.execute_raw_query_with_model(raw_query=raw_query_params.get('raw_query', None),
raw_count_query=raw_query_params.get('raw_count_query', None),
RowSqlModel = LocalModel)
# app_name=raw_query_params.get('app_name', None),
# model_name=raw_query_params.get('model_name', None))
return queryset
else:
return None
def list(self, params=None, **kwargs):
query_params = self.get_all_request_params(request_params=params, **kwargs)
# raw_query_params = self.build_raw_sql(raw_query_params=self.get_raw_query_params(params=params, **kwargs),
# params=params, **kwargs)
# if raw_query_params and raw_query_params.get('raw_query', None):
# queryset = self.execute_raw_query_with_model(raw_query = raw_query_params.get('raw_query', None) , raw_count_query=raw_query_params.get('raw_count_query', None), app_name=raw_query_params.get('app_name', None), model_name=raw_query_params.get('model_name', None))
# else:
# queryset = self.get_queryset(query_params=query_params, method='list', **kwargs)
kwargs['service_method'] = kwargs.get('service_method', params.get('service_method', 'list') if params else 'list')
queryset = self.get_queryset_by_row_query(params=params, **kwargs)
        if queryset is None:
queryset = self.get_queryset(query_params=query_params, method='list', **kwargs)
return self.list_by_queryset(queryset, query_params)
def list_old(self, params=None, **kwargs):
query_params = self.get_all_request_params(request_params=params, **kwargs)
raw_query_params = self.build_raw_sql(raw_query_params=self.get_raw_query_params(params=params, **kwargs),
params=params, **kwargs)
if raw_query_params and raw_query_params.get('raw_query', None):
queryset = self.execute_raw_query_with_model(raw_query = raw_query_params.get('raw_query', None) , raw_count_query=raw_query_params.get('raw_count_query', None), app_name=raw_query_params.get('app_name', None), model_name=raw_query_params.get('model_name', None))
else:
queryset = self.get_queryset(query_params=query_params, method='list', **kwargs)
return self.list_by_queryset(queryset, query_params)
    '''
    Description: This is a utility method that decodes an encoded id value; plain numeric ids
                 are passed through unchanged.
    Parameters:
        1. id_value: the (possibly Base64-encoded) id value.
        2. request_params (Dict): the Controller can share additional data in this dictionary.
    Returns: the decoded id value
    Exception: None
    '''
def decrypt_id(self, id_value, request_params=None):
logged_in_user = find_in_dic_or_list('logged_in_user')
if id_value:
if isinstance(id_value, (int, float, complex)) and not isinstance(id_value, bool):
decode_response = id_value
elif id_value.isdigit():
decode_response = id_value
else:
decode_response = Base64EncodeDecode.decode_string(logged_in_user, id_value)
else:
decode_response = id_value
return decode_response
    '''
    Description: This is a utility method that executes a raw SQL query and returns the rows
                 as a list of dicts keyed by column name, together with the row count.
    Parameters:
        1. query (String): the raw SQL query to execute.
    Returns: Dict with 'data' (list of row dicts) and 'count'
    Exception: None
    '''
def get_sql_execute(self, query):
count = 0
cursor = connection.cursor() #MySQLdb.cursors.DictCursor)
# query = "SELECT * FROM fcl_commodity_type WHERE id = %s" % (fcl_commodity.id)
cursor.execute(query)
# return cursor.fetchall()
# for row in cursor.fetchall():
desc = cursor.description
# return [ dict(zip([col[0] for col in desc], row)) for row in cursor.fetchall()]
rows = cursor.fetchall()
count = len(rows)
data_list = {}
data_list['data'] = [dict(zip([col[0] for col in desc], row)) for row in rows]
data_list['count'] = count
return data_list
def get_request_params(self, params, default_value={}, **kwargs):
return params.get('request_params',default_value)
def is_request_param_exist(self, params, param_name, request_params=None, raise_error=False, error_msg= "Missing mandatory parameter(s)."):
        if request_params is None:
request_params = self.get_request_params(params)
if param_name in request_params:
return True
elif raise_error:
raise ValueError(error_msg)
else:
return False
def get_request_param_value(self, params, param_name, default_value=None, request_params=None, raise_error=False, error_msg= "Missing mandatory parameter(s)."):
        if request_params is None:
request_params = self.get_request_params(params)
# return request_params.get(param_name, default_value)
if param_name in request_params:
return request_params.get(param_name, default_value)
elif raise_error:
raise ValueError(error_msg)
else:
return default_value
def get_query_common_params(self, params, **kwargs):
query_params = {}
try:
query_common_params = {'logged_in_user': params['logged_in_user'], 'request': params['request'], 'query_params': {},
'data_source': params['data_source'], 'service_method': params['service_method'], 'fields': params['fields']}
query_params.update(query_common_params)
if kwargs:
query_params.update(kwargs)
return query_params
except Exception as e:
logging.info("Path apis/common/components/base_manager.py Class: BaseModelManager Method: get_query_common_params(...) Error: %s"%(str(e)))
logging.info(traceback.format_exc())
return query_params
def to_boolean_value(self, value):
if value:
bool_value = False
if isinstance(value, str):
value = value.strip().upper()
bool_value = True if value=="1" or value =="TRUE" or value=="YES" else False
elif value==1 or value==True:
bool_value = True
return bool_value
else:
return value
def to_date_range(self, value): # created_on
if value:
created_on = value.split('-')
created_on_start_split = created_on[0].strip().split('/')
created_on_end_split = created_on[1].strip().split('/')
created_on_start = created_on_start_split[2] + '-' + created_on_start_split[0] + '-' + created_on_start_split[1]
created_on_end = created_on_end_split[2] + '-' + created_on_end_split[0] + '-' + created_on_end_split[1]
return [created_on_start, created_on_end]
return []
def validate_parameter(self, param_name, param_value, validate_failed_list=[], params=None, is_raise_exception=True, **kwargs):
validate = { 'is_validate': True, 'validate_msg': None }
if not param_value:
validate['is_validate'] = False
validate['validate_msg'] = param_name + " is a mandatory parameter."
return validate
def validate_mandatory_parameters(self, params=None, is_raise_exception=True, *args, **kwargs):
parameters = {'error_message': None, 'errors': []}
mandatory_parameter = []
validate_failed_list = []
data = {}
if args and len(args)>0:
request_params = params.get('request_params',{})
for param_name in args:
param_name = str(param_name).replace(" ", '')
param_value = request_params.get(param_name, kwargs.get(param_name, None))
data[param_name] = param_value
validate = self.validate_parameter(param_name=param_name, param_value=param_value, validate_failed_list=validate_failed_list, params=params, is_raise_exception=is_raise_exception, **kwargs)
if not validate.get('is_validate', False):
mandatory_parameter.append(param_name)
validate_failed_list.append({ param_name: validate})
if len(mandatory_parameter)>0:
if len(mandatory_parameter)>1:
mandatory_parameter_csv = ", ".join(mandatory_parameter)
errors_message = mandatory_parameter_csv + " are mandatory parameters"
else:
                errors_message = mandatory_parameter[0] + " is a mandatory parameter"
if is_raise_exception:
raise ValueError(errors_message)
else:
parameters['error_message'] = errors_message
parameters['errors'] = validate_failed_list
parameters['data'] = data
return parameters
def split_date_range(self, date_range):
# created_on_range = ['08/03/2021 ', ' 09/08/2021']
arr_date_range = date_range.split('-')
created_on_start_split = arr_date_range[0].strip().split('/')
created_on_end_split = arr_date_range[1].strip().split('/')
created_on_start = created_on_start_split[2] + '-' + created_on_start_split[0] + '-' + created_on_start_split[1]
created_on_end = created_on_end_split[2] + '-' + created_on_end_split[0] + '-' + created_on_end_split[1]
return [created_on_start, created_on_end]
class OneToManyRelationshipModelManager(BaseModelManager):
def __init__(self, app_name, linked_table_model_name, linked_right_model_name,
linked_model_left_field_name, linked_model_right_field_name ):
super(OneToManyRelationshipModelManager, self).__init__(app_name, linked_table_model_name)
self.linkedTableModel = None if linked_right_model_name == None else apps.get_model(app_name, linked_right_model_name)
self.linked_model_left_field_name = linked_model_left_field_name if linked_model_left_field_name else None
self.linked_model_right_field_name = linked_model_right_field_name if linked_model_right_field_name else None
def delete_link_table_row(self, objLinkedLeftModel, objLinkedRightModel, params=None, **kwargs):
left_model_ids = []
right_model_ids = []
kwargs = {}
if type(objLinkedRightModel) is list or type(objLinkedRightModel) is tuple:
for item in objLinkedRightModel:
item_id = item if isinstance(item, int) else (int(item) if isinstance(item, str) else item.id)
right_model_ids.append(item_id)
else:
item = objLinkedRightModel
if isinstance(item, str) and ',' in item:
right_model_ids = [int(i.lstrip()) for i in item.split(',')]
else:
item_id = item if isinstance(item, int) else (int(item) if isinstance(item, str) else item.id)
right_model_ids.append(item_id)
if type(objLinkedLeftModel) is list or type(objLinkedLeftModel) is tuple:
for item in objLinkedLeftModel:
item_id = item if isinstance(item, int) else (int(item) if isinstance(item, str) else item.id)
left_model_ids.append(item_id)
else:
item = objLinkedLeftModel
if isinstance(item, str) and ',' in item:
left_model_ids = [int(i.lstrip()) for i in item.split(',')]
else:
item_id = item if isinstance(item, int) else (int(item) if isinstance(item, str) else item.id)
left_model_ids.append(item_id)
kwargs[self.linked_model_left_field_name + '_id__in'] = left_model_ids #objLinkedLeftModel if isinstance(objLinkedLeftModel, int) else ( int(objLinkedLeftModel) if isinstance(objLinkedLeftModel, str) else objLinkedLeftModel.id )
kwargs[self.linked_model_right_field_name + '_id__in'] = right_model_ids #objLinkedRightModel if isinstance(objLinkedRightModel, int) else ( int(objLinkedRightModel) if isinstance(objLinkedRightModel, str) else objLinkedRightModel.id )
res_tuple = self.linkedTableModel.objects.filter(**kwargs).delete()
return res_tuple[0]
def save_link_table(self, objLinkedLeftModel, objLinkedRightModelList=[], params=None, **kwargs):
default_param_dic = params.get('default_param_dic', None)
existing_right_model_ids = []
to_be_deleted_ids = []
kwargs={}
kwargs[self.linked_model_left_field_name + '_id'] = objLinkedLeftModel.id
linked_objects = self.linkedTableModel.objects.filter(**kwargs).values()
del_rows = 0
no_change_rows = 0
new_rows = 0
for obj in linked_objects:
found = False
for item in objLinkedRightModelList:
item_id = item if isinstance(item, int) else ( int(item) if isinstance(item, str) else item.id )
if obj[self.linked_model_right_field_name + '_id'] == item_id:
found = True
break
if found == False:
to_be_deleted_ids.append(obj['id'])
else:
existing_right_model_ids.append(obj[self.linked_model_right_field_name + '_id'])
no_change_rows = no_change_rows + 1
if to_be_deleted_ids:
del_rows = self.linkedTableModel.objects.filter(id__in= to_be_deleted_ids).delete()
del_rows = len(to_be_deleted_ids)
for item in objLinkedRightModelList:
item_id = item if isinstance(item, int) else ( int(item) if isinstance(item, str) else item.id )
found = False
for existing_id in existing_right_model_ids:
if item_id == existing_id:
found = True
break
if found == False:
updated_kwargs = {}
if default_param_dic:
updated_kwargs.update(default_param_dic)
updated_kwargs[self.linked_model_left_field_name + '_id'] = objLinkedLeftModel.id
updated_kwargs[self.linked_model_right_field_name + '_id'] = item_id
obj = self.linkedTableModel.objects.create(**updated_kwargs)
new_rows = new_rows + 1
kwargs = {}
kwargs[self.linked_model_left_field_name + '_id'] = objLinkedLeftModel.id
linked_objects = self.linkedTableModel.objects.filter(**kwargs).values()
return new_rows, del_rows, no_change_rows, linked_objects
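    # Illustration: save_link_table(company, [user1, user2], params={'default_param_dic': None})
    # diffs the existing link rows against the desired list: stale rows are deleted,
    # missing ones created, and it returns (new_rows, del_rows, no_change_rows, linked_objects).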
def event_before_filter(self, name, value, queryset, request_params=None):
# print("name: ", name)
        if name in ['order_by', 'service_method', 'fields', 'page', 'page_size', 'logged_in_user', 'list_method_name', 'fetch_clients']:
return None
if request_params.get('list_method_name', None) == 'right_model_list' and not ( name.startswith("both_model") or name.startswith("right_model") ):
return None
if request_params.get('list_method_name', None) == 'left_model_list' and not ( name.startswith("both_model") or name.startswith("left_model") or name =="right_model_pk" ):
return None
return queryset
    '''
    Function: fetch a list of Clients (User Summary) with linked-object info.
    The relation between Client & Linked Object is Many-to-One.
    Filter: filter_user_id(...)
    client_detail_list
    '''
def right_model_list(self, params=None, **kwargs):
query_params = self.get_all_request_params(request_params=params, **kwargs)
query_params['list_method_name'] = 'right_model_list'
if 'left_model_pk' in query_params:
query_params.pop('left_model_pk')
queryset_linked_model = self.linkedTableModel.objects.all()
queryset_linked_model = self.apply_filters(queryset_linked_model, query_params=query_params, method='list', **kwargs)
return self.list_by_queryset(queryset_linked_model, query_params)
    '''
    Function: fetch a list of Linked objects with a set of Clients (User Summary).
    The relation between Linked Object & Client is One-to-Many.
    Filter: filter_user_id(...)
    linked_object_list
    '''
def left_model_list(self, params=None, **kwargs):
query_params = self.get_all_request_params(request_params=params, **kwargs)
if query_params.get('directly_call_list', False):
queryset = self.get_queryset(query_params=query_params, method='list', **kwargs)
else:
query_params['list_method_name'] = 'left_model_list'
# if 'right_model_pk' in query_params:
# query_params['left_model_pk'] = query_params.pop('right_model_pk')
queryset_linked_model = self.linkedTableModel.objects.all()
queryset_linked_model = self.apply_filters(queryset_linked_model, query_params=query_params, method='list', **kwargs)
if 'right_model_pk' in query_params:
query_params.pop('right_model_pk')
if 'list_method_name' in query_params:
query_params.pop('list_method_name')
queryset = self.get_queryset(query_params=query_params, method='list', **kwargs)
'''
SELECT C.id, C.company_name
        FROM company_detail AS C
WHERE C.id IN (SELECT V0.company_detail_id FROM company_detail_user_link V0
WHERE V0.company_detail_id IN (SELECT U0.company_detail_id FROM company_detail_user_link U0 WHERE U0.user_detail_id IN (80, 62, 79)))
'''
queryset = queryset.filter(id__in=Subquery(queryset_linked_model.values(self.linked_model_left_field_name+'_id')))
return self.list_by_queryset(queryset, query_params)
    '''
    Filter to fetch the list of clients (for a given Linked Object's id),
    i.e. linked_objects_field_name = company_detail_id
    '''
def filter_left_model_pk(self, value, queryset, request_params=None):
if value:
kwargs = {}
if ',' in value:
company_ids = []
value = value.replace(" ", "")
value = value.strip()
for param in value.split(','):
if param:
company_ids.append(param)
kwargs[self.linked_model_left_field_name + '_id__in'] = company_ids
#queryset = queryset.filter(company_detail_id__in=company_ids)
else:
kwargs[self.linked_model_left_field_name + '_id'] = value
# queryset = queryset.filter(company_detail_id=value)
queryset = queryset.filter(**kwargs)
return queryset
'''
SQL: SELECT L.id, L.company_detail_id, L.user_detail_id
FROM company_detail_user_link AS L
WHERE L.company_detail_id IN (SELECT U0.company_detail_id FROM company_detail_user_link U0 WHERE U0.user_detail_id IN (80, 62, 79))
'''
def filter_right_model_pk(self, value, queryset, request_params=None):
if value:
kwargs_user_detail = {}
kwargs_linked_objects = {}
subquery_field_name = self.linked_model_left_field_name + '_id__in'
if ',' in value:
user_ids = []
value = value.replace(" ", "")
value = value.strip()
for param in value.split(','):
if param:
user_ids.append(param)
kwargs_user_detail[self.linked_model_right_field_name + '_id__in']=user_ids
#field_name = self.linked_model_left_field_name + '_id__in'
else:
kwargs_user_detail[self.linked_model_right_field_name + '_id'] = value
links_queryset = self.linkedTableModel.objects.filter(**kwargs_user_detail)
kwargs_linked_objects[subquery_field_name] = Subquery(links_queryset.values(self.linked_model_left_field_name + '_id'))
queryset = queryset.filter(**kwargs_linked_objects)
return queryset
```
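To make the base classes above concrete, here is a minimal sketch of a Manager subclass (hedged: the 'notes' app, 'Note' model, and filter name are placeholders, not part of the original repository; the sketch follows the get_manager_name / filter_<param> conventions defined above):
```python
# Hypothetical manager built on BaseModelManager; app/model names are placeholders.
from apis.components.base.base_manager import BaseModelManager

class NoteManager(BaseModelManager):
    def __init__(self):
        super(NoteManager, self).__init__('notes', 'Note')

    @staticmethod
    def get_manager_name():
        return 'NOTE_MANAGER'  # key under which ManagersFactory stores this manager

    # Picked up automatically by apply_filters() for a ?title=... query param.
    def filter_title(self, value, queryset, request_params=None):
        return queryset.filter(title__icontains=value)
```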
#### File: components/factories/managers_factory.py
```python
from apis.components.base.base_manager import BaseModelManager
from apis.components.factories.utility import SingletonBaseClass
class ManagersFactory(metaclass=SingletonBaseClass):
# __metaclass__ = SingletonBaseClass
self_obj = None
def __init__(self):
self.managers_dic = {}
self.manager_type = "FILE"
@staticmethod
def get_instance():
""" Static access method. """
if ManagersFactory.self_obj is None:
ManagersFactory.self_obj = ManagersFactory()
return ManagersFactory.self_obj
@staticmethod
def register(objManager, managerName=None):
instance = ManagersFactory.get_instance()
        if managerName is None:
handler = getattr(objManager, "get_manager_name", "MethodNotFound")
if handler == "MethodNotFound":
raise NotImplementedError("Method 'get_manager_name' not found.")
managerName = handler()
managerName_upper = managerName.upper()
if not managerName_upper in instance.managers_dic:
instance.managers_dic[managerName_upper] = objManager
def get_manager(self, managerName):
managerName_upper = managerName.upper()
if not managerName_upper in self.managers_dic:
            raise NotImplementedError('Getter for "%s" not implemented in ManagersFactory' % (managerName))
return self.managers_dic[managerName_upper]
get_manager.__annotations__ = {'return': BaseModelManager}
# Register all Managers to this Factory
def register_all_managers(self):
from apis.common.managers import CommonManager
common_manager = CommonManager()
self.register(common_manager, CommonManager.get_manager_name())
def get_common_manager(self):
from apis.common.managers import CommonManager
manager = self.get_manager(CommonManager.get_manager_name())
return manager
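# Hypothetical registration/lookup flow (NoteManager is a placeholder class):
#   factory = ManagersFactory.get_instance()
#   factory.register(NoteManager())              # keyed by get_manager_name()
#   manager = factory.get_manager('NOTE_MANAGER')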
``` |
{
"source": "jprsurendra/pocs",
"score": 3
} |
#### File: project/accounts/models.py
```python
from django.db import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.core.validators import EmailValidator
from validator import zip_validate, phone_validate
# Create your models here.
class UserManager(BaseUserManager):
"""Define a model manager for User model with no username field."""
use_in_migrations = True
def _create_user(self, email, password, **extra_fields):
"""Create and save a User with the given email and password."""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
"""Create and save a regular User with the given email and password."""
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password, **extra_fields):
"""Create and save a SuperUser with the given email and password."""
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
extra_fields.setdefault('is_active', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class Department(models.Model):
name = models.CharField('Department Name', max_length=30, unique=True)
class User(AbstractUser):
"""Custom User model."""
username = None
email = models.EmailField('email address', unique=True, validators=[EmailValidator(message="Please enter a valid email address.")] )
first_name = models.CharField('first name', max_length=30, blank=True, null=True )
last_name = models.CharField('last name', max_length=30, blank=True, null=True)
age = models.IntegerField(default=0, null=True, blank=True)
phone = models.CharField(max_length=15, null=True, blank=True, default=None, validators=[phone_validate])
department = models.ForeignKey(Department, blank=True, null=True, on_delete=models.CASCADE)
is_employee = models.BooleanField(default=False, help_text='Designates whether this user '
'should be treated as Employee in case True.')
is_hr = models.BooleanField(default=False, help_text='Designates whether this user '
'should be treated as HR in case True.')
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
    @property
    def full_name(self):
        # Join only the parts that are set, so a missing first or last name cannot raise.
        if self.first_name or self.last_name:
            return ' '.join(filter(None, [self.first_name, self.last_name]))
def __str__(self):
return self.email
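# Hypothetical usage of the email-based manager (no username field):
#   User.objects.create_user('jane@example.com', password='s3cret', is_employee=True)
#   User.objects.create_superuser('admin@example.com', password='s3cret')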
class EmployeeAddress(models.Model):
employee = models.ForeignKey(User, blank=True, null=True, on_delete=models.CASCADE, related_name='employee_address')
address = models.CharField('Address', max_length=100, blank=True, null=True)
country = models.CharField('Country', max_length=50, blank=True, null=True)
city = models.CharField('City', max_length=50, blank=True, null=True)
zip = models.CharField('zip Code', max_length=5, blank=True, null=True, validators=[zip_validate])
created_on = models.DateTimeField(auto_now_add=True)
updated_on = models.DateTimeField(auto_now=True)
``` |
{
"source": "jpruiz84/ScientoPy",
"score": 2
} |
#### File: jpruiz84/ScientoPy/generateBibtex.py
```python
import paperUtils
import paperSave
import globalVar
import os
import matplotlib.pyplot as plt
import numpy as np
import graphUtils
import sys
import re
import unidecode
def generateBibtex(inputLatexFile):
print("\n\nGenerating BibTeX")
print("=================\n")
    fileobject = open(inputLatexFile, 'r')
rawtext = fileobject.read()
fileobject.close()
start = '\\begin{document}'
end = '\\begin{thebibliography}'
bodytext = rawtext[rawtext.find(start) + len(start):rawtext.rfind(end)]
# Extracts the cites keys
citesDict = {}
for char in range(0, len(bodytext) - 10):
if bodytext[char:char + 6] == '\\cite{':
cite = ''
char += len('\\cite{')
while (bodytext[char] != '}'):
if (bodytext[char] == ' '):
char += 1
elif (bodytext[char] == ','):
char += 1
if cite in citesDict.keys():
cite = ''
else:
citesDict[cite] = False
cite = ''
else:
cite += (bodytext[char])
char += 1
if cite in citesDict.keys():
pass
else:
citesDict[cite] = False
print("%d cites found." % len(citesDict))
# Start paper list empty
papersDict = []
papersToBib = []
# Open the storage database and add to papersDict
INPUT_FILE = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.OUTPUT_FILE_NAME)
ifile = open(INPUT_FILE, "r", encoding='utf-8')
print("Reading file: %s" % (INPUT_FILE))
paperUtils.openFileToDict(ifile, papersDict)
ifile.close()
print("Loaded %d docuemnts" % (len(papersDict)))
# Find the number of total papers per year
count = 1
for paper in papersDict:
# print("%d, %s" % (count, paper["title"]))
# count += 1
if paper["eid"] in citesDict.keys():
if citesDict[paper["eid"]] == False:
print("Added paper(%s): %s" % (paper["eid"], paper["title"]))
papersToBib.append(paper)
citesDict[paper["eid"]] = True
OUT_FILE = os.path.join(globalVar.LATEX_EXAMPLE_FOLDER, globalVar.OUTPUT_FILE_BIB)
ofile = open(OUT_FILE, 'w', encoding='utf-8')
for paper in papersToBib:
authorsNames = paper["authorFull"]
if paper["dataBase"] == "Scopus":
authorsNames = authorsNames.replace(",", ";")
authorsNames = authorsNames.split(";")
authorsNames = [x.strip() for x in authorsNames]
authorsNames = [x.replace(" ", ", ", 1) for x in authorsNames]
authorsNames = " and ".join(authorsNames)
if paper["dataBase"] == "WoS":
authorsNames = authorsNames.replace("; ", " and ")
# Preprocess fields
paper["title"] = unidecode.unidecode(paper["title"])
paper["title"] = paper["title"].replace("&", "\&").replace("_", "\_")
paper["title"] = paper["title"].replace('"', '``', 1).replace('"', "''")
paper["sourceTitle"] = unidecode.unidecode(paper["sourceTitle"])
paper["sourceTitle"] = paper["sourceTitle"].replace("&", "\&").replace("_", "\_")
paper["pageCount"] = paper["pageCount"].replace("&", "\&").replace("_", "\_")
paper["publisher"] = paper["publisher"].replace("&", "\&").replace("_", "\_")
paper["publisherAddress"] = paper["publisherAddress"].replace("&", "\&").replace("_", "\_")
paper["conferenceTitle"] = paper["conferenceTitle"].replace("&", "\&").replace("_", "\_")
paper["conferenceLocation"] = paper["conferenceLocation"].replace("&", "\&").replace("_", "\_")
paper["conferenceDate"] = paper["conferenceDate"].replace("&", "\&").replace("_", "\_")
if (paper["documentType"].split(";")[0] in ["Article", "Review", "Article in Press"]):
ofile.write('@Article{%s,\n' % paper["eid"])
ofile.write(' Author \t=\t"%s",\n' % authorsNames)
ofile.write(' Title\t\t=\t"%s",\n' % paper["title"])
ofile.write(' Journal \t=\t"%s",\n' % paper["sourceTitle"])
if paper["pageCount"]:
ofile.write(' Numpages\t=\t"%s",\n' % paper["pageCount"])
if paper["pageSart"] and paper["pageEnd"]:
ofile.write(' Pages \t=\t"%s-%s",\n' % (paper["pageSart"], paper["pageEnd"]))
if paper["volume"]:
ofile.write(' Volume \t=\t"%s",\n' % paper["volume"])
if paper["artNo"]:
ofile.write(' Article-Number \t=\t"%s",\n' % paper["artNo"])
ofile.write(' Year \t\t=\t"%s",\n' % paper["year"])
if paper["issn"]:
ofile.write(' ISSN \t\t=\t"%s",\n' % paper["issn"])
if paper["isbn"]:
ofile.write(' ISBN \t\t=\t"%s",\n' % paper["isbn"])
if paper["doi"]:
ofile.write(' DOI \t\t=\t"%s",\n' % paper["doi"])
ofile.write('}\n\n\n')
if (paper["documentType"].split(";")[0] in ["Conference Paper", "Proceedings Paper", ]):
ofile.write('@Inproceedings{%s,\n' % paper["eid"])
ofile.write(' Author \t=\t"%s",\n' % authorsNames)
ofile.write(' Title\t\t=\t"%s",\n' % paper["title"])
if paper["publisher"]:
ofile.write(' Publisher \t=\t"%s",\n' % paper["publisher"])
if paper["publisherAddress"]:
                ofile.write(' Address\t=\t"%s",\n' % paper["publisherAddress"])
if paper["conferenceTitle"] and paper["conferenceLocation"] and paper["conferenceDate"]:
ofile.write(' Note\t\t=\t"In Proceedings of the %s, %s, %s",\n' %
(paper["conferenceTitle"], paper["conferenceLocation"], paper["conferenceDate"]))
elif paper["conferenceTitle"] and paper["conferenceDate"]:
ofile.write(' Note\t\t=\t"In {Proceedings of the } %s, %s",\n' %
(paper["conferenceTitle"], paper["conferenceDate"]))
if paper["pageCount"]:
                ofile.write(' Numpages\t=\t"%s",\n' % paper["pageCount"])
if paper["pageSart"] and paper["pageEnd"]:
ofile.write(' Pages \t=\t"%s-%s",\n' % (paper["pageSart"], paper["pageEnd"]))
if paper["volume"]:
ofile.write(' Volume \t=\t"%s",\n' % paper["volume"])
if paper["artNo"]:
ofile.write(' Article-Number \t=\t"%s",\n' % paper["artNo"])
ofile.write(' Year \t\t=\t"%s",\n' % paper["year"])
if paper["issn"]:
ofile.write(' ISSN \t\t=\t"%s",\n' % paper["issn"])
if paper["isbn"]:
ofile.write(' ISBN \t\t=\t"%s",\n' % paper["isbn"])
if paper["doi"]:
ofile.write(' DOI \t\t=\t"%s",\n' % paper["doi"])
ofile.write('}\n\n\n')
print("\nFinished, total references generated: %d\n" % len(papersToBib))
ofile.close()
return OUT_FILE
if __name__ == '__main__':
filename = sys.argv[1]
generateBibtex(filename)
```
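The script can be run directly against a LaTeX file (hedged: 'paper.tex' is a placeholder; the input database and output path come from globalVar):
```python
# Hypothetical invocation; 'paper.tex' is a placeholder file name.
# Command line:  python generateBibtex.py paper.tex
from generateBibtex import generateBibtex
bib_path = generateBibtex('paper.tex')  # writes OUTPUT_FILE_BIB into LATEX_EXAMPLE_FOLDER
print(bib_path)
```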
#### File: jpruiz84/ScientoPy/paperUtils.py
```python
import csv
import globalVar
import re
import unicodedata
import sys
import unidecode
import time
def openFileToDict(ifile, papersDict):
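    # Sniff the delimiter from the first line: tab-separated exports
    # (as produced by WoS) vs. comma-separated CSV (as produced by Scopus)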
firstLineTell = ifile.tell()
firstLine = ifile.readline()
ifile.seek(firstLineTell)
if "\t" in firstLine:
reader = csv.reader(ifile, delimiter='\t')
else:
reader = csv.reader(ifile, delimiter=',')
csv.field_size_limit(int(2e9))
header = 0
rownum = 0
for row in reader:
if globalVar.cancelProcess:
return
# Save header row.
if rownum == 0:
header = row
else:
colnum = 0
paperIn = {}
            # Init key elements as empty strings
paperIn["author"] = ""
paperIn["title"] = ""
paperIn["year"] = ""
paperIn["source"] = ""
paperIn["doi"] = ""
paperIn["volume"] = ""
paperIn["issue"] = ""
paperIn["artNo"] = ""
paperIn["pageSart"] = ""
paperIn["pageEnd"] = ""
paperIn["pageCount"] = ""
paperIn["link"] = ""
paperIn["affiliations"] = ""
paperIn["authorsWithAffiliations"] = ""
paperIn["correspondenceAddress"] = ""
paperIn["publisherAddress"] = ""
paperIn["conferenceTitle"] = ""
paperIn["conferenceLocation"] = ""
paperIn["conferenceDate"] = ""
paperIn["editors"] = ""
paperIn["publisher"] = ""
paperIn["issn"] = ""
paperIn["isbn"] = ""
paperIn["coden"] = ""
paperIn["pubMedId"] = ""
paperIn["languageOfOriginalDocument"] = ""
paperIn["abbreviatedSourceTitle"] = ""
paperIn["abstract"] = ""
paperIn["authorKeywords"] = ""
paperIn["indexKeywords"] = ""
paperIn["documentType"] = ""
paperIn["affiliations"] = ""
paperIn["cr"] = ""
paperIn["eid"] = ""
paperIn["dataBase"] = ""
paperIn["countries"] = ""
paperIn["subject"] = ""
paperIn["sourceTitle"] = ""
paperIn["orcid"] = ""
paperIn["citedReferences"] = ""
paperIn["citedBy"] = ""
paperIn["duplicatedIn"] = []
paperIn["emailHost"] = ""
paperIn["country"] = ""
paperIn["institution"] = ""
paperIn["institutionWithCountry"] = ""
paperIn["bothKeywords"] = ""
paperIn["authorFull"] = ""
for col in row:
if colnum >= len(header):
break
# remove special accents
headerCol = unidecode.unidecode(header[colnum])
# Scopus fields
if headerCol == "Authors": paperIn["author"] = col
if headerCol == "Authors": paperIn["authorFull"] = col
if headerCol == "Title": paperIn["title"] = col
if headerCol == "Year": paperIn["year"] = col
if headerCol == "Source title": paperIn["sourceTitle"] = col
if headerCol == "Volume": paperIn["volume"] = col
if headerCol == "Issue": paperIn["issue"] = col
if headerCol == "Art. No.": paperIn["artNo"] = col
if headerCol == "Page start": paperIn["pageSart"] = col
if headerCol == "Page end": paperIn["pageEnd"] = col
if headerCol == "Page count": paperIn["pageCount"] = col
if headerCol == "Cited by": paperIn["citedBy"] = col
if headerCol == "DOI": paperIn["doi"] = col
if headerCol == "Link": paperIn["link"] = col
if headerCol == "Affiliations": paperIn["affiliations"] = col
if headerCol == "Authors with affiliations": paperIn["authorsWithAffiliations"] = col
if headerCol == "Abstract": paperIn["abstract"] = col
if headerCol == "Author Keywords": paperIn["authorKeywords"] = col
if headerCol == "Index Keywords": paperIn["indexKeywords"] = col
if headerCol == "Correspondence Address": paperIn["correspondenceAddress"] = col
if headerCol == "Conference name": paperIn["conferenceTitle"] = col
if headerCol == "Conference date": paperIn["conferenceDate"] = col
if headerCol == "Conference location": paperIn["conferenceLocation"] = col
if headerCol == "Publisher Address": paperIn["publisherAddress"] = col
if headerCol == "Editors": paperIn["editors"] = col
if headerCol == "Publisher": paperIn["publisher"] = col
if headerCol == "ISSN": paperIn["issn"] = col
if headerCol == "ISBN": paperIn["isbn"] = col
if headerCol == "CODEN": paperIn["coden"] = col
if headerCol == "PubMed ID": paperIn["pubMedId"] = col
if headerCol == "Language of Original Document": paperIn["languageOfOriginalDocument"] = col
if headerCol == "Abbreviated Source Title": paperIn["abbreviatedSourceTitle"] = col
if headerCol == "Document Type": paperIn["documentType"] = col
if headerCol == "Source": paperIn["source"] = col
if headerCol == "EID": paperIn["eid"] = col
# WoS fields
#if headerCol == "PT": paperIn[""] = col # Publication Type (J=Journal; B=Book; S=Series; P=Patent)
if headerCol == "AU": paperIn["author"] = col # Authors
#if headerCol == "BA": paperIn[""] = col # Book authors
if headerCol == "BE": paperIn["editors"] = col # Editors
#if headerCol == "GP": paperIn[""] = col # Book Group Author(s)
if headerCol == "AF": paperIn["authorFull"] = col # Authors full name
#if headerCol == "BF": paperIn[""] = col # Book Authors Full Name
#if headerCol == "CA": paperIn[""] = col # Group authors
if headerCol == "TI": paperIn["title"] = col # Document Title
if headerCol == "SO": paperIn["sourceTitle"] = col # Publication Name
#if headerCol == "SE": paperIn[""] = col # Book Series Title
#if headerCol == "BS": paperIn[""] = col # Book Series subtitle
if headerCol == "LA": paperIn["languageOfOriginalDocument"] = col # Language
if headerCol == "DT": paperIn["documentType"] = col # Language
if headerCol == "CT": paperIn["conferenceTitle"] = col # Conference Title
if headerCol == "CY": paperIn["conferenceDate"] = col # Conference Date
if headerCol == "CL": paperIn["conferenceLocation"] = col # Conference Location
#if headerCol == "SP": paperIn[""] = col # Conference Sponsor
#if headerCol == "HO": paperIn[""] = col # Conference Host
if headerCol == "DE": paperIn["authorKeywords"] = col # Author Keywords
if headerCol == "ID": paperIn["indexKeywords"] = col # Keywords Plus
if headerCol == "AB": paperIn["abstract"] = col # Abstract
if headerCol == "C1": paperIn["affiliations"] = col # Author Address
#if headerCol == "RP": paperIn[""] = col # Reprint Address
if headerCol == "EM": paperIn["correspondenceAddress"] = col # E-mail Address
#if headerCol == "RI": paperIn[""] = col # ResearcherID Number
if headerCol == "OI": paperIn["orcid"] = col # ORCID Identifier (Open Researcher and Contributor ID)
#if headerCol == "FU": paperIn[""] = col # Funding Agency and Grant Number
#if headerCol == "FX": paperIn[""] = col # Funding Text
if headerCol == "CR": paperIn["citedReferences"] = col # Cited References
#if headerCol == "NR": paperIn[""] = col # Cited Reference Count
#if headerCol == "TC": paperIn["citedBy"] = col # Web of Science Core Collection Times Cited Count
# Total Times Cited Count (Web of Science Core Collection, BIOSIS Citation Index,
# Chinese Science Citation Database, Data Citation Index, Russian Science Citation Index, SciELO Citation Index)
if headerCol == "Z9": paperIn["citedBy"] = col
#if headerCol == "U1": paperIn[""] = col # Usage Count (Last 180 Days)
#if headerCol == "U2": paperIn[""] = col # Usage Count (Since 2013)
if headerCol == "PU": paperIn["publisher"] = col # Publisher
#if headerCol == "PI": paperIn[""] = col # Publisher City
if headerCol == "PA": paperIn["publisherAddress"] = col # Publisher Address
if headerCol == "SN": paperIn["issn"] = col # International Standard Serial Number (ISSN)
#if headerCol == "EI": paperIn[""] = col # Electronic International Standard Serial Number (eISSN)
if headerCol == "BN": paperIn["isbn"] = col # International Standard Book Number (ISBN)
if headerCol == "J9": paperIn["abbreviatedSourceTitle"] = col # 29-Character Source Abbreviation
#if headerCol == "JI": paperIn[""] = col # ISO Source Abbreviation
#if headerCol == "PD": paperIn[""] = col # Publication Date
if headerCol == "PY": paperIn["year"] = col # Year Published
if headerCol == "VL": paperIn["volume"] = col # Volume
if headerCol == "IS": paperIn["issue"] = col # Issue
#if headerCol == "PN": paperIn[""] = col # Part Number
#if headerCol == "SU": paperIn[""] = col # Supplement
#if headerCol == "SI": paperIn[""] = col # Special Issue
#if headerCol == "MA": paperIn[""] = col # Meeting Abstract
if headerCol == "BP": paperIn["pageSart"] = col # Beginning Page
if headerCol == "EP": paperIn["pageEnd"] = col # Ending Page
if headerCol == "AR": paperIn["artNo"] = col # Article Number
if headerCol == "DI": paperIn["doi"] = col # Digital Object Identifier (DOI)
#if headerCol == "D2": paperIn[""] = col # Book Digital Object Identifier (DOI)
if headerCol == "PG": paperIn["pageCount"] = col # Page Count
#if headerCol == "WC": paperIn["subject"] = col # Web of Science Categories
if headerCol == "SC": paperIn["subject"] = col # Research Areas
#if headerCol == "GA": paperIn[""] = col #Document Delivery Number
if headerCol == "UT": paperIn["eid"] = col # Accession Number
if headerCol == "PM": paperIn["pubMedId"] = col # PubMed ID
#if headerCol == "OA": paperIn[""] = col # Open Access Indicator
#if headerCol == "HC": paperIn[""] = col # ESI Highly Cited Paper. Note that this field is valued only for ESI subscribers.
#if headerCol == "HP": paperIn[""] = col # ESI Hot Paper. Note that this field is valued only for ESI subscribers.
#if headerCol == "DA": paperIn[""] = col # Date this report was generated.
# Own fields
if headerCol == "Subject": paperIn["subject"] = col
if headerCol == "duplicatedIn": paperIn["duplicatedIn"] = col.split(";")
if headerCol == "country": paperIn["country"] = col
if headerCol == "institution": paperIn["institution"] = col
if headerCol == "institutionWithCountry": paperIn["institutionWithCountry"] = col
if headerCol == "bothKeywords": paperIn["bothKeywords"] = col
if headerCol == "emailHost": paperIn["emailHost"] = col
if headerCol == "authorFull": paperIn["authorFull"] = col
colnum += 1
# Omit papers with invalid year
if not paperIn["year"].isdigit():
continue
# Omit papers without title
if paperIn["title"] == "":
continue
            # Determine the database using the eid
if paperIn["dataBase"] == "":
if paperIn["eid"].startswith("WOS"):
paperIn["dataBase"] = "WoS"
paperIn["source"] = "WoS"
elif paperIn["eid"].startswith("2-"):
paperIn["dataBase"] = "Scopus"
# If not valid eid
else:
continue
            # If citedBy is empty, default to 0
if paperIn["citedBy"] == "":
paperIn["citedBy"] = "0"
            # Change to False to skip author preprocessing
if True:
if paperIn["dataBase"] == "WoS":
paperIn["author"] = paperIn["author"].replace(";", ",")
# Remove accents in author
paperIn["author"] = unidecode.unidecode(paperIn["author"])
paperIn["authorFull"] = unidecode.unidecode(paperIn["authorFull"])
# Put a dot after the name initials in uppercase
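                # e.g. (illustrative) WoS "Smith, JA" becomes "Smith, J.A.",
                # matching the initials format used by Scopus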
author = []
for i in range(len(paperIn["author"])):
author.append(paperIn["author"][i])
                    # if it is the last character and uppercase, append the dot
if i == len(paperIn["author"]) - 1:
if paperIn["author"][i].isupper():
author.append('.')
break
# if upper and the next is upper or "," put the dot
if paperIn["author"][i].isupper() and \
(paperIn["author"][i + 1].isupper() or paperIn["author"][i + 1] == ','):
author.append('.')
paperIn["author"] = ''.join(author)
# Remove the "-" that is before an initial, to have coherence between WoS and Scoups
paperIn["author"] = paperIn["author"].replace(".-", ".")
                # Replace the authors separator ".," with ";" for ScientoPy analysis; put it back in paperSave
paperIn["author"] = paperIn["author"].replace(".,", ".;")
            # Extract country, institution and institutionWithCountry from the affiliation
if paperIn["country"] == "" or paperIn["institution"] == "" or paperIn["institutionWithCountry"] == "":
# Get each author affiliations
affiliations = re.split("; (?=[^\]]*(?:\[|$))", paperIn["affiliations"])
countries = []
institutions = []
institutionsWithCoutnry = []
# For each affiliation
for affiliation in affiliations:
# Divide affiliation in sections by ",", but not consider "," inside "[]"
afSections = re.split(", (?=[^\]]*(?:\[|$))|]", affiliation)
# The last item in affiliation list is the country
country = afSections[-1].strip()
# Remove dots in country
country = country.replace(".", "")
if "BOSNIA & HERCEG".upper() == country.upper():
country = "Bosnia and Herzegovina"
if "CHINA".upper() in country.upper():
country = "China"
if "ENGLAND".upper() in country.upper():
country = "United Kingdom"
if "SCOTLAND".upper() in country.upper():
country = "United Kingdom"
if "WALES".upper() in country.upper():
country = "United Kingdom"
if "UK".upper() == country.upper():
country = "United Kingdom"
if "KINGDOM OF SAUDI ARABIA".upper() == country.upper():
country = "Saudi Arabia"
if "RUSSIA".upper() in country.upper():
country = "Russian Federation"
if "TRINID & TOBAGO".upper() == country.upper():
country = "Trinidad and Tobago"
if "U ARAB EMIRATES".upper() == country.upper():
country = "United Arab Emirates"
if "USA".upper() in country.upper():
country = "United States"
if "VIET NAM".upper() == country.upper():
country = "Vietnam"
                    # Do not duplicate countries in the country field
if country.upper() not in [x.upper() for x in countries]:
countries.append(country)
# Get institution
institution = ""
if paperIn["dataBase"] == "WoS" and affiliations != "":
# Extract institution as the second element in affiliation sections
if len(afSections) >= 2:
institution = afSections[1].strip()
if institution.upper() not in [x.upper() for x in institutions]:
institutions.append(institution)
institutionWithCoutnry = ""
if institution != "":
institutionWithCoutnry = ("%s, %s" % (institution.replace(",", ""), country.replace(",", "")))
if institutionWithCoutnry.upper() not in [x.upper() for x in institutionsWithCoutnry]:
institutionsWithCoutnry.append(institutionWithCoutnry)
#print("\nOriginal: %s" % affiliation)
#print("afSections: %s" % str(afSections))
#print("country: %s" % country)
#print("institution: %s" % institution)
#print("institutionWithCoutnry: %s" % institutionWithCoutnry)
paperIn["country"] = ";".join(countries)
paperIn["institution"] = ";".join(institutions)
paperIn["institutionWithCountry"] = ";".join(institutionsWithCoutnry)
# Get email host
if paperIn["emailHost"] == "":
splited1 = paperIn["correspondenceAddress"].split("@")
if len(splited1) > 1:
splited2 = splited1[1].split(";")
paperIn["emailHost"] = splited2[0]
else:
paperIn["emailHost"] = "No email"
# Both keywords
if paperIn["bothKeywords"] == "":
bothKeywords = []
for keyword in paperIn["authorKeywords"].split(";"):
keywordStriped = keyword.strip()
if keywordStriped == "":
continue
if keywordStriped.upper() not in [x.upper() for x in bothKeywords]:
bothKeywords.append(keywordStriped)
for keyword in paperIn["indexKeywords"].split(";"):
keywordStriped = keyword.strip()
if keywordStriped == "":
continue
if keywordStriped.upper() not in [x.upper() for x in bothKeywords]:
bothKeywords.append(keywordStriped)
paperIn["bothKeywords"] = ";".join(bothKeywords)
# printPaper(paperIn)
globalVar.loadedPapers += 1
            # Keep only papers whose document type is in the included types list
if any(pType.upper() in paperIn["documentType"].upper().split("; ") \
for pType in globalVar.INCLUDED_TYPES):
papersDict.append(paperIn)
if paperIn["dataBase"] == "WoS":
globalVar.papersWoS += 1
if paperIn["dataBase"] == "Scopus":
globalVar.papersScopus += 1
else:
globalVar.omitedPapers += 1
rownum += 1
ifile.close()
def getPapersLinkFromFile(ifile, papersDict):
firstLineTell = ifile.tell()
firstLine = ifile.readline()
ifile.seek(firstLineTell)
if "\t" in firstLine:
reader = csv.reader(ifile, delimiter='\t')
else:
        reader = csv.reader(ifile, delimiter=',')
header = 0
rownum = 0
for row in reader:
# Save header row.
if rownum == 0:
header = row
else:
colnum = 0
paperIn = {}
            # Init key elements as empty strings
paperIn["Link"] = ""
for col in row:
#if colnum >= len(header):
# break
#headerCol = header[colnum].decode("ascii", errors="ignore").encode()
# Scopus fields
if col.startswith("https://www.scopus.com"):
paperIn["Link"] = col
colnum += 1
if paperIn["Link"] != "":
papersDict.append(paperIn)
rownum += 1
ifile.close()
def printPaper(paper):
print('Authors: %s' % (paper["author"]))
print('Title: %s' % (paper["title"]))
print('Year: %s' % (paper["year"]))
print('Source: %s' % (paper["source"]))
print('DOI: %s' % (paper["doi"]))
#print('Abstract: %s' % (paper["abstract"]))
print('Author Key: %s' % (paper["authorKeywords"]))
print('Index Key: %s' % (paper["indexKeywords"]))
print('eid: %s' % (paper["eid"]))
print('Data base: %s' % (paper["dataBase"]))
print('Affilations:')
for af in re.split("; (?=[^\]]*(?:\[|$))",paper["affiliations"]):
print("- " + af)
print('Country: %s' % (paper["country"]))
print('Document type: %s' % (paper["documentType"]))
print('Cited by: %s' % (paper["citedBy"]))
print('\n')
def removeDuplicates(paperDict, logWriter=None, preProcessBrief=None):
duplicatedPapersCount = 0
removedPapersScopus = 0
removedPapersWoS = 0
duplicatedWithDifferentCitedBy = 0
noAuthors = 0
    # Remove the part of the title inside parentheses or square brackets
    # (some journals put the original-language title in the brackets)
    # and strip whitespace from the start and end of the title
for paper in paperDict:
paper["titleB"] = unidecode.unidecode(paper["title"])
paper["titleB"] = re.sub("[\(\[].*?[\)\]]", "", paper["titleB"].upper()).strip()
paper["titleB"] = re.sub("[^a-zA-Z0-9]+", "", paper["titleB"])
paper["firstAuthorLastName"] = unidecode.unidecode(paper["author"])
paper["firstAuthorLastName"] = paper["firstAuthorLastName"].upper().strip()
paper["firstAuthorLastName"] = re.sub(";|\.|,", " ", paper["firstAuthorLastName"]).split(" ")[0]
paper["firstAuthorLastName"] = re.sub("[^a-zA-Z]+", "", paper["firstAuthorLastName"])
    # Sort by database to put WoS before Scopus (reverse=True)
paperDict = sorted(paperDict, key=lambda x: x["dataBase"], reverse=True)
paperDict = sorted(paperDict, key=lambda x: x["titleB"])
print("Removing duplicates...")
globalVar.progressText = 'Removing duplicates'
countMatch2 = 0
progressPerPrev = 0
# Run on paper list
for i in range(0, len(paperDict)):
match = True
        while match:
# If we are on the last paper in the list
if i >= (len(paperDict) - 1):
match = False
continue
# Compare first author last name and titleB in uppercase
match = (paperDict[i]["firstAuthorLastName"] == paperDict[i+1]["firstAuthorLastName"])
match &= (paperDict[i]["titleB"] == paperDict[i+1]["titleB"])
if(paperDict[i]["doi"] != ""):
match |= (paperDict[i]["doi"] == paperDict[i + 1]["doi"])
match2 = (paperDict[i]["year"] != paperDict[i + 1]["year"]) & match
            if match2:
countMatch2 += 1
#print(countMatch2)
            # If the criteria match
            if match:
#print("\nPaper %s duplicated with %s" % (i, i+1))
#print("Dup A: %s, %s" % (paperDict[i]["title"], paperDict[i]["year"]))
#print("Authors: %s, Database: %s, Cited by: %s" %
#(paperDict[i]["author"], paperDict[i]["dataBase"], paperDict[i]["citedBy"]))
#print("Dup B: %s, %s" % (paperDict[i+1]["title"], paperDict[i+1]["year"]))
#print("Authors: %s, Database: %s, Cited by: %s" %
#(paperDict[i+1]["author"], paperDict[i+1]["dataBase"], paperDict[i+1]["citedBy"]))
# Update the removed count
if paperDict[i+1]["dataBase"] == "WoS":
removedPapersWoS += 1
if paperDict[i+1]["dataBase"] == "Scopus":
removedPapersScopus += 1
#print("Removing: %s" % paperDict[i+1]["dataBase"])
# Add all duplicated in duplicatedIn
#paperDict[i]["duplicatedIn"] = ";".join(paperDict[i]["duplicatedIn"].split(";") + [paperDict[i+1]["eid"]])
#paperDict[i]["duplicatedIn"] += (paperDict[i + 1]["eid"] + ";")
paperDict[i]["duplicatedIn"].append(paperDict[i + 1]["eid"])
                # Count how many duplicated documents have a different citedBy
if int(paperDict[i]["citedBy"]) != int(paperDict[i + 1]["citedBy"]):
duplicatedWithDifferentCitedBy += 1
# Average the two citedBy
paperDict[i]["citedBy"] = int((int(paperDict[i + 1]["citedBy"]) + int(paperDict[i]["citedBy"])) / 2)
# Remove paper i + 1
paperDict.remove(paperDict[i + 1])
# Update progress percentage
duplicatedPapersCount += 1
progressPer = int(float(i) / float(len(paperDict)) * 100)
globalVar.progressPer = progressPer
if globalVar.cancelProcess:
return 0
if progressPerPrev != progressPer:
progressPerPrev = progressPer
time.sleep(0.001)
if progressPer < 100:
#print("p: %d" % progressPer)
sys.stdout.write("\r%d%% " % (int(progressPer)))
sys.stdout.flush()
print("\nDuplicated papers found: %s" % duplicatedPapersCount)
print("Original papers count: %s" % globalVar.OriginalTotalPapers)
print("Actual papers count: %s" % len(paperDict))
if logWriter != None:
        # To avoid division by zero
if globalVar.papersScopus > 0:
preProcessBrief["percenRemPapersScopus"] = 100.0*removedPapersScopus/globalVar.papersScopus
else:
preProcessBrief["percenRemPapersScopus"] = 0
if globalVar.papersWoS > 0:
preProcessBrief["percenRemPapersWos"] = 100.0 * removedPapersWoS / globalVar.papersWoS
else:
preProcessBrief["percenRemPapersWos"] = 0
if duplicatedPapersCount > 0:
percenDuplicatedWithDifferentCitedBy = 100.0*duplicatedWithDifferentCitedBy/duplicatedPapersCount
else:
percenDuplicatedWithDifferentCitedBy = 0
globalVar.progressPer = 100
print("\r{0:.0f}%".format(100))
print("Removed papers WoS: %s, %.1f %%" %
(removedPapersWoS, preProcessBrief["percenRemPapersWos"]))
print("Removed papers Scopus: %s, %.1f %%" %
(removedPapersScopus, preProcessBrief["percenRemPapersScopus"]))
if(duplicatedPapersCount != 0):
print("Duplicated documents with different cited by: %s, %.1f %%\n" % (duplicatedWithDifferentCitedBy,
percenDuplicatedWithDifferentCitedBy))
globalVar.totalAfterRemDupl = len(paperDict)
preProcessBrief["totalAfterRemDupl"] = globalVar.totalAfterRemDupl
preProcessBrief["removedTotalPapers"] = duplicatedPapersCount
preProcessBrief["removedPapersScopus"] = removedPapersScopus
preProcessBrief["removedPapersWoS"] = removedPapersWoS
preProcessBrief["papersScopus"] = preProcessBrief["loadedPapersScopus"] - preProcessBrief["removedPapersScopus"]
preProcessBrief["papersWoS"] = preProcessBrief["loadedPapersWoS"] - preProcessBrief["removedPapersWoS"]
logWriter.writerow({'Info': ''})
logWriter.writerow({'Info': 'Duplicated removal results:'})
logWriter.writerow({'Info': 'Duplicated papers found',
'Number':("%d" % (duplicatedPapersCount)),
'Percentage': ("%.1f%%" % (100.0 * duplicatedPapersCount / globalVar.OriginalTotalPapers))})
logWriter.writerow({'Info': 'Removed duplicated papers from WoS',
'Number':("%d" % (removedPapersWoS)),
'Percentage': ("%.1f%%" % (preProcessBrief["percenRemPapersWos"]))})
logWriter.writerow({'Info': 'Removed duplicated papers from Scopus',
'Number':("%d" % (removedPapersScopus)),
'Percentage': ("%.1f%%" % (preProcessBrief["percenRemPapersScopus"]))})
logWriter.writerow({'Info': 'Duplicated documents with different cited by',
'Number':("%d" % (duplicatedWithDifferentCitedBy)),
'Percentage': ("%.1f%%" % (percenDuplicatedWithDifferentCitedBy))})
logWriter.writerow({'Info': 'Total papers after rem. dupl.', 'Number': str(globalVar.totalAfterRemDupl)})
return paperDict
def sourcesStatics(paperDict, logWriter):
statics = {}
totalPapers = len(paperDict)
statics["WoS"] = {}
for typeIn in globalVar.INCLUDED_TYPES:
statics["WoS"][typeIn] = 0
statics["WoS"]["Total"] = 0
statics["WoS"]["Source"] = "WoS"
statics["Scopus"]={}
for typeIn in globalVar.INCLUDED_TYPES:
statics["Scopus"][typeIn] = 0
statics["Scopus"]["Total"] = 0
statics["Scopus"]["Source"] = "Scopus"
noDocumentTypeCount = 0
    # Count statistics for each paper
for paper in paperDict:
try:
statics[paper["dataBase"]][paper["documentType"].split("; ")[0]] += 1
statics[paper["dataBase"]]["Total"] += 1
except:
noDocumentTypeCount += 1
    # Add the percentages
for key1, value1 in statics.items():
for key2, value2 in statics[key1].items():
if type(statics[key1][key2]) == int:
statics[key1][key2] = ("%d, %.1f%%" % (statics[key1][key2], (100.0 * statics[key1][key2]/totalPapers)))
logWriter.writerow(statics["WoS"])
logWriter.writerow(statics["Scopus"])
```
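
These helpers are normally driven by the preprocessing step. A minimal, hypothetical driver sketch (the export file names are assumptions; `openFileToDict` and `removeDuplicates` are the functions defined above, and the `globalVar` counters are assumed to be at their defaults):

```python
# Hypothetical sketch: load a Scopus and a WoS export into one list,
# then merge duplicates found across the two databases.
import paperUtils

papers = []
for name in ("scopus.csv", "wos.txt"):  # assumed export file names
    with open(name, encoding="utf-8") as f:
        paperUtils.openFileToDict(f, papers)  # parses and appends papers
papers = paperUtils.removeDuplicates(papers)  # averages citedBy of duplicates
print("Unique papers: %d" % len(papers))
```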
#### File: jpruiz84/ScientoPy/ScientoPyClass.py
```python
import paperUtils
import paperSave
import globalVar
import os
import matplotlib.pyplot as plt
import numpy as np
import graphUtils
import sys
import re
import time
from PIL import Image
class ScientoPyClass:
def __init__(self, from_gui=False):
# Parameters variables
self.criterion = 'authorKeywords'
self.graphType = 'bar_trends'
self.length = 10
self.skipFirst = 0
self.topics = ''
self.startYear = globalVar.DEFAULT_START_YEAR
self.endYear = globalVar.DEFAULT_END_YEAR
self.savePlot = ''
self.noPlot = False
self.agrForGraph = False
self.wordCloudMask = ''
self.windowWidth = 2
self.previousResults = False
self.onlyFirst = False
self.graphTitle = ''
self.pYear = False
self.plotWidth = globalVar.DEFAULT_PLOT_WIDTH
self.plotHeight = globalVar.DEFAULT_PLOT_HEIGHT
self.trend = False
self.yLog = False
self.filter = ""
self.fromGui = from_gui
# Working variables
self.papersDict = []
self.resultsFileName = ''
self.extResultsFileName = ''
self.lastPreviousResults = ''
self.preprocessBriefFileName = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.PREPROCESS_LOG_FILE)
self.preprocessDatasetFile = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.OUTPUT_FILE_NAME)
self.topicResults = []
self.yearArray = []
self.startYearIndex = 0
self.endYearIndex = 0
def closePlot(self):
plt.close()
def scientoPy(self, args=''):
globalVar.cancelProcess = False
globalVar.progressText = "Reading dataset"
globalVar.progressPer = 0
# To let progress bar open
if self.fromGui:
time.sleep(0.01)
if args == '':
args = self
print("\n\nScientoPy: %s" % (globalVar.SCIENTOPY_VERSION))
print("================\n")
# Check python version
if sys.version_info[0] < 3:
print("ERROR, you are using Python 2, Python 3.X.X required")
print("")
exit()
# Validate window Width
if args.windowWidth < 1:
print("ERROR: minimum windowWidth 1")
exit()
# Validate start and end years
if args.startYear > args.endYear:
print("ERROR: startYear > endYear")
exit()
# Create output folders if not exist
if not os.path.exists(os.path.join(globalVar.GRAPHS_OUT_FOLDER)):
os.makedirs(os.path.join(globalVar.GRAPHS_OUT_FOLDER))
if not os.path.exists(os.path.join(globalVar.RESULTS_FOLDER)):
os.makedirs(os.path.join(globalVar.RESULTS_FOLDER))
# Select the input file
if args.previousResults:
INPUT_FILE = os.path.join(globalVar.RESULTS_FOLDER, globalVar.OUTPUT_FILE_NAME)
else:
INPUT_FILE = os.path.join(globalVar.DATA_OUT_FOLDER, globalVar.OUTPUT_FILE_NAME)
# Start the output list empty
papersDictOut = []
topicList = []
loadDataSet = False
if len(self.papersDict) == 0 or args.previousResults:
loadDataSet = True
if args.previousResults == False and self.lastPreviousResults == True:
loadDataSet = True
# Open the dataset only if not loaded in papersDict
if loadDataSet:
self.papersDict = []
self.lastPreviousResults = args.previousResults
            # Open the storage database and add to self.papersDict
if not os.path.isfile(INPUT_FILE):
print("ERROR: %s file not found" % INPUT_FILE)
print("Make sure that you have run the preprocess step before run scientoPy")
exit()
ifile = open(INPUT_FILE, "r", encoding='utf-8')
print("Reading file: %s" % (INPUT_FILE))
globalVar.progressPer = 10
paperUtils.openFileToDict(ifile, self.papersDict)
ifile.close()
if globalVar.cancelProcess:
return
        # If reading previous results, remove possible duplicates from multiple topics
if args.previousResults:
self.papersDict= paperUtils.removeDuplicates(self.papersDict)
print("Scopus papers: %s" % globalVar.papersScopus)
print("WoS papers: %s" % globalVar.papersWoS)
print("Omitted papers: %s" % globalVar.omitedPapers)
print("Total papers: %s" % len(self.papersDict))
# Create a self.yearArray
self.yearArray = range(args.startYear, args.endYear + 1)
yearPapers = {}
for i in range(args.startYear, args.endYear + 1):
yearPapers[i] = 0
# Filter papers with invalid year
self.papersDict = list(filter(lambda x: x["year"].isdigit(), self.papersDict))
# Filter the papers outside the year range
papersDictInside = self.papersDict.copy()
papersDictInside = list(filter(lambda x: int(x["year"]) >= args.startYear, papersDictInside))
papersDictInside = list(filter(lambda x: int(x["year"]) <= args.endYear, papersDictInside))
print("Total papers in range (%s - %s): %s" %
(args.startYear, args.endYear, len(papersDictInside)))
# If no papers in the range exit
if (len(papersDictInside) == 0):
print("ERROR: no papers found in the range.")
del papersDictInside
return
# Find the number of total papers per year
for paper in papersDictInside:
if int(paper["year"]) in yearPapers.keys():
yearPapers[int(paper["year"])] += 1
# Get the filter options
filterSubTopic = ""
if args.filter:
filterSubTopic = args.filter.strip()
print("Filter Sub Topic: %s" % filterSubTopic)
# Parse custom topics
if args.topics:
print("Custom topics entered:")
# Divide the topics by ;
topicsFirst = args.topics.split(";")
for x in topicsFirst:
topicList.append(x.split(","))
# Remove beginning and ending space from topics, and empty topics
for topic in topicList:
for idx, item in enumerate(topic):
topic[idx] = item.strip()
if not topic[idx]:
topic.remove(topic[idx])
if not topic:
topicList.remove(topic)
# Remove for each sub topic, start and end spaces
for item1 in topicList:
for item2 in item1:
item2 = item2.strip()
for topic in topicList:
print(topic)
# Find the top topics
else:
print("Finding the top topics...")
globalVar.progressPer = 30
globalVar.progressText = "Finding the top topics"
topicDic = {}
# For each paper, get the full topicDic
for paper in papersDictInside:
if globalVar.cancelProcess:
return
# For each item in paper criteria
for item in paper[args.criterion].split(";"):
# Strip paper item and upper case
item = item.strip()
item = item.upper()
# If paper item empty continue
if item == "":
continue
                # If filtering by subtopic, omit items that do not match the subtopic
if filterSubTopic != "" and len(item.split(",")) >= 2:
if (item.split(",")[1].strip().upper() != filterSubTopic.upper()):
continue
# If topic already in topicDic
if item in topicDic:
topicDic[item] += 1
# If topic is not in topicDic, create this in topicDic
else:
topicDic[item] = 1
                # If onlyFirst, only process the first item
if args.onlyFirst:
break
        # For trend analysis, the list of top topics to analyse is larger
if args.trend:
topicListLength = globalVar.TOP_TREND_SIZE
startList = 0
else:
topicListLength = args.length
startList = args.skipFirst
# Get the top topics by the topDic count
topTopcis = sorted(topicDic.items(),
key=lambda x: -x[1])[startList:(startList + topicListLength)]
# Put the topTopics in topic List
for topic in topTopcis:
topicList.append([topic[0]])
if len(topicList) == 0:
print("\nFINISHED : There is not results with your inputs criteria or filter")
del papersDictInside
return
# print("Topic list:")
# print(topicList)
        # Create a dictionary in the self.topicResults list per element in topicList
self.topicResults = []
for topics in topicList:
topicItem = {}
topicItem["upperName"] = topics[0].upper()
# If the topic name was given as an argument, use the first one given, else keep empty to use the first one found
if args.topics:
topicItem["name"] = topics[0]
else:
topicItem["name"] = ""
topicItem["allTopics"] = topics
topicItem["year"] = self.yearArray
topicItem["PapersCount"] = [0] * len(self.yearArray)
topicItem["PapersCountAccum"] = [0] * len(self.yearArray)
topicItem["PapersCountRate"] = [0] * len(self.yearArray)
topicItem["PapersTotal"] = 0
topicItem["AverageDocPerYear"] = 0 # ADY
topicItem["PapersInLastYears"] = 0
topicItem["PerInLastYears"] = 0 # PDLY
topicItem["CitedByCount"] = [0] * len(self.yearArray)
topicItem["CitedByCountAccum"] = [0] * len(self.yearArray)
topicItem["CitedByTotal"] = 0
topicItem["papers"] = []
topicItem["topicsFound"] = []
topicItem["hIndex"] = 0
topicItem["agr"] = 0 # Average growth rate
self.topicResults.append(topicItem)
# Find papers within the arguments, and fill the self.topicResults fields per year.
print("Calculating papers statistics...")
globalVar.progressText = "Calculating papers statistics"
papersLen = len(papersDictInside)
papersCounter = 0
# For each paper
for paper in papersDictInside:
papersCounter += 1
progressPer = int(float(papersCounter) / float(papersLen) * 100)
globalVar.progressPer = progressPer
if globalVar.cancelProcess:
return
# For each item in paper criteria
for item in paper[args.criterion].split(";"):
# Strip paper item and upper
item = item.strip()
itemUp = item.upper()
# For each topic in topic results
for topicItem in self.topicResults:
# for each sub topic
for subTopic in topicItem["allTopics"]:
# Check if the sub topic match with the paper item
if args.topics and "*" in subTopic.upper():
subTopicRegex = subTopic.upper().replace("*", ".*")
p = re.compile(subTopicRegex)
match = p.match(itemUp)
else:
match = subTopic.upper() == itemUp
# If match, sum it to the topicItem
if match:
yearIndex = topicItem["year"].index(int(paper["year"]))
topicItem["PapersCount"][yearIndex] += 1
topicItem["PapersTotal"] += 1
topicItem["CitedByCount"][yearIndex] += int(paper["citedBy"])
topicItem["CitedByTotal"] += int(paper["citedBy"])
# If no name in the topicItem, put the first one that was found
if topicItem["name"] == "":
topicItem["name"] = item
topicItem["papers"].append(paper)
# Add the matched paper to the papersDictOut
papersDictOut.append(paper)
# If it is a new topic, add it to topicItem["topicsFound"]
if itemUp not in [x.upper() for x in topicItem["topicsFound"]]:
topicItem["topicsFound"].append(item)
# Only process one (the first one) if args.onlyFirst
if args.onlyFirst:
break
        # Print the topics found if the asterisk wildcard was used
for topicItem in self.topicResults:
for subTopic in topicItem["allTopics"]:
if args.topics and "*" in subTopic.upper():
print("\nTopics found for %s:" % subTopic)
print('"' + ';'.join(topicItem["topicsFound"]) + '"')
print("")
print("Calculating accumulative ...")
# Extract accumulative
for topicItem in self.topicResults:
citedAccumValue = 0
papersAccumValue = 0
for i in range(0, len(topicItem["CitedByCountAccum"])):
citedAccumValue += topicItem["CitedByCount"][i]
topicItem["CitedByCountAccum"][i] = citedAccumValue
papersAccumValue += topicItem["PapersCount"][i]
topicItem["PapersCountAccum"][i] = papersAccumValue
print("Calculating Average Growth Rate (AGR)...")
# Extract the Average Growth Rate (AGR)
for topicItem in self.topicResults:
# Calculate rates
pastCount = 0
# Per year with papers count data
for i in range(0, len(topicItem["PapersCount"])):
topicItem["PapersCountRate"][i] = topicItem["PapersCount"][i] - pastCount
pastCount = topicItem["PapersCount"][i]
# Calculate AGR from rates
self.endYearIndex = len(topicItem["year"]) - 1
self.startYearIndex = self.endYearIndex - (args.windowWidth - 1)
topicItem["agr"] = \
round(np.mean(topicItem["PapersCountRate"][self.startYearIndex: self.endYearIndex + 1]), 1)
print("Calculating Average Documents per Year (ADY)...")
# Extract the Average Documents per Year (ADY)
for topicItem in self.topicResults:
# Calculate ADY from rates
self.endYearIndex = len(topicItem["year"]) - 1
self.startYearIndex = self.endYearIndex - (args.windowWidth - 1)
topicItem["AverageDocPerYear"] = \
round(np.mean(topicItem["PapersCount"][self.startYearIndex: self.endYearIndex + 1]), 1)
topicItem["PapersInLastYears"] = \
np.sum(topicItem["PapersCount"][self.startYearIndex: self.endYearIndex + 1])
if topicItem["PapersTotal"] > 0:
topicItem["PerInLastYears"] = \
round(100 * topicItem["PapersInLastYears"] / topicItem["PapersTotal"], 1)
# Scale in percentage per year
if args.pYear:
for topicItem in self.topicResults:
for year, value in yearPapers.items():
index = topicItem["year"].index(year)
if value != 0:
topicItem["PapersCount"][index] /= (float(value) / 100.0)
print("Calculating h-index...")
# Calculate h index per topic
for topicItem in self.topicResults:
# print("\n" + topicName)
# Sort papers by cited by count
papersIn = topicItem["papers"]
papersIn = sorted(papersIn, key=lambda x: int(x["citedBy"]), reverse=True)
count = 1
hIndex = 0
for paper in papersIn:
# print(str(count) + ". " + paper["citedBy"])
if int(paper["citedBy"]) >= count:
hIndex = count
count += 1
# print("hIndex: " + str(hIndex))
topicItem["hIndex"] = hIndex
# Sort by PapersTotal, and then by name.
self.topicResults = sorted(self.topicResults, key=lambda x: x["name"], reverse=False)
self.topicResults = sorted(self.topicResults, key=lambda x: int(x["PapersTotal"]), reverse=True)
# If trend analysis, sort by agr, and get the first ones
if args.trend:
self.topicResults = sorted(self.topicResults, key=lambda x: int(x["agr"]), reverse=True)
self.topicResults = self.topicResults[args.skipFirst:(args.skipFirst + args.length)]
# Print top topics
print("\nTop topics:")
print("Average Growth Rate (AGR) and Average Documents per Year (ADY) period: %d - %d\n\r"
% (self.yearArray[self.startYearIndex], self.yearArray[self.endYearIndex]))
print('-' * 87)
print("{:<4s}{:<30s}{:>10s}{:>10s}{:>10s}{:>10s}{:>12s}".format("Pos", args.criterion, "Total", "AGR", "ADY",
"PDLY", "h-index"))
print('-' * 87)
count = 0
for topicItem in self.topicResults:
print("{:<4d}{:<30s}{:>10d}{:>10.1f}{:>10.1f}{:>10.1f}{:>10d}".format(
count + 1, topicItem["name"], topicItem["PapersTotal"], topicItem["agr"],
topicItem["AverageDocPerYear"], topicItem["PerInLastYears"], topicItem["hIndex"]))
count += 1
print('-' * 87)
print("")
if filterSubTopic != "":
for topicItem in self.topicResults:
topicItem["name"] = topicItem["name"].split(",")[0].strip()
globalVar.progressText = "Saving results"
if self.fromGui:
time.sleep(0.01)
if globalVar.cancelProcess:
return
self.resultsFileName = paperSave.saveTopResults(self.topicResults, args.criterion, args.savePlot)
if self.fromGui:
time.sleep(0.01)
if globalVar.cancelProcess:
return
self.extResultsFileName = paperSave.saveExtendedResults(self.topicResults, args.criterion, args.savePlot)
        # Only save results when this is not a run over previous results
if not args.previousResults:
paperSave.saveResults(papersDictOut, os.path.join(globalVar.RESULTS_FOLDER,
globalVar.OUTPUT_FILE_NAME))
del papersDictInside
globalVar.progressPer = 101
print("\nAnalysis finished.")
def plotResults(self, args=''):
if args == '':
args = self
if args.noPlot:
return
# If more than 100 results and not wordCloud, no plot.
if len(self.topicResults) > 100 and not args.graphType == "word_cloud" and not args.noPlot:
args.noPlot = True
print("\nERROR: Not allowed to graph more than 100 results")
return
if args.graphType == "evolution":
graphUtils.plot_evolution(plt, self.topicResults, self.yearArray[self.startYearIndex], self.yearArray[self.endYearIndex], args)
if args.graphType == "word_cloud":
from wordcloud import WordCloud
my_dpi = 96
plt.figure(figsize=(1960 / my_dpi, 1080 / my_dpi), dpi=my_dpi)
if args.wordCloudMask:
imageMask = np.array(Image.open(args.wordCloudMask))
wc = WordCloud(background_color="white", max_words=5000, width=1960, height=1080, colormap="tab10",
mask=imageMask)
else:
wc = WordCloud(background_color="white", max_words=5000, width=1960, height=1080, colormap="tab10")
freq = {}
for topicItem in self.topicResults:
freq[topicItem["name"]] = topicItem["PapersTotal"]
# generate word cloud
wc.generate_from_frequencies(freq)
# show
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
fig = plt.gcf()
fig.canvas.set_window_title(args.criterion + ' word cloud graph')
if args.graphType == "bar":
graphUtils.plot_bar_horizontal(plt, self.topicResults, args)
if args.graphType == "bar_trends":
graphUtils.plot_bar_horizontal_trends(plt, self.topicResults,
self.yearArray[self.startYearIndex], self.yearArray[self.endYearIndex], args)
if args.graphType == "time_line":
graphUtils.plot_time_line(plt, self.topicResults, False, args)
fig = plt.gcf()
fig.set_size_inches(args.plotWidth, args.plotHeight)
if args.yLog:
plt.yscale('log')
# TODO: Fix mticker
# plt.gca().yaxis.set_minor_formatter(mticker.ScalarFormatter())
if args.pYear:
plt.ylabel("% of documents per year")
if args.graphTitle:
# plt.title(args.graphTitle)
fig = plt.gcf()
fig.suptitle(args.graphTitle, y=1.0)
plt.tight_layout(rect=[0, 0, 1, 0.95])
else:
plt.tight_layout()
if args.savePlot == "":
if self.fromGui:
plt.show(block=False)
else:
plt.show(block=True)
else:
plt.savefig(os.path.join(globalVar.GRAPHS_OUT_FOLDER, args.savePlot),
bbox_inches='tight', pad_inches=0.01)
print("Plot saved on: " + os.path.join(globalVar.GRAPHS_OUT_FOLDER, args.savePlot))
if args.savePlot == "":
if self.fromGui:
plt.show()
```
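
The class can also be driven headlessly, outside the GUI. A minimal sketch, assuming the preprocess step has already generated the dataset file that `scientoPy()` reads (the parameter values are illustrative; the attribute names match the defaults set in `__init__` above):

```python
# Headless usage sketch for ScientoPyClass; values are illustrative.
from ScientoPyClass import ScientoPyClass

sp = ScientoPyClass()
sp.criterion = 'authorKeywords'  # analysis criterion
sp.startYear = 2010
sp.endYear = 2020
sp.length = 10                   # number of top topics
sp.scientoPy()                   # run the analysis and save the results
sp.plotResults()                 # show (or save) the plot
```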
#### File: jpruiz84/ScientoPy/ScientoPyGui.py
```python
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from tkinter import messagebox
from tkinter import font
from tkinter.ttk import Progressbar
import time
import threading
import tkinter.scrolledtext as scrolledtext
from PIL import ImageTk, ImageColor, Image
import globalVar
from PreProcessClass import PreProcessClass
from ScientoPyClass import ScientoPyClass
from generateBibtex import generateBibtex
import webbrowser
import os.path
class ScientoPyGui:
cb_square_color = 'white'
def __init__(self):
self.scientoPy = ScientoPyClass(from_gui=True)
self.preprocess = PreProcessClass(from_gui=True)
self.root = Tk()
self.root.geometry("853x480")
self.root.resizable(width=False, height=False)
try:
bg_color = self.root.cget('bg')
bg_color_rgb = ImageColor.getcolor(bg_color, "RGB")
bg_color_avg = sum(bg_color_rgb)/len(bg_color_rgb)
if(bg_color_avg < 75):
self.cb_square_color = bg_color
except:
pass
default_font = font.nametofont("TkDefaultFont")
default_font.configure(size=10)
self.root.option_add("*font", default_font)
if os.path.exists('scientopy_icon.png'):
self.root.iconphoto(True, PhotoImage(file="scientopy_icon.png"))
self.root.title("ScientoPy")
# Starting the tabs
self.nb = ttk.Notebook(self.root)
preprocess_page = Frame(self.nb)
process_page = Frame(self.nb)
self.nb.add(preprocess_page, text='1. Pre-processing')
self.nb.add(process_page, text='2. Analysis')
self.nb.pack(expand=1, fill="both")
self.nb.select(preprocess_page)
# Pre processing tab *******************************
if os.path.exists('scientopy_logo.png'):
load = Image.open("scientopy_logo.png")
render = ImageTk.PhotoImage(load)
img = Label(preprocess_page, image=render)
img.image = render
img.place(relx=0.5, rely=0.35, anchor=CENTER)
version_label = Label(preprocess_page, text=("Universidad del Cauca, Popayán, Colombia"
"\nMIT License \nVersion %s" % globalVar.SCIENTOPY_VERSION))
version_label.place(relx=0.5, rely=0.7, anchor=CENTER)
Label(preprocess_page, text="Dataset folder:").grid(column=0, row=0, padx=17)
preprocess_page.grid_rowconfigure(0, pad=700)
self.datasetLoc = StringVar()
preprocess_page.grid_columnconfigure(2, weight=1)
self.datasetLocEntry = Entry(preprocess_page, textvariable=self.datasetLoc)
# self.datasetLocEntry.place(relx=0.47, rely=0.8, anchor=CENTER)
self.datasetLocEntry.grid(column=1, row=0, columnspan=2, sticky='we')
dataset_button = Button(preprocess_page, text="Select dataset", command=self.select_dataset)
# dataset_button.place(relx=0.9, rely=0.8, anchor=CENTER)
dataset_button.grid(column=3, row=0, sticky='w', padx=17)
self.chkValueRemoveDupl = BooleanVar()
self.chkValueRemoveDupl.set(True)
Checkbutton(preprocess_page, var=self.chkValueRemoveDupl,
text="Remove duplicated documents",
selectcolor=self.cb_square_color).place(relx=0.015, rely=0.9, anchor=W)
# Buttons ****************************
run_preprocess_button = Button(preprocess_page, text="Run preprocess", command=self.run_preprocess)
run_preprocess_button.place(relx=0.9, rely=0.9, anchor=CENTER)
open_preprocess_brief = Button(preprocess_page, text="Open preprocess brief",
command=self.open_preprocess_brief)
open_preprocess_brief.place(relx=0.57, rely=0.9, anchor=W)
# Analysis tab ************************************************************
Label(process_page, text="").grid(sticky=W, column=0, row=0)
Label(process_page, text="Criterion:", borderwidth=10).grid(sticky=W, column=0, row=1)
self.comboCriterion = ttk.Combobox(process_page, values=globalVar.validCriterion, width=15)
self.comboCriterion.current(3)
self.comboCriterion.grid(column=1, row=1)
Label(process_page, text="Graph type:", borderwidth=10).grid(sticky=W, column=0, row=2)
self.comboGraphType = ttk.Combobox(process_page, values=globalVar.validGrapTypes, width=15)
self.comboGraphType.current(0)
self.comboGraphType.grid(column=1, row=2)
Label(process_page, text="Start Year:", borderwidth=10).grid(sticky=W, column=0, row=3)
self.spinStartYear = Spinbox(process_page, from_=1900, to=2100,
textvariable=DoubleVar(value=globalVar.DEFAULT_START_YEAR), width=15)
self.spinStartYear.grid(column=1, row=3)
Label(process_page, text="End Year:", borderwidth=10).grid(sticky=W, column=0, row=4)
self.spinEndYear = Spinbox(process_page, from_=1900, to=2100,
textvariable=DoubleVar(value=globalVar.DEFAULT_END_YEAR), width=15)
self.spinEndYear.grid(column=1, row=4)
Label(process_page, text="Topics length:", borderwidth=10).grid(sticky=W, column=0, row=5)
self.spinTopicsLength = Spinbox(process_page, from_=0, to=1000, textvariable=DoubleVar(value=10),
width=15)
self.spinTopicsLength.grid(column=1, row=5)
Label(process_page, text="Skip first:", borderwidth=10).grid(sticky=W, column=0, row=6)
self.spinSkipFirst = Spinbox(process_page, from_=0, to=1000, textvariable=DoubleVar(value=0),
width=15)
self.spinSkipFirst.grid(column=1, row=6)
Label(process_page, text="Window (years):", borderwidth=10).grid(sticky=W, column=0, row=7)
self.spinWindowWidth = Spinbox(process_page, from_=1, to=100, textvariable=DoubleVar(value=2),
width=15)
self.spinWindowWidth.grid(column=1, row=7)
self.chkValuePreviusResults = BooleanVar()
self.chkValuePreviusResults.set(False)
Checkbutton(process_page, var=self.chkValuePreviusResults, selectcolor=self.cb_square_color,
text="Use previous results").grid(sticky=W, column=0, row=8, padx=7)
self.chkValueTrendAnalysis = BooleanVar()
self.chkValueTrendAnalysis.set(False)
Checkbutton(process_page, var=self.chkValueTrendAnalysis, selectcolor=self.cb_square_color,
text="Trend analysis").grid(sticky=W, column=0, row=9, padx=7)
process_page.grid_columnconfigure(2, weight=1)
Label(process_page, text="Custom topics:", borderwidth=10).grid(sticky=W, column=2, row=1, padx=15)
self.entryCustomTopics = scrolledtext.ScrolledText(process_page, undo=True, height=18)
self.entryCustomTopics.grid(column=2, row=2, rowspan=9, sticky=E, padx=25)
# Buttons ****************************
results_button = Button(process_page, text="Open results table", command=self.open_results)
results_button.place(relx=0.008, rely=0.92, anchor=W)
ext_results_button = Button(process_page, text="Open extended results", command=self.open_ext_results)
ext_results_button.place(relx=0.20, rely=0.92, anchor=W)
genbibtex_button = Button(process_page, text="Generate BibTeX", command=self.generate_bibtex)
genbibtex_button.place(relx=0.45, rely=0.92, anchor=W)
run_button = Button(process_page, text="Run", command=self.scientoPyRun)
run_button.place(relx=0.96, rely=0.92, anchor=E)
def cancel_run(self):
globalVar.cancelProcess = True
print("Canceled")
def progress_bar_fun(self):
def on_closing():
self.cancel_run()
#start progress bar
popup = Toplevel()
popup.protocol("WM_DELETE_WINDOW", on_closing)
x = self.root.winfo_x()
y = self.root.winfo_y()
popup.geometry('300x120+%d+%d' % (x + 250, y + 120))
popup.title("Progress")
label_text = StringVar()
label = Label(popup, textvariable=label_text)
label.place(x=150, y=20, anchor="center")
label_text.set(globalVar.progressText)
progress_var = DoubleVar()
progress_bar = ttk.Progressbar(popup, variable=progress_var, maximum=100, length = 280)
progress_bar.place(x=150, y=50, anchor="center")
popup.pack_slaves()
cancel_button = Button(popup, text="Cancel", command=self.cancel_run)
cancel_button.place(x=150, y=95, anchor="center")
#print("globalVar.progressPer1: %d" % globalVar.progressPer)
while globalVar.progressPer != 101:
label_text.set(globalVar.progressText)
popup.update()
time.sleep(0.1)
#print("globalVar.progressPer2: %d" % globalVar.progressPer)
progress_var.set(globalVar.progressPer)
if globalVar.cancelProcess:
break
popup.destroy()
return 0
def open_results(self):
if os.path.exists(self.scientoPy.resultsFileName):
webbrowser.open(self.scientoPy.resultsFileName)
else:
messagebox.showinfo("Error", "No results found, please run the analysis first")
def open_ext_results(self):
if os.path.exists(self.scientoPy.extResultsFileName):
webbrowser.open(self.scientoPy.extResultsFileName)
else:
messagebox.showinfo("Error", "No extended results found, please run the analysis first")
def open_preprocess_brief(self):
if os.path.exists(self.scientoPy.preprocessBriefFileName):
webbrowser.open(self.scientoPy.preprocessBriefFileName)
else:
messagebox.showinfo("Error", "No preprocess breif found, please run the preprocess first")
def scientoPyRun(self):
globalVar.cancelProcess = False
globalVar.progressPer = 0
if not os.path.exists(self.scientoPy.preprocessDatasetFile):
messagebox.showinfo("Error", "No preprocess input dataset, please run the preprocess first")
return
print(self.chkValuePreviusResults.get())
self.scientoPy.closePlot()
self.scientoPy.criterion = self.comboCriterion.get()
self.scientoPy.graphType = self.comboGraphType.get()
self.scientoPy.startYear = int(self.spinStartYear.get())
self.scientoPy.endYear = int(self.spinEndYear.get())
self.scientoPy.length = int(self.spinTopicsLength.get())
self.scientoPy.skipFirst = int(self.spinSkipFirst.get())
self.scientoPy.windowWidth = int(self.spinWindowWidth.get())
self.scientoPy.previousResults = self.chkValuePreviusResults.get()
self.scientoPy.trend = self.chkValueTrendAnalysis.get()
if bool(self.entryCustomTopics.get("1.0", END).strip()):
self.scientoPy.topics = self.entryCustomTopics.get("1.0", END).replace("\n", ";")
else:
self.scientoPy.topics = ''
t1 = threading.Thread(target=self.scientoPy.scientoPy)
t1.start()
self.progress_bar_fun()
t1.join()
if globalVar.cancelProcess:
return
self.scientoPy.plotResults()
def select_dataset(self):
self.root.dir_name = filedialog.askdirectory()
if not self.root.dir_name:
return
self.datasetLoc.set(self.root.dir_name)
def run_preprocess(self):
print(self.datasetLoc.get())
if self.datasetLoc.get():
try:
self.preprocess.dataInFolder = self.root.dir_name
self.preprocess.noRemDupl = not self.chkValueRemoveDupl.get()
# Run preprocess in another thread
t1 = threading.Thread(target=self.preprocess.preprocess)
t1.start()
# While running preprocess run progress bar
# Progress bar ends when preprocess ends
self.progress_bar_fun()
# Wait until preprocess thread ends
t1.join()
if globalVar.cancelProcess:
messagebox.showinfo("Error", "Preprocessing canceled")
elif(globalVar.totalPapers > 0):
self.preprocess.graphBrief()
elif globalVar.totalPapers == 0:
messagebox.showinfo("Error", "No valid dataset files found in: %s" % self.root.dir_name)
except:
messagebox.showinfo("Error", "No valid dataset folder")
else:
messagebox.showinfo("Error", "No dataset folder defined")
def generate_bibtex(self):
if not os.path.exists(self.scientoPy.preprocessDatasetFile):
messagebox.showinfo("Error", "No preprocess input dataset, please run the preprocess first")
return
latexFileName = filedialog.askopenfilename(initialdir="./", title="Select the LaTeX file",
filetypes=(("Latex", "*.tex"), ("all files", "*.*")))
if not latexFileName:
return
print(latexFileName)
outFileName = generateBibtex(latexFileName)
webbrowser.open(outFileName)
def runGui(self):
self.root.mainloop()
if __name__ == '__main__':
scientoPyGui = ScientoPyGui()
scientoPyGui.runGui()
``` |
{
"source": "jprukner/naucse.python.cz",
"score": 2
} |
#### File: naucse.python.cz/naucse/cli.py
```python
import click
import elsa
from naucse.utils.views import forks_enabled, does_course_return_info
def cli(app, *, base_url=None, freezer=None):
"""Return the elsa CLI extended with naucse-specific commands.
"""
elsa_group = elsa.cli(app, base_url=base_url, freezer=freezer, invoke_cli=False)
@click.group()
def naucse():
pass
@naucse.command()
@click.option("--forks-only", default=False, is_flag=True,
help="Only list courses and runs from forks")
def list_courses(forks_only):
"""List all courses and runs and info about them.
Mainly useful for courses from forks.
        Shows where they are sourced from and whether they return even
        the most basic information (and will therefore be included in
        the list of courses/runs).
A practical benefit is that on Travis CI, the docker images are
        pulled/built by this command, so freezing won't time out after
the 10 minute limit if things are taking particularly long.
"""
from naucse.views import model
def canonical(course, suffix=""):
click.echo(f" {course.slug}: {course.title}{suffix}")
def fork_invalid(course):
click.echo(f" {course.slug}, from {course.repo}@{course.branch}: "
f"Fork doesn't return basic info, will be ignored.")
def fork_valid(course, suffix=""):
click.echo(f" {course.slug}, from {course.repo}@{course.branch}: {course.title}{suffix}")
click.echo(f"Courses:")
for course in model.courses.values():
if forks_only and not course.is_link():
continue
if not course.is_link():
canonical(course)
elif forks_enabled():
if does_course_return_info(course, force_ignore=True):
fork_valid(course)
else:
fork_invalid(course)
click.echo(f"Runs:")
for course in model.runs.values():
if forks_only and not course.is_link():
continue
if not course.is_link():
canonical(course, suffix=f" ({course.start_date} - {course.end_date})")
elif forks_enabled():
if does_course_return_info(course, ["start_date", "end_date"], force_ignore=True):
fork_valid(course, suffix=f" ({course.start_date} - {course.end_date})")
else:
fork_invalid(course)
cli = click.CommandCollection(sources=[naucse, elsa_group])
return cli()
```
#### File: naucse.python.cz/naucse/__init__.py
```python
import logging
import sys
from logging.handlers import RotatingFileHandler
from pathlib import Path
from naucse.freezer import NaucseFreezer
if sys.version_info[0] < 3:
raise RuntimeError('We love Python 3.')
from naucse.cli import cli
from naucse.views import app, lesson_static_generator
def main():
arca_log_path = Path(".arca/arca.log")
arca_log_path.parent.mkdir(exist_ok=True)
arca_log_path.touch()
naucse_log_path = Path(".arca/naucse.log")
naucse_log_path.touch()
def get_handler(path, **kwargs):
handler = RotatingFileHandler(path, **kwargs)
formatter = logging.Formatter("[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s")
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
return handler
logger = logging.getLogger("arca")
logger.addHandler(get_handler(arca_log_path, maxBytes=10000, backupCount=0))
logger = logging.getLogger("naucse")
logger.addHandler(get_handler(naucse_log_path))
freezer = NaucseFreezer(app)
# see the generator for details
freezer.register_generator(lesson_static_generator)
cli(app, base_url='https://naucse.python.cz', freezer=freezer)
```
#### File: naucse/utils/views.py
```python
import datetime
import hashlib
import json
import os
from collections import deque, defaultdict
from pathlib import Path
from arca.exceptions import PullError, BuildError, RequirementsMismatch
from arca.utils import get_hash_for_file
absolute_urls_to_freeze = deque()
def get_recent_runs(course):
"""Build a list of "recent" runs based on a course.
By recent we mean: haven't ended yet, or ended up to ~2 months ago
(Note: even if naucse is hosted dynamically,
it's still beneficial to show recently ended runs.)
"""
recent_runs = []
if not course.start_date:
today = datetime.date.today()
cutoff = today - datetime.timedelta(days=2*30)
this_year = today.year
for year, run_year in reversed(course.root.run_years.items()):
for run in run_year.runs.values():
if not run.is_link() or (forks_enabled() and does_course_return_info(run, ["start_date", "end_date"])):
if run.base_course is course and run.end_date > cutoff:
recent_runs.append(run)
if year < this_year:
# Assume no run lasts for more than a year,
# e.g. if it's Jan 2018, some run that started in 2017 may
# be included, but don't even look through runs from 2016
# or earlier.
break
recent_runs.sort(key=lambda r: r.start_date, reverse=True)
return recent_runs
def list_months(start_date, end_date):
"""Return a span of months as a list of (year, month) tuples
The months of start_date and end_date are both included.
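    For example, list_months(date(2017, 11, 5), date(2018, 2, 1))
    returns [(2017, 11), (2017, 12), (2018, 1), (2018, 2)].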
"""
months = []
year = start_date.year
month = start_date.month
while (year, month) <= (end_date.year, end_date.month):
months.append((year, month))
month += 1
if month > 12:
month = 1
year += 1
return months
_naucse_tree_hash = {}
def get_naucse_tree_hash(repo):
"""Return the hash of the folder ``naucse`` in specified ``repo``.
The ``naucse`` tree contains rendering mechanisms.
"""
from naucse.views import app
global _naucse_tree_hash
if _naucse_tree_hash.get(repo.git_dir):
return _naucse_tree_hash[repo.git_dir]
tree_hash = get_hash_for_file(repo, "naucse")
if not app.config['DEBUG']:
_naucse_tree_hash[repo.git_dir] = tree_hash
return tree_hash
_lesson_tree_hash = defaultdict(dict)
def get_lesson_tree_hash(repo, lesson_slug):
"""Return the hash of the tree containing the lesson in specified repo.
"""
from naucse.views import app
global _lesson_tree_hash
if lesson_slug in _lesson_tree_hash[repo.git_dir]:
return _lesson_tree_hash[repo.git_dir][lesson_slug]
# ``repo.git_dir`` is path to the ``.git`` folder
if not (Path(repo.git_dir).parent / "lessons" / lesson_slug).exists():
raise FileNotFoundError
commit = get_hash_for_file(repo, "lessons/" + lesson_slug)
if not app.config['DEBUG']:
_lesson_tree_hash[repo.git_dir][lesson_slug] = commit
return commit
def forks_enabled():
"""Return true if forks are enabled.
By default forks are not enabled (for the purposes of local development).
Forks can be enabled by setting the FORKS_ENABLED environment variable
to ``true`` (or, in tests, by overriding this function).
"""
return os.environ.get("FORKS_ENABLED", "false") == "true"
def forks_raise_if_disabled():
"""Raise ValueError if forks are not enabled.
"""
if not forks_enabled():
raise ValueError(
"You must explicitly allow forks to be rendered.\n"
"Set FORKS_ENABLED=true to enable them.")
def raise_errors_from_forks():
"""Return true if errors from forks should be re-raised.
If this returns false, errors from forks should be handled:
* Not even basic course info is returned -> Left out of the list of courses
* Error rendering a page
* Lesson - if the lesson is canonical, canonical version is rendered with a warning
* Everything else - templates/error_in_fork.html is rendered
Raising can be enabled by setting the RAISE_FORK_ERRORS environment
variable to ``true`` (or, in tests, by overriding this function).
"""
return os.environ.get("RAISE_FORK_ERRORS", "false") == "true"
def does_course_return_info(course, extra_required=(), *, force_ignore=False):
"""Return true if basic info about the course is available.
This tests that the given external course can be pulled and it
returns required info (roughly, enough to be displayed in the
course list).
Exceptions are re-raised if :func:`raise_errors_from_forks` indicates
they should and ``force_ignore`` is not set.
Otherwise, they are only logged.
"""
from naucse.views import logger
required = ["title", "description"] + list(extra_required)
try:
if isinstance(course.info, dict) and all([x in course.info for x in required]):
return True
if raise_errors_from_forks() and not force_ignore:
raise ValueError(f"Couldn't get basic info about the course {course.slug}, "
f"the repo didn't return a dict or the required info is missing.")
else:
logger.error("There was an problem getting basic info out of forked course %s. "
"Suppressing, because this is the production branch.", course.slug)
except (PullError, BuildError, RequirementsMismatch) as e:
if raise_errors_from_forks() and not force_ignore:
raise
if isinstance(e, PullError):
logger.error("There was an problem either pulling or cloning the forked course %s. "
"Suppressing, because this is the production branch.", course.slug)
elif isinstance(e, RequirementsMismatch):
logger.error("There are some extra requirements in the forked course %s. "
"Suppressing, because this is the production branch.", course.slug)
else:
logger.error("There was an problem getting basic info out of forked course %s. "
"Suppressing, because this is the production branch.", course.slug)
logger.exception(e)
return False
def page_content_cache_key(repo, lesson_slug, page, solution, course_vars=None) -> str:
"""Return a key under which content fragments will be stored in cache
The cache key depends on the page and the last commit which modified
lesson rendering in ``repo``
"""
return "commit:{}:content:{}".format(
get_naucse_tree_hash(repo),
hashlib.sha1(json.dumps(
{
"lesson": lesson_slug,
"page": page,
"solution": solution,
"vars": course_vars,
"lesson_tree_hash": get_lesson_tree_hash(repo, lesson_slug),
},
sort_keys=True
).encode("utf-8")).hexdigest()
)
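# The resulting key has the shape "commit:<naucse-tree-hash>:content:<sha1-hex>",
# so a change to either the rendering code or the lesson sources yields a new key.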
def edit_link(path):
from naucse.views import model
if path == Path("."):
return f"https://github.com/{model.meta.slug}"
return f"https://github.com/{model.meta.slug}/blob/{model.meta.branch}/{str(path)}"
def get_edit_icon():
"""Return name of the icon for the "edit this page" link, or None.
Icon names should come from Bytesize Icons (see
`templates/_bytesize_icons.html`).
"""
return "github"
def get_edit_page_name():
"""Return name of the page where editing is possible.
The returned value needs to be in Czech in the locative ("6th case");
it will be used to replace X in the sentence: `Uprav tuto stránku na X.`
"""
return "GitHubu"
def get_edit_info(edit_path):
return {
"icon": get_edit_icon(),
"page_name": get_edit_page_name(),
"url": edit_link(edit_path)
}
``` |
{
"source": "jprules321/colas",
"score": 3
} |
#### File: extensions/ipc/ut.py
```python
import sys
sys.path.insert(0,'..')
import os
import unittest
from utudx import utUDX, run
#......................................................................
class ut(utUDX):
def setUp(self):
utUDX.setUp(self,['ipc.udxt','ipc/ipc.udxt'])
#--
def test_verb(self):
self.ga('ipc_verb')
self.assertEqual("ON",self.ga.rword(1,4))
self.ga('ipc_verb')
self.assertEqual("OFF",self.ga.rword(1,4))
self.ga('ipc_verb OFF')
self.assertEqual("OFF",self.ga.rword(1,4))
self.ga('ipc_verb ON')
self.assertEqual("ON",self.ga.rword(1,4))
def test_OpenClose(self):
self.ga('ipc_verb ON')
self.ga('ipc_open /tmp/ipc.bin w')
self.assertEqual("/tmp/ipc.bin",self.ga.rword(1,4))
self.ga('ipc_close')
self.ga('ipc_open /tmp/ipc.bin r')
self.assertEqual("/tmp/ipc.bin",self.ga.rword(1,4))
self.ga('ipc_close')
self.ga('ipc_verb OFF')
def test_save(self):
self.ga('ipc_verb ON')
self.ga('ipc_save ts /tmp/ts.bin')
self.assertEqual("20+3358+73+46",self.ga.rword(2,2))
self.assertEqual("/tmp/ts.bin",self.ga.rword(3,5))
self.ga('ipc_open /tmp/ps.bin w')
self.assertEqual("/tmp/ps.bin",self.ga.rword(1,4))
self.ga('ipc_save ps /tmp/ps.bin')
self.assertEqual("20+3358+73+46",self.ga.rword(2,2))
self.ga('ipc_verb OFF')
def test_load(self):
self.ga('ipc_verb ON')
self.ga('ipc_save ts /tmp/ts.bin')
self.ga('define tsload = ipc_load("/tmp/ts.bin")')
self.ga('d ts - tsload')
cmin = abs(float(self.ga.rword(1,2)))
assert cmin<1e-4, 'large load/save discrepancy: cmin=%f'%cmin
cmax = abs(float(self.ga.rword(1,4)))
assert cmax<1e-4, 'large load/save discrepancy: cmax=%f'%cmax
def test_error(self):
try:
self.ga('ipc_open /tmp/t354f_CXCFCGAW__ r')
except:
self.assertEqual("'/tmp/t354f_CXCFCGAW__'",self.ga.rword(1,8))
line1 = self.ga.rline(1)
try:
self.ga('ipc_error')
except:
self.assertEqual("'/tmp/t354f_CXCFCGAW__'",self.ga.rword(1,8))
line2 = self.ga.rline(1)
self.assertEqual(line1,line2)
def test_udf_save(self):
self.ga('ipc_verb ON')
self.ga('d ipc_save(ts,/tmp/ts.bin)')
self.assertEqual("20+3358+73+46",self.ga.rword(2,2))
self.ga('ipc_verb OFF')
#......................................................................
if __name__ == "__main__":
run(ut)
```
#### File: extensions/mf/t.pyhilo.py
```python
import os,sys
import M2
import TCw2 as TC
import const
import grads
#llllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllllll
#
# local
#
def plotPosit(p):
tt=p.split()
chrhl=tt[0]
lathl=tt[1]
lonhl=tt[2]
valhl=tt[4]
grdhl=tt[12]
lplhl=tt[14]
hlfmt='%3.1f'
arg= "%s %s %s %s %s %s %s"%(chrhl,lathl,lonhl,valhl,grdhl,lplhl,hlfmt)
argm= "(%s,%s,%s,%s,%s,%s,%s)"%(chrhl,lathl,lonhl,valhl,grdhl,lplhl,hlfmt)
print 'ppp ',chrhl,lathl,lonhl,valhl,grdhl,lplhl
print 'aaa ',arg
gcmd="p1hilo %s"%(arg)
ga(gcmd)
rcp1=ga.Lines
return(gcmd,rcp1)
def plotBt(lat,lon,mw,btszscl=1.0,btcol=1,btcolty=2):
cmw="%3.0f"%(mw)
arg= "%s %s %s %s %s %s"%(lat,lon,cmw,btszscl,btcol,btcolty)
gcmd="drawtcbt %s"%(arg)
ga(gcmd)
rcp1=ga.Lines
return(gcmd,rcp1)
def plotFt(p,fttype=1,ftszscl=1.0,ftbcol=3,ftfcol=2):
tt=p.split()
chrhl=tt[0]
lathl=tt[1]
lonhl=tt[2]
valhl=float(tt[4])
grdhl=float(tt[12])
lplhl=float(tt[14])
cval="%3.0f"%(valhl)
arg= "%s %s %s %s %s %s"%(lathl,lonhl,fttype,ftszscl,ftbcol,ftfcol)
gcmd="drawtcft %s"%(arg)
ga(gcmd)
rcp1=ga.Lines
return(gcmd,rcp1)
def plotXpt(stmid,dtg,model,var,type,ptype,btlat,btlon,btmw,radinf,bearing,
doplotvar=0,doclear=0,doqpos=0,docirc=1,dopng=1,tag=None):
if(ptype == 'w'):
ftbcol=4
ftfcol=2
elif(ptype == 'm'):
ftbcol=3
ftfcol=1
else:
ftbcol=3
ftfcol=5
if(doclear): ga('c')
if(doplotvar): ga("d %s"%(var))
(rc1,gacmd)=plotBt(btlat,btlon,btmw)
cmd="mfhilo %s gr %s d 80 %f %f"%(var,type,btlat,btlon)
ga(cmd)
rc=ga.Lines
p1=rc[2]
print 'qqqqqqqqqqqqqq ',p1
# (rcp1,gacmd)=plotPosit(p1)
(rcp1,gacmd)=plotFt(p1,ftbcol=ftbcol,ftfcol=ftfcol)
if(doqpos): ga('q pos')
if(doclear and docirc):
t1="stmid: %s dtg: %s vmax: %3.0f model: %s var: %s"%(stmid,dtg,btmw,model,var)
t2=plotC1tcprop(var,p1,radinf,bearing,docirc)
ga('set strsiz 0.06')
cmd="draw title %s\%s"%(t1,t2)
ga(cmd)
if(dopng):
if(tag != None):
pngfile="/w3/rapb/wxmap2/tmp/mf.%s.%s.%s.%s.png"%(tag,stmid,dtg,var)
else:
pngfile="/w3/rapb/wxmap2/tmp/mf.%s.%s.%s.png"%(stmid,dtg,var)
cmd="gxyat -o %s -r -x 1024 -y 768"%(pngfile)
ga(cmd)
return(p1)
def plotC1tcprop(var,p,radinf,bearing,docirc=1):
tt=p.split()
chr=tt[0]
lat=tt[1]
lon=tt[2]
val=float(tt[4])
grd=float(tt[12])
lpl=float(tt[14])
cval="%3.0f"%(val)
arg="%s %s %s %s"%(lat,lon,radinf,bearing)
cmd="tcprop %s %s"%(var,arg)
ga(cmd)
cards=ga.Lines
for card in cards:
print 'cccc ',card
meancard=cards[3]
tt=meancard.split()
mean=float(tt[1])
meanh1=float(tt[3])
meanh2=float(tt[5])
t2="m: %3.1f mRH: %3.1f mLH: %3.1f R: %3.0f [nm] B: %3.0f deg"%(mean,meanh1,meanh2,radinf,bearing)
print 'tttt222 ',t2
if(docirc):
cmd="c1hilo %s %s %s %s"%(lat,lon,radinf,bearing)
ga(cmd)
return(t2)
dowindow=0
dtg='2009050312'
dtg='2009050412'
model='gfs2'
model='fim8'
tt=TC.GetStmidsByDtg(dtg)
stmids=tt[0]
stmid=stmids[0]
bts=TC.GetBtLatLonVmax(stmid)
bt=bts[dtg]
btlat=bt[0]
btlon=bt[1]
btmw=bt[2]
btdir=bt[4]
btspd=bt[5]
print 'qqqqqqqqqqq ',btdir,btspd
radinf=200.0
bearing=btdir
dlat=4
dlon=dlat*(4.0/3.0)
blat=btlat-dlat
elat=blat+2*dlat
blon=btlon-dlon
elon=blon+2*dlon
ga=grads.GaNum(Bin='../../bin/grads',Window=dowindow)
ga('load udxt libmf.udxt')
ga('load udxt ../re/re.udxt')
ga('load udxt ../fish/fish.udxt')
ga('load udxt ../gxyat/gxyat.udxt')
ga('set mpdset hires')
ga('set map 15 0 8')
g1=M2.Model2(model).DataPath(dtg)
dpgfs=g1.dpaths[0]
print 'qqqq ',dpgfs
ga.open(dpgfs)
ga('set lev 850')
ga('vt=hcurl(ua,va)*1e5')
print 'btlat ',btlat,btlon,btdir
#ga('vt=smth2d(vt,2)')
ga('set csmooth on')
if(btlat < 0): ga('vt=-vt')
ga('psi=fish(vt*1e-5)*1e-6')  # fixed: parentheses in the GrADS expression were unbalanced
ga('psl=((psl*0.01)-1000.0)')
dorest=0
if(dorest):
ga('tup=zg(lev=200)-zg(lev=500)')
ga('tup=smth2d(tup,10,0.5)')
ga('z8=zg(lev=850)')
ga('z7=zg(lev=700)')
ga('was=mag(uas,vas)*%f'%(const.ms2knots))
ga('set lat %f %f'%(blat,elat))
ga('set lon %f %f'%(blon,elon))
ga('tcprop psl %f %f 200 0'%(btlat,btlon))
sys.exit()
var='psl'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1,dopng=0)
var='vt'
p1=plotXpt(stmid,dtg,model,var,'h','w',btlat,btlon,btmw,radinf,bearing,doclear=0,doplotvar=0,docirc=0,dopng=0)
var='psi'
p1=plotXpt(stmid,dtg,model,var,'l','w',btlat,btlon,btmw,radinf,bearing,doclear=0,doplotvar=0,docirc=0,dopng=0)
var='z8'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=0,doplotvar=0,docirc=0,dopng=0)
var='z7'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=0,doplotvar=0,docirc=0,dopng=0)
var='was'
p1=plotXpt(stmid,dtg,model,var,'l','w',btlat,btlon,btmw,radinf,bearing,doclear=0,doplotvar=0,docirc=0,dopng=1,tag='circs')
var='psl'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='vt'
p1=plotXpt(stmid,dtg,model,var,'h','w',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='psi'
p1=plotXpt(stmid,dtg,model,var,'l','w',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='z8'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='z7'
p1=plotXpt(stmid,dtg,model,var,'l','m',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='was'
p1=plotXpt(stmid,dtg,model,var,'l','w',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
var='tup'
p1=plotXpt(stmid,dtg,model,var,'h','m',btlat,btlon,btmw,radinf,bearing,doclear=1,doplotvar=1,docirc=1)
sys.exit()
sys.exit()
rc=plotXpt('psi','l','w',btlat,btlon,btmw,doclear=0)
rc=plotXpt('psl','l','m',btlat,btlon,btmw,doclear=0)
rc=plotXpt('was','l','w',btlat,btlon,btmw)
rc=plotXpt('tup','h','m',btlat,btlon,btmw)
rc=plotXpt('z8','l','m',btlat,btlon,btmw)
rc=plotXpt('z7','l','m',btlat,btlon,btmw)
#ga('mfhilo vt gr h d 80 %f %f'%(btlat,btlon))
#rcvt=ga.Lines
#ga('mfhilo psl gr l d 80 %f %f'%(btlat,btlon))
#rcpsl=ga.Lines
#print rcvty
```
#### File: extensions/mf/ut.py
```python
import sys
sys.path.insert(0,'..')
import os
import unittest
from utudx import utUDX, run
#......................................................................
class ut(utUDX):
verb=0
def setUp(self):
utUDX.setUp(self,['libmf.udxt','mf/libmf.udxt'])
def test_mfhilo_gr(self,verb=verb):
self.ga("""set lat 20 90
set lon 60 240
set lev 200
zga=zg-ave(zg,lon=0,lon=360)
mfhilo zga gr b d 100 33.67 222.50""")
if(verb): self._PrintResult()
self.assertEqual(2,int(self.ga.rword(1,5)))
self.assertEqual(33,int(float(self.ga.rword(2,2))))
self.assertEqual(42,int(float(self.ga.rword(3,2))))
def test_mfhilo_cl(self,verb=verb):
self.ga("""set lat 20 90
set lon 60 240
set lev 200
zga=zg-ave(zg,lon=0,lon=360)
mfhilo zga cl b 300 10000 33.67 222.50""")
if(verb): self._PrintResult()
self.assertEqual(9,int(self.ga.rword(1,5)))
self.assertEqual(33,int(float(self.ga.rword(2,2))))
def test_tcprop(self,verb=verb):
self.ga("""set lat 20 90
set lon 60 240
set lev 200
zga=zg-ave(zg,lon=0,lon=360)
tcprop zga 33.67 222.50 300""")
if(verb): self._PrintResult()
radmean=float(self.ga.rword(6,2))
self.assertEqual(227,int(radmean))
def test_re2(self,verb=verb):
self.ga("set lev 200")
self.ga("d re2(ua,1.0)")
if(verb): self._PrintResult()
self.assertEqual(360,int(self.ga.rword(3,6)))
def test_smth2d(self,verb=verb):
self.ga("set lev 200")
self._CheckCint('smth2d(ua,10,0.5)',-5,50,5)
def test_esmrf(self):
self._CheckCint('esmrf(ta)',5,40,5)
def test_linreg(self,verb=verb):
self.ga("""set lat 0
set lev 200
d linreg(zg*1e-5)""")
if(verb): self._PrintResult()
self.assertEqual("0.1244",self.ga.rword(1,4))
# not tested yet...........................
#def test_grhist(self):
# sys.stdout.write("skipped ... ")
# pass
#def test_uv2trw(self):
# sys.stdout.write("skipped ... ")
# pass
#def test_mfhilo_tm(self,verb=0):
# sys.stdout.write("skipped ... ")
# pass
#--
# Useful Internal Methods for Writing Tests
def _PrintResult(self):
for i in range(0,self.ga.nLines+1):
card=self.ga.rline(i)
print 'card ',i,card
def _CheckCint(self,name,cmin,cmax,cint):
"""
Check contour intervals during display.
"""
self.ga('clear')
self.ga('display %s'%name)
self.assertEqual(cmin,int(self.ga.rword(1,2)))
self.assertEqual(cmax,int(self.ga.rword(1,4)))
self.assertEqual(cint,int(self.ga.rword(1,6)))
def _CompareFiles(self,fh1,fh2):
vars1 = fh1.vars[:]
vars2 = fh2.vars[:]
self.assertEqual(vars1.sort(),vars2.sort())
self.assertEqual(fh1.nt,fh2.nt)
for i in range(len(fh1.vars)):
var = fh1.vars[i]
nz = fh1.var_levs[i]
if nz==0: nz=1
if var=='hus': nz=5
nt = fh1.nt
for t in range(1,nt+1):
for z in range(1,nz+1):
self.ga('clear')
self.ga('display %s.%d(z=%d,t=%d) - %s.%d(z=%d,t=%d)'\
%(var,fh1.fid,z,t,var,fh2.fid,z,t))
# print ">>> t=%d, z=%d, %s --- %s "%(t,z,var,self.ga.rline(1))
self.assertEqual(self.ga.rline(1), \
'Constant field. Value = 0')
#......................................................................
if __name__ == "__main__":
run(ut)
```
#### File: wgrib2-v0.1.9.4/extensions/ut.all.py
```python
import os
import sys
import unittest
#......................................................................
sys.path.insert(0,'../pytests/lib')
from grads import GrADS
import ams
import bjt
import fish
import gsf
import gxyat
import hello
import ipc
import lats
import mf
import orb
import shape
import shfilt
# Special case to avoid conflict with bult in "re" (regular expression)
sys.path.insert(0,'re')
import utre as re
#......................................................................
def run_all_tests(verb=2,BinDir=None,DataDir=None):
"""
Runs all tests based on the standard *model* testing file.
"""
print ""
print " Testing OpenGrADS Extensions"
print " ----------------------------"
print ""
# Assemble and run the test suite
# -------------------------------
load = unittest.TestLoader().loadTestsFromTestCase
TestSuite = [ load(ams.ut),
load(bjt.ut),
load(fish.ut),
load(gsf.ut),
load(gxyat.ut),
load(hello.ut),
load(ipc.ut),
load(lats.ut),
load(mf.ut),
load(orb.ut),
load(re.ut),
load(shape.ut),
load(shfilt.ut),
]
all = unittest.TestSuite(TestSuite)
Results = unittest.TextTestRunner(verbosity=verb).run(all)
# Return number of errors+failures: skipped binaries do not count
# ---------------------------------------------------------------
if not Results.wasSuccessful():
raise IOError, 'GrADS tests failed'
else:
print "Passed ALL unit tests"
#----------------------------------------------------------------
if __name__ == "__main__":
run_all_tests()
``` |
{
"source": "jprusik/dreamhost-flask-project-template",
"score": 3
} |
#### File: dreamhost-flask-project-template/app/app.py
```python
from flask import Flask, url_for, redirect, request, Markup, render_template, session, flash
import json, datetime
import config
app = Flask(__name__)
app.config.from_object('config')
# DISABLE DEBUG FOR PRODUCTION!
app.debug = False
def clear_session():
session['last_action'] = None
# using session.clear() nulls everything, including the session itself, so you have to check for session AND session['key'] or pop(None) individual session keys
# session.clear()
# Check credentials, modify session, etc.
@app.before_request
def before_request():
if 'session_start' not in session:
session['session_start'] = datetime.datetime.now()
session['last_action'] = datetime.datetime.now()
@app.route('/index')
@app.route('/')
def index():
return render_template('home.html')
@app.route('/search', methods=['GET'])
def search():
searchword = request.args.get('query', '')
if searchword == '':
flash('No query value was provided.')
return render_template('search.html', query_return=searchword)
@app.route('/logout')
def logout():
clear_session()
return redirect(url_for('index'))
@app.errorhandler(404)
def not_found(error):
return render_template('error.html', error_info='404: Page not found')
@app.errorhandler(413)
def request_entity_too_large(error):
    return render_template('error.html', error_info='413: Upload size exceeded')
@app.errorhandler(500)
def internal_server_error(error):
# This may pass system errors you do not wish users to see
return render_template('error.html', error_info=error.args)
# What version of python is active?
# import sys
# @app.route('/pyversion')
# def pyversion():
# return sys.version
if __name__ == '__main__':
app.run()
``` |
{
"source": "JPry/digital_picture_frame",
"score": 3
} |
#### File: JPry/digital_picture_frame/shutdown.py
```python
import RPi.GPIO as io
import time
import os
SHUTDOWN_PIN = 23
HOLD_SECONDS = 3
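# Wiring assumption: a momentary push button between BCM pin 23 and ground.
# The internal pull-up (PUD_UP) keeps the pin HIGH; a press pulls it LOW.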
def shutdown():
os.system("sudo shutdown -h now")
def main():
io.setmode(io.BCM)
io.setup(SHUTDOWN_PIN, io.IN, pull_up_down=io.PUD_UP)
# io.add_event_detect(SHUTDOWN_PIN, io.FALLING, callback=shutdown, bouncetime = 200)
# Wait for the button to be pressed
while True:
button_state = io.input(SHUTDOWN_PIN)
if button_state == False:
# Require the button to be pressed for HOLD_SECONDS
time.sleep(HOLD_SECONDS)
new_state = io.input(SHUTDOWN_PIN)
if new_state == False:
shutdown()
time.sleep(0.5)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
io.cleanup()
``` |
{
"source": "jpsachse/megajules3000",
"score": 3
} |
#### File: megajules3000/server/main.py
```python
import json, random, os
from flask import Flask, url_for
from flask_cors import CORS
from map_manager import MapManager
app = Flask(__name__)
CORS(app)
print "Cleaning up temporary files..."
for filename in os.listdir("static"):
if filename.endswith(".png"):
os.remove("static/" + filename)
map_manager = MapManager(map_directory="maps/", initial_map="Alabastia_Lab")
@app.route('/current_map')
def get_map():
map = map_manager.current_map
response = dict()
if map.startX >= 0:
response['startX'] = map.startX
if map.startY >= 0:
response['startY'] = map.startY
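    # Append a random suffix to the map name so browsers cannot serve a
    # stale cached PNG for a map that has since been re-rendered.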
cache_buster = map.name + str(random.randint(0, 1000000000))
response["name"] = cache_buster
map.as_image(map_manager.directory).save("static/" + cache_buster + ".png")
response["objects"] = url_for('static', filename=cache_buster + '.png')
response["map"] = map.as_collision_map()
return json.dumps(response)
@app.route('/action/<action_id>')
def show_user_profile(action_id):
action = map_manager.current_map.actions[int(action_id)]
if action.type == "changeMap":
map_manager.change_map_by_name(action.content)
elif action.type == "showFact" and action.content == "":
try:
action.content = map_manager.takeFactFromCurrentLevel()
except IndexError:
action.content = "Nothing interesting (Pool of facts is empty.)"
elif action.type == "startMinigame":
if action.content["name"] == "guessMe":
facts = {}
for entity in action.content["entities"]:
facts[entity] = []
temp_facts = map_manager.knowledge_fetcher.get_filtered_facts_for(entity)
for i in range(10):
if len(temp_facts) > 0:
pos = random.randint(0, len(temp_facts) - 1)
facts[entity].append(temp_facts.pop(pos))
action.content["solutions"] = []
action.content["facts"] = []
for entity, entity_facts in facts.iteritems():
name = map_manager.knowledge_fetcher.get_label_for(entity)
action.content["solutions"].append(name)
action.content["facts"].append(entity_facts)
return json.dumps(action.__dict__)
@app.route('/minigame/<action_id>/<result>')
def evaluate_minigame(action_id, result):
action = map_manager.current_map.actions[int(action_id)]
next_action = map_manager.current_map.actions[action.next_action]
print result
if int(result) > 0: #TODO: real evaluation
if next_action.type == "changeMap":
map_manager.change_map_by_name(next_action.content)
return json.dumps(next_action.__dict__)
else:
raise Exception("There shall be a changeMap after a minigame!")
else:
return json.dumps(next_action.__dict__)
if __name__ == '__main__':
app.run(port=4242, debug=True)
```
#### File: megajules3000/server/map_manager.py
```python
import os.path
import random
from map_generator import MapGenerator
from knowledge.fetcher import KnowledgeFetcher
class MapManager():
def __init__(self, initial_map, map_directory="maps/"):
self.directory = map_directory
self.current_map = None
self.maps = self.retrieve_maps()
self.knowledge_fetcher = KnowledgeFetcher()
self.knowledgePool = {}
if type(initial_map) == int:
self.change_map_by_index(initial_map)
else:
self.change_map_by_name(initial_map)
def retrieve_maps(self):
if self.directory == None or not os.path.exists(self.directory):
raise Exception("Invalid maps directory")
result = list()
generator = MapGenerator()
for map_file in os.listdir(self.directory):
if map_file.endswith(".json"):
map_path = self.directory + map_file
map = generator.generate_map(map_path)
map.name = map_file.replace(".json", "")
result.append(map)
return result
def get_map_by_index(self, index):
return self.maps[index]
def get_map_by_name(self, map_name):
for map in self.maps:
if map.name == map_name:
return map
def takeFactFromCurrentLevel(self):
if not self.current_map.entity in self.knowledgePool:
self.knowledgePool[self.current_map.entity] = self.knowledge_fetcher.get_filtered_facts_for(self.current_map.entity)
        index = random.randint(0, len(self.knowledgePool[self.current_map.entity]) - 1)
return self.knowledgePool[self.current_map.entity].pop(index)
def change_map_by_index(self, index):
self.current_map = self.maps[index]
def change_map_by_name(self, name):
self.current_map = self.get_map_by_name(name)
```
#### File: server/model/action.py
```python
class Action():
def __init__(self, id=-1, type="", next_action=None, content=""):
self.type = type
self.next_action = next_action
self.content = content
self.id = id
``` |
{
"source": "jpsaenzmo/jupyter-lab-kernelino",
"score": 3
} |
#### File: jupyter-lab-kernelino/arduino_kernel/board.py
```python
class BoardError(Exception):
def __init__(self, msg):
# pylint: disable=useless-super-delegation
super().__init__(msg)
class Board:
def __init__(self):
self.connected = False
self.serial = None
def connect(self):
"""(re)connect to board and enter raw repl
"""
if self.connected:
return
        # pylint: disable=too-many-function-args
device = self._find_board()
def _find_board(self):
"""Return the FBQN of the connected Arduino boards"""
```
#### File: jupyter-lab-kernelino/arduino_kernel/kernel.py
```python
from ipykernel.kernelbase import Kernel
import json
import os
import subprocess
import sys
import urllib
from urllib.request import urlopen
from requests.compat import urljoin
from notebook.notebookapp import list_running_servers
from .board import Board, BoardError
SKETCH_FOLDER = ".arduino/sketch"
class ArduinoKernel(Kernel):
implementation = "Arduino"
implementation_version = "1.0"
language = "no-op"
language_version = "0.1"
language_info = {
"name": "Any text",
"mimetype": "text/plain",
"file_extension": ".ino",
}
banner = "Arduino kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._start_bash()
def _start_bash(self):
from pexpect import replwrap
import signal
sig = signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
os.makedirs(SKETCH_FOLDER)
except FileExistsError:
pass
def do_execute(
self, code, silent, store_history=True, user_expressions=None, allow_stdin=False
):
from pexpect import EOF
# Empty cell
if not code.strip():
return {
"status": "OK",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
# Non-empty cell
interrupted = False
try:
try:
os.makedirs(SKETCH_FOLDER)
except FileExistsError:
pass
if code == "arduino-cli board list":
try:
                    sp = subprocess.check_output(
                        ["arduino-cli", "board", "list"], stderr=subprocess.STDOUT
                    )
except subprocess.CalledProcessError as e:
raise RuntimeError(
"command '{}' return with error (code {}): {}".format(
e.cmd, e.returncode, e.output
)
)
output = sp.decode(sys.stdout.encoding)
elif code.startswith("arduino-cli lib install"):
try:
sp = subprocess.check_output(
code,
stderr=subprocess.STDOUT,
shell=True,
)
except subprocess.CalledProcessError as e:
errorTxt = "Command '{}' return with error (code {}): {}".format(
e.cmd, e.returncode, e.output
)
stream_content = {"name": "stdout", "text": errorTxt}
self.send_response(self.iopub_socket, "stream", stream_content)
return {"status": "abort", "execution_count": self.execution_count}
output = sp.decode(sys.stdout.encoding)
else:
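                # Cells in this branch are expected to begin with magic-like
                # header lines:
                #   port%<serial-port>    optional; triggers an upload
                #   board%<FQBN>          selects the compile target
                # The remaining lines are the sketch source itself.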
oper = code.split("\n")[0]
command = ""
codes = ""
if oper.split("%")[0] == "port":
port = oper.split("%")[1]
fqbn = code.split("\n")[1]
fqbn = fqbn.split("%")[1]
codes = code.split("\n", 2)[2]
command = (
"arduino-cli upload -p "
+ port
+ " --fqbn "
+ fqbn
+ " "
+ SKETCH_FOLDER
)
elif oper.split("%")[0] == "board":
fqbn = code.split("\n")[0]
fqbn = fqbn.split("%")[1]
codes = code.split("\n", 1)[1]
command = "arduino-cli compile -b " + fqbn + " " + SKETCH_FOLDER
f = open(SKETCH_FOLDER + "/sketch.ino", "w+")
f.write(codes.rstrip())
f.close()
try:
sp = subprocess.check_output(
command,
stderr=subprocess.STDOUT,
shell=True,
)
except subprocess.CalledProcessError as e:
errorTxt = "Command '{}' return with error (code {}): {}".format(
e.cmd, e.returncode, e.output
)
stream_content = {"name": "stdout", "text": errorTxt}
self.send_response(self.iopub_socket, "stream", stream_content)
return {"status": "abort", "execution_count": self.execution_count}
output = sp.decode(sys.stdout.encoding)
except KeyboardInterrupt:
interrupted = True
clean_sketches()
        # Restarting Bash (vestigial path from the bash-kernel template;
        # note ``self.bash_wrapper`` is never created in this kernel)
except EOF:
output = self.bash_wrapper.child.before + "Restarting Bash"
# If expecting output
if not silent:
stream_content = {"name": "stdout", "text": output}
self.send_response(self.iopub_socket, "stream", stream_content)
# If interrupted
if interrupted:
clean_sketches()
return {"status": "abort", "execution_count": self.execution_count}
# If everything is OK
else:
return {
"status": "ok",
"execution_count": self.execution_count,
"payload": [],
"user_expressions": {},
}
def clean_sketches():
if os.path.isfile("./" + SKETCH_FOLDER + "/sketch.ino"):
filelist = os.listdir("./" + SKETCH_FOLDER)
for f in filelist:
            os.remove(os.path.join("./" + SKETCH_FOLDER, f))
``` |
{
"source": "jpsahoo14/graphql-engine",
"score": 3
} |
#### File: server/tests-py/test_tests.py
```python
import pytest
from super_classes import DefaultTestSelectQueries
from validate import check_query_f, collapse_order_not_selset
from ruamel.yaml.comments import CommentedMap
class TestTests1(DefaultTestSelectQueries):
"""
Test various things about our test framework code. Validate that tests work
as we expect.
"""
# NOTE: We don't care about this for now, but should adapt this to test
# that xfail detection in code that handles `--accept` works correctly.
@pytest.mark.xfail(reason="expected")
def test_tests_xfail(self, request):
try:
marker = request.node.get_closest_marker("xfail")
print(marker)
if marker.name != 'xfail':
print("FAIL!")
return True # Force a test failure when xfail strict
except:
print("FAIL!")
return True # Force a test failure when xfail strict
assert 0, "Expected failure is expected"
# Adapted arbitrarily from
# `TestGraphQLQueryBasic.test_select_query_author_pk()` using original yaml
# test case file that we later fixed.
@pytest.mark.xfail(reason="expected, validating test code")
def test_tests_detect_bad_ordering(self, hge_ctx):
"""We can detect bad ordering of selection set"""
check_query_f(hge_ctx, 'test_tests/select_query_author_by_pkey_bad_ordering.yaml', 'http')
#
# E AssertionError:
# E expected:
# E data:
# E author_by_pk:
# E name: Author 1
# E id: 1
# E diff: (results differ only in their order of keys)
# E response:
# E data:
# E author_by_pk:
# E id: 1
# E name: Author 1
# Re-use setup and teardown from where we adapted this test case:
@classmethod
def dir(cls):
return 'queries/graphql_query/basic'
class TestTests2(DefaultTestSelectQueries):
"""
Test various things about our test framework code. Validate that tests work
as we expect.
"""
# Test another bad ordering scenario, while we're here:
@pytest.mark.xfail(reason="expected, validating test code")
def test_tests_detect_bad_ordering(self, hge_ctx):
"""We can detect bad ordering of selection set"""
check_query_f(hge_ctx, 'test_tests/user_can_query_jsonb_values_filter_bad_order.yaml', 'http')
#
# E AssertionError:
# E expected:
# E data:
# E jsonb_table:
# E - jsonb_col:
# E name: Hasura
# E age: 7
# E id: 1
# E response:
# E data:
# E jsonb_table:
# E - id: 1
# E jsonb_col:
# E age: 7
# E name: Hasura
# E diff: (results differ only in their order of keys)
# Unit test for good measure, to validate above and check our assumptions
# wrt comparisons of trees of ordered and unordered dicts and arrays:
def test_tests_dict_ordering_assumptions_and_helpers(self):
# fragment of yaml test file:
example_query = {"query": """
query {
thing1
jsonb_table{
id
jsonb_col
}
thing2
}
""" }
# We want to collapse any ordering we don't care about here
# (CommentedMap is ruamel.yaml's OrderedMap that also preserves
# format):
fully_ordered_result = \
CommentedMap([('data',
CommentedMap([
('thing1', "thing1"),
('jsonb_table', [
CommentedMap([
('id', 1),
('jsonb_col', CommentedMap([('age', 7), ('name', 'Hasura')]))]),
CommentedMap([
('id', 2),
('jsonb_col', CommentedMap([('age', 8), ('name', 'Rawkz')]))]),
]),
('thing2', CommentedMap([("a",1), ("b",2), ("c",3)])),
]))])
relevant_ordered_result = collapse_order_not_selset(fully_ordered_result, example_query)
# We expect to have discarded ordering of leaves not in selset:
relevant_ordered_result_expected = \
dict([('data',
CommentedMap([
('thing1', "thing1"),
('jsonb_table', [
CommentedMap([
('id', 1),
('jsonb_col', dict([('age', 7), ('name', 'Hasura')]))]),
CommentedMap([
('id', 2),
('jsonb_col', dict([('age', 8), ('name', 'Rawkz')]))]),
]),
('thing2', dict([("a",1), ("b",2), ("c",3)])),
]))])
        # NOTE: use str() to actually do a strong equality comparison, comparing
# types. Only works because str() on dict seems to have a canonical
# ordering.
assert str(relevant_ordered_result) == str(relevant_ordered_result_expected)
# Demonstrate equality on different mixes of trees of ordered and unordered dicts:
assert CommentedMap([("a", "a"), ("b", "b")]) == dict([("b", "b"), ("a", "a")])
assert CommentedMap([("a", "a"), ("b", "b")]) != CommentedMap([("b", "b"), ("a", "a")])
assert dict([ ("x", CommentedMap([("a", "a"), ("b", CommentedMap([("b1", "b1"), ("b2", "b2")]))])), ("y","y"),]) == \
CommentedMap([("y","y"), ("x", dict([("a", "a"), ("b", CommentedMap([("b1", "b1"), ("b2", "b2")]))])), ])
def test_tests_ordering_differences_correctly_ignored(self, hge_ctx):
"""
We don't care about ordering of stuff outside the selection set e.g. JSON fields.
"""
check_query_f(hge_ctx, 'test_tests/user_can_query_jsonb_values_filter_okay_orders.yaml', 'http')
# Re-use setup and teardown from where we adapted this test case:
@classmethod
def dir(cls):
return 'queries/graphql_query/permissions'
``` |
{
"source": "jpsalado92/airflow",
"score": 2
} |
#### File: airflow/decorators/__init__.py
```python
from typing import TYPE_CHECKING
from airflow.decorators.python import PythonDecoratorMixin, python_task # noqa
from airflow.decorators.python_virtualenv import PythonVirtualenvDecoratorMixin
from airflow.decorators.task_group import task_group # noqa
from airflow.models.dag import dag # noqa
from airflow.providers_manager import ProvidersManager
class _TaskDecorator(PythonDecoratorMixin, PythonVirtualenvDecoratorMixin):
def __getattr__(self, name):
if name.startswith("__"):
raise AttributeError(f'{type(self).__name__} has no attribute {name!r}')
decorators = ProvidersManager().taskflow_decorators
if name not in decorators:
raise AttributeError(f"task decorator {name!r} not found")
return decorators[name]
# [START mixin_for_autocomplete]
if TYPE_CHECKING:
try:
from airflow.providers.docker.decorators.docker import DockerDecoratorMixin
class _DockerTask(_TaskDecorator, DockerDecoratorMixin):
pass
_TaskDecorator = _DockerTask # type: ignore[misc]
except ImportError:
pass
# [END mixin_for_autocomplete]
task = _TaskDecorator()
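# Usage sketch (assuming a standard Airflow 2.x install): the mixins provide
# the built-in decorators,
#
#   @task.python
#   def extract(): ...
#
#   @task.virtualenv(requirements=["pandas"])
#   def transform(): ...
#
# while names such as ``task.docker`` are resolved through ``__getattr__``
# from decorators registered by installed providers.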
```
#### File: airflow_breeze/ci/build_image.py
```python
from pathlib import Path
from typing import List
from airflow_breeze.breeze import get_airflow_sources_root
from airflow_breeze.cache import check_cache_and_write_if_not_cached
from airflow_breeze.ci.build_params import BuildParams
from airflow_breeze.console import console
from airflow_breeze.utils import filter_out_none, run_command
PARAMS_CI_IMAGE = [
"python_base_image",
"airflow_version",
"airflow_branch",
"airflow_extras",
"airflow_pre_cached_pip_packages",
"additional_airflow_extras",
"additional_python_deps",
"additional_dev_apt_command",
"additional_dev_apt_deps",
"additional_dev_apt_env",
"additional_runtime_apt_command",
"additional_runtime_apt_deps",
"additional_runtime_apt_env",
"upgrade_to_newer_dependencies",
"constraints_github_repository",
"airflow_constraints_reference",
"airflow_constraints",
"airflow_image_repository",
"airflow_image_date_created",
"build_id",
"commit_sha",
]
PARAMS_TO_VERIFY_CI_IMAGE = [
"dev_apt_command",
"dev_apt_deps",
"runtime_apt_command",
"runtime_apt_deps",
]
def construct_arguments_docker_command(ci_image: BuildParams) -> List[str]:
args_command = []
for param in PARAMS_CI_IMAGE:
args_command.append("--build-arg")
args_command.append(param.upper() + "=" + str(getattr(ci_image, param)))
for verify_param in PARAMS_TO_VERIFY_CI_IMAGE:
param_value = str(getattr(ci_image, verify_param))
if len(param_value) > 0:
args_command.append("--build-arg")
args_command.append(verify_param.upper() + "=" + param_value)
docker_cache = ci_image.docker_cache_ci_directive
if len(docker_cache) > 0:
args_command.extend(ci_image.docker_cache_ci_directive)
return args_command
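# For example (illustrative values only), the list comes out as interleaved
# pairs ready to splice into a ``docker build`` command line:
#   ["--build-arg", "PYTHON_BASE_IMAGE=python:3.7-slim-buster",
#    "--build-arg", "AIRFLOW_VERSION=2.3.0.dev0", ...]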
def construct_docker_command(ci_image: BuildParams) -> List[str]:
arguments = construct_arguments_docker_command(ci_image)
final_command = []
final_command.extend(["docker", "build"])
final_command.extend(arguments)
final_command.extend(["-t", ci_image.airflow_ci_image_name, "--target", "main", "."])
final_command.extend(["-f", str(Path(get_airflow_sources_root(), 'Dockerfile.ci').resolve())])
return final_command
def build_image(verbose, **kwargs):
ci_image_params = BuildParams(filter_out_none(**kwargs))
is_cached, value = check_cache_and_write_if_not_cached(
"PYTHON_MAJOR_MINOR_VERSION", ci_image_params.python_version
)
if is_cached:
ci_image_params.python_version = value
cmd = construct_docker_command(ci_image_params)
output = run_command(cmd, verbose=verbose, text=True)
console.print(f"[blue]{output}")
```
#### File: aws/utils/eks_test_utils.py
```python
import datetime
import re
from copy import deepcopy
from typing import Dict, List, Optional, Pattern, Tuple, Type, Union
from airflow.providers.amazon.aws.hooks.eks import EksHook
from ..utils.eks_test_constants import (
STATUS,
ClusterAttributes,
ClusterInputs,
FargateProfileAttributes,
FargateProfileInputs,
NodegroupAttributes,
NodegroupInputs,
ResponseAttributes,
)
InputTypes = Union[Type[ClusterInputs], Type[NodegroupInputs], Type[FargateProfileInputs]]
def attributes_to_test(
inputs: InputTypes,
cluster_name: str,
fargate_profile_name: Optional[str] = None,
nodegroup_name: Optional[str] = None,
) -> List[Tuple]:
"""
Assembles the list of tuples which will be used to validate test results.
The format of the tuple is (attribute name, expected value)
:param inputs: A class containing lists of tuples to use for verifying the output
of cluster or nodegroup creation tests.
:type inputs: InputTypes
:param cluster_name: The name of the cluster under test.
:type cluster_name: str
:param fargate_profile_name: The name of the Fargate profile under test if applicable.
:type fargate_profile_name: str
:param nodegroup_name: The name of the nodegroup under test if applicable.
:type nodegroup_name: str
:return: Returns a list of tuples containing the keys and values to be validated in testing.
:rtype: List[Tuple]
"""
result: List[Tuple] = deepcopy(inputs.REQUIRED + inputs.OPTIONAL + [STATUS]) # type: ignore
if inputs == ClusterInputs:
result += [(ClusterAttributes.NAME, cluster_name)]
elif inputs == FargateProfileInputs:
result += [(FargateProfileAttributes.FARGATE_PROFILE_NAME, fargate_profile_name)]
elif inputs == NodegroupInputs:
# The below tag is mandatory and must have a value of either 'owned' or 'shared'
# A value of 'owned' denotes that the subnets are exclusive to the nodegroup.
# The 'shared' value allows more than one resource to use the subnet.
required_tag: Dict = {'kubernetes.io/cluster/' + cluster_name: 'owned'}
# Find the user-submitted tag set and append the required tag to it.
final_tag_set: Dict = required_tag
for key, value in result:
if key == "tags":
final_tag_set = {**value, **final_tag_set}
# Inject it back into the list.
result = [
(key, value) if (key != NodegroupAttributes.TAGS) else (NodegroupAttributes.TAGS, final_tag_set)
for key, value in result
]
result += [(NodegroupAttributes.NODEGROUP_NAME, nodegroup_name)]
return result
def generate_clusters(eks_hook: EksHook, num_clusters: int, minimal: bool) -> List[str]:
"""
Generates a number of EKS Clusters with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:type eks_hook: EksHook
:param num_clusters: Number of clusters to generate.
:type num_clusters: int
:param minimal: If True, only the required values are generated; if False all values are generated.
:type minimal: bool
:return: Returns a list of the names of the generated clusters.
:rtype: List[str]
"""
# Generates N clusters named cluster0, cluster1, .., clusterN
return [
eks_hook.create_cluster(name="cluster" + str(count), **_input_builder(ClusterInputs, minimal))[
ResponseAttributes.CLUSTER
][ClusterAttributes.NAME]
for count in range(num_clusters)
]
def generate_fargate_profiles(
eks_hook: EksHook, cluster_name: str, num_profiles: int, minimal: bool
) -> List[str]:
"""
Generates a number of EKS Fargate profiles with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:type eks_hook: EksHook
:param cluster_name: The name of the EKS Cluster to attach the nodegroups to.
:type cluster_name: str
:param num_profiles: Number of Fargate profiles to generate.
:type num_profiles: int
:param minimal: If True, only the required values are generated; if False all values are generated.
:type minimal: bool
:return: Returns a list of the names of the generated nodegroups.
:rtype: List[str]
"""
# Generates N Fargate profiles named profile0, profile1, .., profileN
return [
eks_hook.create_fargate_profile(
fargateProfileName="profile" + str(count),
clusterName=cluster_name,
**_input_builder(FargateProfileInputs, minimal),
)[ResponseAttributes.FARGATE_PROFILE][FargateProfileAttributes.FARGATE_PROFILE_NAME]
for count in range(num_profiles)
]
def generate_nodegroups(
eks_hook: EksHook, cluster_name: str, num_nodegroups: int, minimal: bool
) -> List[str]:
"""
Generates a number of EKS Managed Nodegroups with data and adds them to the mocked backend.
:param eks_hook: An EksHook object used to call the EKS API.
:type eks_hook: EksHook
:param cluster_name: The name of the EKS Cluster to attach the nodegroups to.
:type cluster_name: str
:param num_nodegroups: Number of clusters to generate.
:type num_nodegroups: int
:param minimal: If True, only the required values are generated; if False all values are generated.
:type minimal: bool
:return: Returns a list of the names of the generated nodegroups.
:rtype: List[str]
"""
# Generates N nodegroups named nodegroup0, nodegroup1, .., nodegroupN
return [
eks_hook.create_nodegroup(
nodegroupName="nodegroup" + str(count),
clusterName=cluster_name,
**_input_builder(NodegroupInputs, minimal),
)[ResponseAttributes.NODEGROUP][NodegroupAttributes.NODEGROUP_NAME]
for count in range(num_nodegroups)
]
def region_matches_partition(region: str, partition: str) -> bool:
"""
Returns True if the provided region and partition are a valid pair.
:param region: AWS region code to test.
:type: region: str
:param partition: AWS partition code to test.
:type partition: str
:return: Returns True if the provided region and partition are a valid pair.
:rtype: bool
"""
    # Check the most specific prefixes first so that e.g. a "us-gov-iso-"
    # region is not claimed by the shorter "us-gov-" prefix.
    valid_matches: List[Tuple[str, str]] = [
        ("us-gov-iso-b-", "aws-iso-b"),
        ("us-gov-iso-", "aws-iso"),
        ("us-gov-", "aws-us-gov"),
        ("cn-", "aws-cn"),
    ]
for prefix, expected_partition in valid_matches:
if region.startswith(prefix):
return partition == expected_partition
return partition == "aws"
def _input_builder(options: InputTypes, minimal: bool) -> Dict:
"""
Assembles the inputs which will be used to generate test object into a dictionary.
:param options: A class containing lists of tuples to use for to create
the cluster or nodegroup used in testing.
:type options: InputTypes
:param minimal: If True, only the required values are generated; if False all values are generated.
:type minimal: bool
:return: Returns a dict containing the keys and values to be validated in testing.
:rtype: Dict
"""
values: List[Tuple] = deepcopy(options.REQUIRED) # type: ignore
if not minimal:
values.extend(deepcopy(options.OPTIONAL))
return dict(values) # type: ignore
def string_to_regex(value: str) -> Pattern[str]:
"""
Converts a string template into a regex template for pattern matching.
:param value: The template string to convert.
:type value: str
:returns: Returns a regex pattern
:rtype: Pattern[str]
"""
return re.compile(re.sub(r"[{](.*?)[}]", r"(?P<\1>.+)", value))
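# Illustrative example: string_to_regex("arn:{partition}:eks:{region}")
# compiles a pattern equivalent to r"arn:(?P<partition>.+):eks:(?P<region>.+)",
# whose groups can then be read back by name from a match.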
def convert_keys(original: Dict) -> Dict:
"""
API Input and Output keys are formatted differently. The EKS Hooks map
as closely as possible to the API calls, which use camelCase variable
names, but the Operators match python conventions and use snake_case.
This method converts the keys of a dict which are in snake_case (input
format) to camelCase (output format) while leaving the dict values unchanged.
:param original: Dict which needs the keys converted.
:value original: Dict
"""
if "nodegroup_name" in original.keys():
conversion_map = dict(
cluster_name="clusterName",
cluster_role_arn="roleArn",
nodegroup_subnets="subnets",
subnets="subnets",
nodegroup_name="nodegroupName",
nodegroup_role_arn="nodeRole",
)
elif "fargate_profile_name" in original.keys():
conversion_map = dict(
cluster_name="clusterName",
fargate_profile_name="fargateProfileName",
subnets="subnets",
# The following are "duplicated" because we used the more verbose/descriptive version
# in the CreateCluster Operator when creating a cluster alongside a Fargate profile, but
# the more terse version in the CreateFargateProfile Operator for the sake of convenience.
pod_execution_role_arn="podExecutionRoleArn",
fargate_pod_execution_role_arn="podExecutionRoleArn",
selectors="selectors",
fargate_selectors="selectors",
)
else:
conversion_map = dict(
cluster_name="name",
cluster_role_arn="roleArn",
resources_vpc_config="resourcesVpcConfig",
)
return {conversion_map[k]: v for (k, v) in deepcopy(original).items()}
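# Illustrative example (keys taken from the cluster conversion map above):
#   convert_keys({"cluster_name": "c", "cluster_role_arn": "arn:..."})
#   == {"name": "c", "roleArn": "arn:..."}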
def iso_date(input_datetime: datetime.datetime) -> str:
return input_datetime.strftime("%Y-%m-%dT%H:%M:%S") + "Z"
def generate_dict(prefix, count) -> Dict:
return {f"{prefix}_{_count}": str(_count) for _count in range(count)}
```
#### File: www/views/test_views.py
```python
import os
from collections.abc import Callable
from unittest import mock
import pytest
from airflow.configuration import initialize_config
from airflow.plugins_manager import AirflowPlugin, EntryPointSource
from airflow.www import views
from airflow.www.views import get_key_paths, get_safe_url, get_value_from_path, truncate_task_duration
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_plugins import mock_plugin_manager
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
def test_configuration_do_not_expose_config(admin_client):
with conf_vars({('webserver', 'expose_config'): 'False'}):
resp = admin_client.get('configuration', follow_redirects=True)
check_content_in_response(
[
'Airflow Configuration',
'# Your Airflow administrator chose not to expose the configuration, '
'most likely for security reasons.',
],
resp,
)
@mock.patch.dict(os.environ, {"AIRFLOW__CORE__UNIT_TEST_MODE": "False"})
def test_configuration_expose_config(admin_client):
    # make sure config is initialized (without unit test mode)
conf = initialize_config()
conf.validate()
with conf_vars({('webserver', 'expose_config'): 'True'}):
resp = admin_client.get('configuration', follow_redirects=True)
check_content_in_response(['Airflow Configuration', 'Running Configuration'], resp)
def test_redoc_should_render_template(capture_templates, admin_client):
from airflow.utils.docs import get_docs_url
with capture_templates() as templates:
resp = admin_client.get('redoc')
check_content_in_response('Redoc', resp)
assert len(templates) == 1
assert templates[0].name == 'airflow/redoc.html'
assert templates[0].local_context == {
'openapi_spec_url': '/api/v1/openapi.yaml',
'rest_api_enabled': True,
'get_docs_url': get_docs_url,
}
def test_plugin_should_list_on_page_with_details(admin_client):
resp = admin_client.get('/plugin')
check_content_in_response("test_plugin", resp)
check_content_in_response("Airflow Plugins", resp)
check_content_in_response("source", resp)
check_content_in_response("<em>$PLUGINS_FOLDER/</em>test_plugin.py", resp)
def test_plugin_should_list_entrypoint_on_page_with_details(admin_client):
mock_plugin = AirflowPlugin()
mock_plugin.name = "test_plugin"
mock_plugin.source = EntryPointSource(
mock.Mock(), mock.Mock(version='1.0.0', metadata={'name': 'test-entrypoint-testpluginview'})
)
with mock_plugin_manager(plugins=[mock_plugin]):
resp = admin_client.get('/plugin')
check_content_in_response("test_plugin", resp)
check_content_in_response("Airflow Plugins", resp)
check_content_in_response("source", resp)
check_content_in_response("<em>test-entrypoint-testpluginview==1.0.0:</em> <Mock id=", resp)
def test_plugin_endpoint_should_not_be_unauthenticated(app):
resp = app.test_client().get('/plugin', follow_redirects=True)
check_content_not_in_response("test_plugin", resp)
check_content_in_response("Sign In - Airflow", resp)
def test_should_list_providers_on_page_with_details(admin_client):
resp = admin_client.get('/provider')
beam_href = "<a href=\"https://airflow.apache.org/docs/apache-airflow-providers-apache-beam/"
beam_text = "apache-airflow-providers-apache-beam</a>"
beam_description = "<a href=\"https://beam.apache.org/\">Apache Beam</a>"
check_content_in_response(beam_href, resp)
check_content_in_response(beam_text, resp)
check_content_in_response(beam_description, resp)
check_content_in_response("Providers", resp)
def test_endpoint_should_not_be_unauthenticated(app):
resp = app.test_client().get('/provider', follow_redirects=True)
check_content_not_in_response("Providers", resp)
check_content_in_response("Sign In - Airflow", resp)
@pytest.mark.parametrize(
"url, content",
[
(
"/taskinstance/list/?_flt_0_execution_date=2018-10-09+22:44:31",
"List Task Instance",
),
(
"/taskreschedule/list/?_flt_0_execution_date=2018-10-09+22:44:31",
"List Task Reschedule",
),
],
ids=["instance", "reschedule"],
)
def test_task_start_date_filter(admin_client, url, content):
resp = admin_client.get(url)
# We aren't checking the logic of the date filter itself (that is built
# in to FAB) but simply that our UTC conversion was run - i.e. it
# doesn't blow up!
check_content_in_response(content, resp)
@pytest.mark.parametrize(
"url, content",
[
(
"/taskinstance/list/?_flt_3_dag_id=test_dag",
"List Task Instance",
)
],
ids=["instance"],
)
def test_task_dag_id_equals_filter(admin_client, url, content):
resp = admin_client.get(url)
# We aren't checking the logic of the dag_id filter itself (that is built
# in to FAB) but simply that dag_id filter was run
check_content_in_response(content, resp)
@pytest.mark.parametrize(
"test_url, expected_url",
[
("", "/home"),
("http://google.com", "/home"),
("36539'%3balert(1)%2f%2f166", "/home"),
(
"http://localhost:8080/trigger?dag_id=test&origin=36539%27%3balert(1)%2f%2f166&abc=2",
"/home",
),
(
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%test_dag';alert(33)//",
"/home",
),
(
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%3Dtest_dag",
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%3Dtest_dag",
),
],
)
@mock.patch("airflow.www.views.url_for")
def test_get_safe_url(mock_url_for, app, test_url, expected_url):
mock_url_for.return_value = "/home"
with app.test_request_context(base_url="http://localhost:8080"):
assert get_safe_url(test_url) == expected_url
@pytest.mark.parametrize(
"test_duration, expected_duration",
[
(0.12345, 0.123),
(0.12355, 0.124),
(3.12, 3.12),
(9.99999, 10.0),
(10.01232, 10),
],
)
def test_truncate_task_duration(test_duration, expected_duration):
assert truncate_task_duration(test_duration) == expected_duration
@pytest.fixture
def test_app():
from airflow.www import app
return app.create_app(testing=True)
def test_mark_task_instance_state(test_app):
"""
Test that _mark_task_instance_state() does all three things:
- Marks the given TaskInstance as SUCCESS;
- Clears downstream TaskInstances in FAILED/UPSTREAM_FAILED state;
- Set DagRun to QUEUED.
"""
from airflow.models import DAG, DagBag, TaskInstance
from airflow.operators.dummy import DummyOperator
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.utils.types import DagRunType
from airflow.www.views import Airflow
start_date = datetime(2020, 1, 1)
with DAG("test_mark_task_instance_state", start_date=start_date) as dag:
task_1 = DummyOperator(task_id="task_1")
task_2 = DummyOperator(task_id="task_2")
task_3 = DummyOperator(task_id="task_3")
task_4 = DummyOperator(task_id="task_4")
task_5 = DummyOperator(task_id="task_5")
task_1 >> [task_2, task_3, task_4, task_5]
dagrun = dag.create_dagrun(
start_date=start_date,
execution_date=start_date,
data_interval=(start_date, start_date),
state=State.FAILED,
run_type=DagRunType.SCHEDULED,
)
def get_task_instance(session, task):
return (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.task_id == task.task_id,
TaskInstance.execution_date == start_date,
)
.one()
)
with create_session() as session:
get_task_instance(session, task_1).state = State.FAILED
get_task_instance(session, task_2).state = State.SUCCESS
get_task_instance(session, task_3).state = State.UPSTREAM_FAILED
get_task_instance(session, task_4).state = State.FAILED
get_task_instance(session, task_5).state = State.SKIPPED
session.commit()
test_app.dag_bag = DagBag(dag_folder='/dev/null', include_examples=False)
test_app.dag_bag.bag_dag(dag=dag, root_dag=dag)
with test_app.test_request_context():
view = Airflow()
view._mark_task_instance_state(
dag_id=dag.dag_id,
task_id=task_1.task_id,
origin="",
execution_date=start_date.isoformat(),
upstream=False,
downstream=False,
future=False,
past=False,
state=State.SUCCESS,
)
with create_session() as session:
# After _mark_task_instance_state, task_1 is marked as SUCCESS
assert get_task_instance(session, task_1).state == State.SUCCESS
# task_2 remains as SUCCESS
assert get_task_instance(session, task_2).state == State.SUCCESS
# task_3 and task_4 are cleared because they were in FAILED/UPSTREAM_FAILED state
assert get_task_instance(session, task_3).state == State.NONE
assert get_task_instance(session, task_4).state == State.NONE
# task_5 remains as SKIPPED
assert get_task_instance(session, task_5).state == State.SKIPPED
dagrun.refresh_from_db(session=session)
# dagrun should be set to QUEUED
assert dagrun.get_state() == State.QUEUED
TEST_CONTENT_DICT = {"key1": {"key2": "val2", "key3": "val3", "key4": {"key5": "val5"}}}
@pytest.mark.parametrize(
"test_content_dict, expected_paths", [(TEST_CONTENT_DICT, ("key1.key2", "key1.key3", "key1.key4.key5"))]
)
def test_generate_key_paths(test_content_dict, expected_paths):
for key_path in get_key_paths(test_content_dict):
assert key_path in expected_paths
@pytest.mark.parametrize(
"test_content_dict, test_key_path, expected_value",
[
(TEST_CONTENT_DICT, "key1.key2", "val2"),
(TEST_CONTENT_DICT, "key1.key3", "val3"),
(TEST_CONTENT_DICT, "key1.key4.key5", "val5"),
],
)
def test_get_value_from_path(test_content_dict, test_key_path, expected_value):
assert expected_value == get_value_from_path(test_key_path, test_content_dict)
def assert_decorator_used(cls: type, fn_name: str, decorator: Callable):
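    # ``decorator(None)`` builds a throwaway wrapper whose ``__code__`` object
    # is shared by every wrapper the decorator produces; walking the
    # ``__wrapped__`` chain (set by functools.wraps) finds it if it was applied.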
fn = getattr(cls, fn_name)
code = decorator(None).__code__
while fn is not None:
if fn.__code__ is code:
return
if not hasattr(fn, '__wrapped__'):
break
fn = getattr(fn, '__wrapped__')
assert False, f'{cls.__name__}.{fn_name} was not decorated with @{decorator.__name__}'
@pytest.mark.parametrize(
"cls",
[
views.TaskInstanceModelView,
views.DagRunModelView,
],
)
def test_dag_edit_privileged_requires_view_has_action_decorators(cls: type):
action_funcs = {func for func in dir(cls) if callable(getattr(cls, func)) and func.startswith("action_")}
    # We remove action_post, as it is a standard Flask-AppBuilder method used
    # to dispatch the other action functions, not a custom action itself.
action_funcs = action_funcs - {"action_post"}
for action_function in action_funcs:
assert_decorator_used(cls, action_function, views.action_has_dag_edit_access)
``` |
{
"source": "jpsalamarcara/every_angle_rec_test",
"score": 2
} |
#### File: datasources/mongo_item_datasource/datasource.py
```python
from typing import List
from .mongo.model import StoredItem
from media_library.core.datasources.item_datasource import ItemDataSource
from media_library.core.domain.item import Item
class MongoItemDataSource(ItemDataSource):
def __init__(self):
self.storage = StoredItem
def save(self, item: Item):
row = self.storage.objects(uid=item.uid).first() if item.uid is not None else None
if row is None:
row = self.storage(name=item.name,
media_type=item.media_type,
location=item.location
)
        else:
            row.name = item.name
            row.media_type = item.media_type
            row.location = item.location
        row.save()
def __parse__(self, item: StoredItem) -> Item:
return Item(uid=item.uid, name=item.name, media_type=item.media_type, location=item.location)
def get(self, name: str = None, media_type: str = None, location: str = None) -> List[Item]:
query = {}
if name:
query['name'] = name
if media_type:
query['media_type'] = media_type
if location:
query['location'] = location
return [self.__parse__(row) for row in self.storage.objects(**query).all()]
def delete(self, uid):
row = self.storage.objects(uid=uid).first() if uid is not None else None
if row:
row.delete()
```
#### File: core/domain/item.py
```python
_AVAILABLE_MEDIA_TYPES = ('game', 'music', 'movie')
class Item:
def __init__(self, uid=None, name: str = None, media_type: str = None, location: str=None):
assert name is not None, 'name must have a value'
assert media_type in _AVAILABLE_MEDIA_TYPES, 'media_type not yet supported!'
assert location is not None, 'location must have a value'
self.uid = uid
self.name = name
self.media_type = media_type
self.location = location
``` |
{
"source": "jpsampaio/ExEcommIT",
"score": 3
} |
#### File: jpsampaio/ExEcommIT/105.py
```python
def notas(*notas, sit=True):
adSituacao = dict()
total = len(notas)
adSituacao['Total'] = total
maior = max(notas)
adSituacao['Maior'] = maior
menor = min(notas)
adSituacao['Menor'] = menor
soma = sum(notas)
media = soma / len(notas)
    adSituacao['Média'] = f'{media:.1f}'
if sit:
if media >= 7:
adSituacao['Situação'] = 'Aprovado'
elif media >= 5:
adSituacao['Situação'] = 'Razoável'
else:
adSituacao['Situação'] = 'Reprovado'
return adSituacao
resposta = notas(5.6, 10, 8.7, 3.3, sit = True)
print(resposta)
```
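Because `sit` is keyword-only, the pass/fail verdict can also be switched off; an illustrative call:
```python
print(notas(5.6, 10, 8.7, sit=False))  # result has no 'Situação' key
```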
#### File: jpsampaio/ExEcommIT/Ex104.py
```python
def leiaint(txt):
    n = input(f"{txt} \n")
    while True:
        try:
            n = int(n)  # convert, so the function really returns an int
            break
        except ValueError:
            print("Errou")
            n = input(f"{txt} \n")
    return n
a = leiaint('Digite um numero')
print(a)
```
#### File: jpsampaio/ExEcommIT/Ex106.py
```python
def tit(msg, c=0):
    n = len(msg)
    # ANSI SGR parameters are separated by ';' (the original ':' is not a valid separator)
    cores = {0: "\033[7;30m", 1: "\033[7;31m", 2: "\033[7;32m", 3: "\033[7;36m"}
    i = cores.get(c, "\033[7;30m")  # default avoids a NameError for unknown color codes
    f = "\033[m"
    print(f'{i}=' * (n + 4))
    print(f'{i} {msg} ')
    print(f'{i}=' * (n + 4))
    print(f)
def biblio():
    while True:
        tit('SISTEMA DE AJUDA PyHELP', 2)
        func = input('Função ou Biblioteca >> ')
        if func.upper() == 'FIM':  # '==' instead of 'in', so single letters do not end the loop
            tit('ATÉ LOGO!', 1)
            break
        tit(f'Acessando o manual de "{func}"', 3)
        print("\033[7;30m")
        help(func)
        print("\033[m")
biblio()
```
#### File: jpsampaio/ExEcommIT/Ex110.py
```python
# currency, double, half, increase and decrease are expected to come from a
# companion currency-formatting module that is not part of this file
def resume(value=0, rate_increase=0, rate_decrease=0):
print('-' * 32)
print(f'{"RESUMO DO VALOR":^30}')
print('-' * 32)
print(f'{"Preço analisado:":<20} {currency(value)}',)
print(f'{"Dobro do preço:":<20} {double(value, True)}')
print(f'{"Metade do preço:":<20} {half(value, True)}')
print(f'{rate_increase}{"% de aumento:":<{20 - findSizeInt(rate_increase)}} {increase(value, rate_increase, True)}')
print(f'{rate_decrease}{"% de redução:":<{20 - findSizeInt(rate_decrease)}} {decrease(value, rate_decrease, True)}')
print('-' * 32)
def findSizeInt(number):
from math import floor, log10
number = abs(int(number))
return 1 if number == 0 else floor(log10(number)) + 1
```
#### File: jpsampaio/ExEcommIT/Ex112.py
```python
def leia_cpf(msg="CPF: ", show=True):
from time import sleep
vad = False
while not vad:
erro = False
c1 = input(msg).strip().replace('.', '').replace('-', '').replace(',', '')
print('Verificando...')
sleep(1)
        if not c1.isdigit():  # rejects letters and any other non-digit symbol
            erro = True
        if len(c1) != 11:
            erro = True
        if erro:
            print('CPF inválido!')
else:
if show:
print(f'O CPF \'{c1[:3]}.{c1[3:6]}.{c1[6:9]}-{c1[9:]}\' é válido.')
else:
print('CPF válido.')
vad = True
return int(c1)
```
#### File: jpsampaio/ExEcommIT/Ex99.py
```python
from time import sleep
def maior(*valores):
    maior = valores[0] if valores else 0  # start from the first value so negative-only input works
print('-=' * 30)
print('Analisando valores passados..')
for c in valores:
print(f'{c}', end=" ", flush=True)
sleep(1)
if c > maior:
maior = c
print('')
print(f'Foram informados {len(valores)} valores ao todo')
print(f'O maior valor informado foi {maior}')
maior(2, 9 , 4, 5, 7, 1)
maior(4, 7, 0)
maior(1, 2)
maior(6)
maior()
``` |
{
"source": "jpsanr/YouGraph-Public",
"score": 2
} |
#### File: YouGraph-Public/models/topic_modeling.py
```python
import os
import sys
import csv
import re
import logging
import warnings
import numpy as np
import pandas as pd
from pprint import pprint
import spacy
from spacy.tokens import Doc
from spacy.attrs import LOWER, POS, ENT_TYPE, IS_ALPHA
from neo4j import GraphDatabase
import gensim
import gensim.corpora as corpora
from gensim import models, similarities
from gensim.utils import lemmatize, simple_preprocess
from gensim.models import CoherenceModel
from gensim.models.ldamulticore import LdaMulticore
from gensim.test.utils import datapath
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import seaborn as sns
from nltk.corpus import stopwords
from acessos import get_conn, read, persistir_banco, persistir_uma_linha, persistir_multiplas_linhas, replace_df
warnings.filterwarnings("ignore", category=DeprecationWarning)
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
class Topic_Modeling:
def __init__(self, language="pt-br", stop_words_list=[]):
self.language = language
self.stop_words = self._load_stop_words(stop_words_list)
self.nlp = self._load_spacy()
self.model_list =[]
self.coherence_values = []
self.lista_num_topics = []
self.melhor_modelo = None
def _load_spacy(self):
        '''private method that returns the spaCy model for the configured language'''
#disable_list = ['parser', 'ner']
disable_list = []
if self.language == "pt-br":
nlp = spacy.load('pt_core_news_lg', disable=disable_list)
elif self.language == "us-en":
nlp = spacy.load("en_core_web_sm", disable=disable_list)
return nlp
def _load_stop_words(self, stop_words_list=[]):
        '''private method that returns the stop words for the configured language'''
if self.language == "pt-br":
stop_words = stopwords.words('portuguese')
stop_words.extend(stop_words_list)
elif self.language == "us-en":
stop_words = stopwords.words('english') #Testar
stop_words.extend(['from', 'subject', 're', 'edu', 'use', 'not', 'would', 'say', 'could', '_', 'be', 'know', 'good', 'go', 'get', 'do', 'done', 'try', 'many', 'some', 'nice', 'thank', 'think', 'see', 'rather', 'easy', 'easily', 'lot', 'lack', 'make', 'want', 'seem', 'run', 'need', 'even', 'right', 'line', 'even', 'also', 'may', 'take', 'come'])
stop_words.extend(stop_words_list)
return stop_words
def filtrar_pos_tag(self, texto, allowed_postags=["NOUN", "PROPN", "VERB", "ADJ"]):
texto_saida = ""
doc = self.nlp(texto)
for token in doc:
if token.pos_ in allowed_postags:
texto_saida += " {}".format(token)
return texto_saida
def replace_ner_por_label(self, texto):
texto_out = texto
doc = self.nlp(texto)
for ent in reversed(doc.ents):
#label = " _" + ent.label_ + "_ "
label = ent.label_
comeco = ent.start_char
fim = comeco + len(ent.text)
texto_out = texto_out [:comeco] + label + texto_out[fim:]
return texto_out
    def processamento_inicial(self, lista_documentos):
        '''removes e-mails, line breaks and single quotes and expands common abbreviations'''
        # Expand common Portuguese chat abbreviations; the word boundaries keep
        # the patterns from matching inside larger words (e.g. 'td' inside 'tds')
        lista_documentos = [re.sub(r'\bneh\b', 'né', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\btd\b', 'tudo', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\btds\b', 'todos', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\bvc\b', 'você', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\bvcs\b', 'vocês', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\bvoce\b', 'você', sent) for sent in lista_documentos]
        lista_documentos = [re.sub(r'\btbm\b', 'também', sent) for sent in lista_documentos]
        # Remove e-mail addresses
        lista_documentos = [re.sub(r'\S*@\S*\s?', '', sent) for sent in lista_documentos]
        # Collapse newlines and repeated whitespace
        lista_documentos = [re.sub(r'\s+', ' ', sent) for sent in lista_documentos]
        # Remove distracting single quotes
        lista_documentos = [re.sub(r"'", "", sent) for sent in lista_documentos]
return lista_documentos
    def sent_to_words(self, sentences):
        '''tokenizes each document, yielding one token list per document'''
        for sentence in sentences:
            yield gensim.utils.simple_preprocess(str(sentence), deacc=False)  # deacc=False keeps accented characters
def tokenizar(self, lista_documentos):
        '''tokenizes a list of documents'''
lista_documentos_tokenizado = list(self.sent_to_words(lista_documentos))
return lista_documentos_tokenizado
def montar_n_grams(self, lista_documentos_tokenizado):
        '''builds bi-grams and tri-grams from a tokenized document list;
        call this method after removing stop words'''
bigram = gensim.models.Phrases(lista_documentos_tokenizado, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[lista_documentos_tokenizado], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
#retorna lista bigram e trigram
self.bigram = [bigram_mod[doc] for doc in lista_documentos_tokenizado]
self.trigram = [trigram_mod[bigram_mod[doc]] for doc in lista_documentos_tokenizado]
return self.bigram , self.trigram
def get_n_grams(self):
return self.bigram , self.trigram
def lematizar_documentos(self, lista_documentos_tokenizado):
"""https://spacy.io/api/annotation"""
documentos_out = []
for sent in lista_documentos_tokenizado:
doc = self.nlp(" ".join(sent))
lista_tokens_lematizados = []
for token in doc :
lista_tokens_lematizados.append(token.lemma_)
documentos_out.append(lista_tokens_lematizados)
return documentos_out
def remover_stop_words(self, lista_documentos_tokenizado):
return [[word for word in simple_preprocess(str(doc)) if word not in self.stop_words] for doc in lista_documentos_tokenizado]
    def montar_novo_corpus(self, nova_lista_documentos_lematizada, id2word):
        corpus = [id2word.doc2bow(text) for text in nova_lista_documentos_lematizada]
        return corpus
def pre_processar_texto_ou_lista(self, texto_ou_lista, filtro_ner=True, allowed_postags=["NOUN","PROPN", "VERB", "ADJ"]):
if isinstance(texto_ou_lista, str):
lista_documentos = [texto_ou_lista]
else:
lista_documentos = texto_ou_lista
lista_documentos = self.processamento_inicial(lista_documentos)
if filtro_ner==True:
lista_documentos = [self.replace_ner_por_label(texto) for texto in lista_documentos]
# if filtro_pos_tag==True:
# lista_documentos = [self.filtrar_pos_tag(texto) for texto in lista_documentos]
lista_documentos = [self.filtrar_pos_tag(texto, allowed_postags) for texto in lista_documentos]
lista_documentos_tokenizado = self.tokenizar(lista_documentos)
lista_documentos_tokenizado_stop_words = self.remover_stop_words(lista_documentos_tokenizado)
lista_documento_bi_gram, lista_documento_tri_gram = self.montar_n_grams(lista_documentos_tokenizado_stop_words)
lista_documento_lematizada = self.lematizar_documentos(lista_documento_tri_gram)
#lista_documento_lematizada = lista_documento_bi_gram
return lista_documento_lematizada
def gerar_modelo_hdp(self, corpus, id2word, texts):
model_hdp = models.HdpModel(corpus, id2word=id2word)
coherencemodel = CoherenceModel(model=model_hdp, texts=texts, dictionary=id2word, coherence='c_v')
self.melhor_modelo = model_hdp
return model_hdp, coherencemodel.get_coherence()
def gerar_multiplos_modelos(self, id2word, corpus, texts, limit, start=2, step=3):
print("Start: {}".format(start))
print("limit: {}".format(limit))
print("Step: {}".format(step))
self.start = start
self.limit = limit
self.step = step
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
print("Gerando novo modelo...")
            model = LdaMulticore(corpus=corpus,
                                 id2word=id2word,
                                 random_state=100,
                                 num_topics=num_topics,
                                 workers=3)
            model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=id2word, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
self.lista_num_topics.append(num_topics)
self.model_list = model_list
self.coherence_values = coherence_values
return model_list, coherence_values
def plotar_coerencia(self):
x = range(self.start, self.limit, self.step)
plt.plot(x, self.coherence_values)
plt.xlabel("Num de Tópicos")
plt.ylabel("Coherence score")
plt.legend(("coherence_values"), loc='best')
plt.show()
for m, cv in zip(x, self.coherence_values):
print("Num de Tópicos =", m, " valor coerência: ", round(cv, 4))
def classificar_novo_texto(self, texto, model,id2word):
lista_lematizada = self.pre_processar_texto_ou_lista(texto)
novo_corpus = self.montar_novo_corpus(lista_lematizada,id2word)
doc_bow = novo_corpus[0]
topicos = model[doc_bow]
#topicos_ordenados = sorted(topicos[0], key=lambda x: x[1], reverse=True)
topicos_ordenados = sorted(topicos, key=lambda x: x[1], reverse=True)
melhor_topico = topicos_ordenados[0]
#print(topicos_ordenados)
return melhor_topico, topicos_ordenados
def montar_id2word(self, lista_documento_lematizada):
id2word = corpora.Dictionary(lista_documento_lematizada)
return id2word
def montar_dict_models(self):
dict_models = {
"modelo": self.model_list,
"coerencia":self.coherence_values,
"num_topics": self.lista_num_topics
}
return dict_models
def salvar_modelos(self, diretorio, folder_name):
dict_models = self.montar_dict_models()
df_models = pd.DataFrame(dict_models)
folder_name = "{}\\{}".format(diretorio,folder_name)
try:
os.mkdir(folder_name)
except OSError:
print ("Erro na criação da pasta")
return "erro"
df_models['caminho'] = df_models.apply(lambda x: "{}\\#_{}".format(folder_name, str(x['num_topics'])), axis=1)
for row in df_models.iterrows():
row[1]['modelo'].save(row[1]['caminho'])
df_models.drop(['modelo'], axis=1, inplace=True)
dict_models = df_models.to_dict("records")
return dict_models
def retornar_melhor_modelo(self):
dict_models = self.montar_dict_models()
df_models = pd.DataFrame(dict_models)
self.melhor_modelo = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['modelo']
melhor_coerencia = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['coerencia']
num_topicos = df_models.sort_values(by=['coerencia'], ascending=False).iloc[0]['num_topics']
return self.melhor_modelo, melhor_coerencia, num_topicos
def retornar_top_key_words(self, modelo, num_palavras=30):
dict_palavras_topicos = {}
for index, topic in modelo.show_topics(num_topics=-1,num_words=num_palavras,formatted=False):
dict_words = {}
for i, palavra in enumerate(topic,start=1):
dict_words["palavra_{}".format(i)] = palavra[0]
dict_words["prob_{}".format(i)] = float(palavra[1])
#print("Palavra: {} - Peso: {}".format(palavra[0],palavra[1]))
dict_words["topico"] = index
dict_palavras_topicos["topico_"+str(index)] = dict_words
df_palavras = pd.DataFrame.from_dict(dict_palavras_topicos, orient='index')
return df_palavras, dict_palavras_topicos
def persistir_objeto_mongo(self, dict_dados):
dict_dados['lista_coerencia'] = self.coherence_values
dict_dados['palavras_melhor_modelo']
def processar_df_topicos_probabilidade(self, df):
        '''extracts the four most probable topics into separate columns'''
df['topico_1'] = df.apply(lambda x: x['lista_topicos'][0][0] ,axis=1)
df['prob_1'] = df.apply(lambda x: x['lista_topicos'][0][1] ,axis=1)
try:
df['topico_2'] = df.apply(lambda x: int(x['lista_topicos'][1][0]) if len(x['lista_topicos']) > 1 else None ,axis=1)
df['prob_2'] = df.apply(lambda x: float(x['lista_topicos'][1][1]) if len(x['lista_topicos']) > 1 else None ,axis=1)
except:
df['topico_2'] = None
df['prob_2'] = None
try:
df['topico_3'] = df.apply(lambda x: int(x['lista_topicos'][2][0]) if len(x['lista_topicos']) > 2 else None ,axis=1)
df['prob_3'] = df.apply(lambda x: float(x['lista_topicos'][2][1]) if len(x['lista_topicos']) > 2 else None ,axis=1)
except:
df['topico_3'] = None
df['prob_3'] = None
try:
df['topico_4'] = df.apply(lambda x: int(x['lista_topicos'][3][0]) if len(x['lista_topicos']) > 3 else None ,axis=1)
df['prob_4'] = df.apply(lambda x: float(x['lista_topicos'][3][1]) if len(x['lista_topicos']) > 3 else None ,axis=1)
except:
df['topico_4'] = None
df['prob_4'] = None
return df
```
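A rough end-to-end sketch of how the class above is apparently meant to be driven (the documents and stop word are made up; the `acessos` module and a local `pt_core_news_lg` spaCy model are assumed to be available):
```python
tm = Topic_Modeling(language="pt-br", stop_words_list=["olá"])
docs = ["Primeiro documento sobre jogos.", "Segundo documento sobre música."]
lemmas = tm.pre_processar_texto_ou_lista(docs)
id2word = tm.montar_id2word(lemmas)
corpus = tm.montar_novo_corpus(lemmas, id2word)
tm.gerar_multiplos_modelos(id2word, corpus, texts=lemmas, start=2, limit=6, step=2)
model, coherence, k = tm.retornar_melhor_modelo()
print(tm.classificar_novo_texto("Novo texto sobre jogos.", model, id2word))
```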
#### File: YouGraph-Public/models/tuple_extractor.py
```python
import pandas as pd
import os
import math
class Tuple_Extractor:
    '''
    Extracts knowledge tuples from text using the DptOIE system;
    DptOIE was developed by <NAME> (2020);
    since Sena and Claro's project was written in Java, a Python port was needed;
    - this class is responsible for that port.
    -> NOTE: several improvements can and should still be made to this class;
    '''
def __init__(self):
'''Do nothing'''
def extrair_tupla(self, texto):
status = -1
print(" -> Salvando texto no input.txt")
comando_rotina_java = "java -jar DptOIE.jar -sentencesIN .\\input.txt > log_java.txt -SC true -CC true -appositive 1 -appositive 2"
path = os.getcwd()
dir_java = os.path.abspath(os.path.join(path, os.pardir))+ "\\Java"
arquivo = dir_java + "\\input.txt"
        arquivo = open(arquivo, "r+", encoding="utf-8")
        arquivo.truncate(0)  # clear the previous contents
        arquivo.write(texto)  # write the received text
        arquivo.close()
print(" -> Extraindo Tuplas...")
try:
            os.chdir(dir_java)  # switch to the Java folder
            os.system(comando_rotina_java)
            os.chdir(path)  # switch back to the original working directory
print("Pronto :D")
status = 1
except Exception as e:
print(e)
return status
def get_ultimas_tuplas_geradas(self):
path = os.getcwd()
dir_java = os.path.abspath(os.path.join(path, os.pardir))+ "\\Java"
        os.chdir(dir_java)  # switch to the Java folder
df_tuple = pd.read_csv('extractedFactsByDpOIE.csv', sep=";", encoding="ansi")
df_tuple = df_tuple.rename(columns={' "SENTENÇA" ': 'sentenca',' "ARG1" ': 'arg1', ' "REL" ': 'rel',' "ARG2" ': 'arg2'})
        self.processar_sentence_Nan(df_tuple)  # fills in the correct sentence where it is NaN
df_tuple = df_tuple.drop([' "ID SENTENÇA" ',
' "ID EXTRAÇÃO" ',
' "COERÊNCIA" ',
' "MINIMALIDADE" ',
' "MÓDULO SUJEITO" ',
' "MÓDULO RELAÇÃO" ',
' "MÓDULO ARG2"'], axis=1)
df_tuple = df_tuple.loc[:, ~df_tuple.columns.str.contains('^Unnamed')]
df_tuple.dropna(inplace=True)
        os.chdir(path)  # switch back to the original working directory
return df_tuple
def processar_sentence_Nan(self, df_tuple):
aux_sentenca = ''
for key, row in df_tuple.iterrows():
sentenca = row['sentenca']
            if not isinstance(sentenca, str) and math.isnan(sentenca):
sentenca = aux_sentenca
df_tuple.loc[key, "sentenca"] = sentenca
else:
aux_sentenca = sentenca
``` |
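For illustration, the extractor above would be driven roughly like this (it only works with Java and DptOIE.jar set up in the sibling `Java` folder the code expects):
```python
extractor = Tuple_Extractor()
if extractor.extrair_tupla('Maria comprou um carro novo.') == 1:
    df = extractor.get_ultimas_tuplas_geradas()
    print(df[['arg1', 'rel', 'arg2']])
```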
{
"source": "jpsantos-mf/ezdxf",
"score": 2
} |
#### File: examples/addons/r12writer.py
```python
from pathlib import Path
from time import perf_counter
import math
from ezdxf.addons import MengerSponge
from ezdxf.addons import r12writer
from ezdxf.render.forms import sphere, circle, translate
DIR = Path('~/Desktop/Outbox').expanduser()
def menger_sponge(filename, level=1, kind=0):
t0 = perf_counter()
sponge = MengerSponge(level=level, kind=kind).mesh()
t1 = perf_counter()
print(f'Build menger sponge <{kind}> in {t1 - t0:.5f}s.')
with r12writer(filename) as r12:
r12.add_polyface(sponge.vertices, sponge.faces, color=1)
print(f'saved as "{filename}".')
def polymesh(filename, size=(10, 10)):
m, n = size # rows, cols
dx = math.pi / m * 2
dy = math.pi / n * 2
vertices = []
for x in range(m): # rows second
z1 = math.sin(dx * x)
for y in range(n): # cols first
z2 = math.sin(dy * y)
z = z1 * z2
vertices.append((x, y, z))
with r12writer(filename) as r12:
r12.add_polymesh(vertices, size=size, color=1)
print(f'saved as "{filename}".')
def polyface_sphere(filename):
mesh = sphere(16, 8, quads=True)
with r12writer(filename) as r12:
r12.add_polyface(mesh.vertices, mesh.faces, color=1)
print(f'saved as "{filename}".')
def polylines(filename):
with r12writer(filename) as r12:
r12.add_polyline_2d(circle(8), color=1, closed=False)
r12.add_polyline_2d(translate(circle(8), vec=(3, 0)), color=3, closed=True)
r12.add_polyline_2d(
[(0, 4), (4, 4, 1), (8, 4, 0, 0.2, 0.000001), (12, 4)],
format='xybse',
start_width=0.1,
end_width=0.1,
color=5,
)
print(f'saved as "{filename}".')
if __name__ == '__main__':
menger_sponge(DIR / "menger_sponge_r12.dxf", level=2)
polymesh(DIR / "polymesh.dxf", size=(20, 10))
polyface_sphere(DIR / "sphere.dxf")
polylines(DIR / "polylines.dxf")
```
#### File: examples/render/show_all_arrows.py
```python
import ezdxf
doc = ezdxf.new('R2007', setup=True)
msp = doc.modelspace()
y = 0
for index, name in enumerate(sorted(ezdxf.ARROWS.__all_arrows__)):
if name == "":
label = '"" = closed filled'
else:
label = name
y = index * 2
def add_connection_point(p):
msp.add_circle(p, radius=0.01, dxfattribs={'color': 1})
msp.add_text(label, {'style': 'OpenSans', 'height': .25}).set_pos((-5, y - .5))
msp.add_line((-5, y), (-1, y))
msp.add_line((5, y), (10, y))
# left side |<- is the reverse orientation
cp1 = msp.add_arrow(name, insert=(0, y), size=1, rotation=180, dxfattribs={'color': 7})
# right side ->| is the base orientation
cp2 = msp.add_arrow(name, insert=(4, y), size=1, rotation=0, dxfattribs={'color': 7})
msp.add_line(cp1, cp2)
add_connection_point(cp1)
add_connection_point(cp2)
add_connection_point(msp.add_arrow_blockref(name, insert=(7, y), size=.3, rotation=45))
add_connection_point(msp.add_arrow_blockref(name, insert=(7.5, y), size=.3, rotation=135))
add_connection_point(msp.add_arrow_blockref(name, insert=(8, y), size=.5, rotation=-90))
msp.add_line((0, 0), (0, y))
msp.add_line((4, 0), (4, y))
msp.add_line((8, 0), (8, y))
doc.saveas('all_arrows_{}.dxf'.format(doc.acad_release))
```
#### File: ezdxf/integration_tests/test_create_basic_graphics.py
```python
import pytest
import os
import ezdxf
from ezdxf.lldxf.const import versions_supported_by_new
@pytest.fixture(params=versions_supported_by_new)
def drawing(request):
return ezdxf.new(request.param)
def add_line_entities(layout, offset):
for color in range(1, 256):
layout.add_line((offset+0, color), (offset+50, color), {
'color': color,
'layer': 'Träger'
})
def test_basic_graphics(drawing, tmpdir):
add_line_entities(drawing.modelspace(), 0)
add_line_entities(drawing.layout(), 70)
filename = str(tmpdir.join('basic_graphics_%s.dxf' % drawing.dxfversion))
try:
drawing.saveas(filename)
except ezdxf.DXFError as e:
pytest.fail("DXFError: {0} for DXF version {1}".format(str(e), drawing.dxfversion))
assert os.path.exists(filename) is True
```
#### File: ezdxf/integration_tests/test_leica_disto_r12.py
```python
import os
import pytest
import ezdxf
BASEDIR = os.path.dirname(__file__)
DATADIR = 'data'
@pytest.fixture(params=['Leica_Disto_S910.dxf'])
def filename(request):
filename = os.path.join(BASEDIR, DATADIR, request.param)
if not os.path.exists(filename):
pytest.skip(f'File {filename} not found.')
return filename
def test_leica_disto_r12(filename):
doc = ezdxf.readfile(filename)
msp = doc.modelspace()
points = list(msp.query('POINT'))
assert len(points) == 11
assert len(points[0].dxf.location) == 3
```
#### File: ezdxf/integration_tests/test_load_dxf_unicode_notation.py
```python
import os
import pytest
from ezdxf import recover
BASEDIR = os.path.dirname(__file__)
DATADIR = 'data'
@pytest.fixture(params=['ASCII_R12.dxf'])
def filename(request):
filename = os.path.join(BASEDIR, DATADIR, request.param)
if not os.path.exists(filename):
pytest.skip(f'File {filename} not found.')
return filename
def test_load_special_dxf_unicode_notation(filename):
doc, auditor = recover.readfile(filename)
layer = doc.layers.get('ΛΑΓΕΡÄÜÖ')
assert layer.dxf.name == 'ΛΑΓΕΡÄÜÖ'
msp = doc.modelspace()
lines = msp.query('LINE[layer=="ΛΑΓΕΡÄÜÖ"]')
assert len(lines) == 2
```
#### File: ezdxf/integration_tests/test_read_write_modern_entites.py
```python
import pytest
import os
import ezdxf
from ezdxf.lldxf.const import LATEST_DXF_VERSION
def test_lwpolyline(tmpdir):
dwg = ezdxf.new(LATEST_DXF_VERSION)
msp = dwg.modelspace()
# point format = (x, y, [start_width, [end_width, [bulge]]])
points = [(0, 0, 0, .05), (3, 0, .1, .2, -.5), (6, 0, .1, .05), (9, 0)]
msp.add_lwpolyline(points)
filename = str(tmpdir.join('lwpolyline.dxf'))
try:
dwg.saveas(filename)
except ezdxf.DXFError as e:
pytest.fail("DXFError: {0} for DXF version {1}".format(str(e), dwg.dxfversion))
assert os.path.exists(filename)
del dwg
dwg = ezdxf.readfile(filename)
msp = dwg.modelspace()
lwpolyline = msp.query('LWPOLYLINE')[0]
assert len(lwpolyline) == 4
pts = lwpolyline.get_points()
assert pts[0] == (0, 0, 0, .05, 0)
assert pts[1] == (3, 0, .1, .2, -.5)
assert pts[2] == (6, 0, .1, .05, 0)
assert pts[3] == (9, 0, 0, 0, 0)
```
#### File: issues/20160305 convert R8-R13-R14/convert_R13_to_R2000.py
```python
import glob
import os
import ezdxf
R13_DIR = r"D:\Source\dxftest\R13_test_files"
def convert_dxf_file(dxfin, dxfout):
print("Opening %s" % dxfin)
dwg = ezdxf.readfile(dxfin)
dwg.saveas(dxfout)
print("Ready.")
def main():
for filename in glob.glob(os.path.join(R13_DIR, '*.dxf')):
folder, name = os.path.split(filename)
convert_dxf_file(filename, os.path.join(R13_DIR, 'converted_to_R2000', name))
if __name__ == '__main__':
main()
```
#### File: issues/20160522 #20 constraints/vertex-debug.py
```python
import ezdxf
import sys
def main():
    print('Python version is: ' + sys.version)
    print('ezdxf version is: ' + ezdxf.__version__)
    print('\nCoordinate for layer 0')
    find_coordinates(filename='test.dxf', layer_name='0')
    print('\nCoordinate for layer 1')
    find_coordinates(filename='test.dxf', layer_name='Layer 1')
    print('\nCoordinate for layer 2')
    find_coordinates(filename='test.dxf', layer_name='Layer 2')
def find_coordinates(filename='test.dxf', layer_name='0'):
    dwg_dxf = ezdxf.readfile(filename)
    for e in dwg_dxf.entities:
        if layer_name in e.get_dxf_attrib(key='layer') and e.dxftype() == 'LWPOLYLINE':
            polygon_points = []
            for i in e.get_rstrip_points():
                polygon_points.append(i)
            print(polygon_points)
if __name__ == '__main__':
main()
```
#### File: issues/20170328 #12/query_issue.py
```python
import ezdxf
import random
def get_random_point():
"""Creates random x, y coordinates."""
x = random.randint(-100, 100)
y = random.randint(-100, 100)
return x, y
dwg = ezdxf.new('R2010')
flag = dwg.blocks.new(name='FLAG')
flag.add_polyline2d([(0, 0), (0, 5), (4, 3), (0, 3)])
flag.add_circle((0, 0), .4, dxfattribs={'color': 2})
msp = dwg.modelspace()
for _ in range(50):
msp.add_blockref(name='FLAG', insert=get_random_point())
q = msp.query('INSERT')
entity = q[0]
query = ezdxf.query.new()
query.extend([entity])
```
#### File: ezdxf/profiling/read_big_R12_files.py
```python
import time
from pathlib import Path
DIR = Path(r'D:\Source\dxftest\CADKitSamples')
_3D_MODEL = DIR / 'fanuc-430-arm.dxf'
_2D_PLAN = DIR / 'AEC Plan Elev Sample.dxf'
def load_3D_model():
import ezdxf
ezdxf.readfile(filename=_3D_MODEL)
def iter_3D_model():
import ezdxf
doc = ezdxf.readfile(filename=_3D_MODEL)
msp = doc.modelspace()
count = 0
for e in msp:
e.dxftype()
count += 1
print(f'Iterated {count} entities in modelspace (fanuc-430-arm.dxf).')
del doc
def single_pass_iter_3D_model():
from ezdxf.addons.iterdxf import single_pass_modelspace
count = 0
for e in single_pass_modelspace(open(_3D_MODEL, 'rb')):
e.dxftype()
count += 1
print(f'Iterated {count} entities in modelspace (fanuc-430-arm.dxf).')
def from_disk_iter_3D_model():
from ezdxf.addons.iterdxf import opendxf
count = 0
doc = opendxf(_3D_MODEL)
for e in doc.modelspace():
e.dxftype()
count += 1
doc.close()
print(f'Iterated {count} entities in modelspace (fanuc-430-arm.dxf).')
def load_2D_plan():
import ezdxf
ezdxf.readfile(_2D_PLAN)
def iter_2D_plan():
import ezdxf
doc = ezdxf.readfile(_2D_PLAN)
msp = doc.modelspace()
count = 0
for e in msp:
e.dxftype()
count += 1
print(f'Iterated {count} entities in modelspace (AEC Plan Elev Sample.dxf).')
del doc
def print_result(time, text):
print(f'Operation: {text} takes {time:.2f} s\n')
def run(func):
start = time.perf_counter()
func()
end = time.perf_counter()
return end - start
if __name__ == '__main__':
    print_result(run(load_3D_model), 'ezdxf.readfile() - load "fanuc-430-arm.dxf"')
    print_result(run(iter_3D_model), 'ezdxf.readfile() - iteration "fanuc-430-arm.dxf"')
    print_result(run(single_pass_iter_3D_model), 'iterdxf.single_pass_modelspace() - single pass iteration from disk "fanuc-430-arm.dxf"')
    print_result(run(from_disk_iter_3D_model), 'iterdxf.opendxf() - seekable file iteration from disk "fanuc-430-arm.dxf"')
print_result(run(load_2D_plan), 'ezdxf.readfile() - load "AEC Plan Elev Sample.dxf"')
print_result(run(iter_2D_plan), 'ezdxf.readfile() - iter "AEC Plan Elev Sample.dxf"')
```
#### File: ezdxf/profiling/reading_samples.py
```python
import ezdxf
import os
from collections import Counter
import time
from ezdxf import EZDXF_TEST_FILES
CADKIT = "CADKitSamples"
CADKIT_FILES = [
"A_000217.dxf", # 0
"AEC Plan Elev Sample.dxf", # 1
"backhoe.dxf", # 2
"BIKE.DXF", # 3
"cnc machine.dxf", # 4
"Controller-M128-top.dxf", # 5
"drilling_machine.dxf", # 6
"fanuc-430-arm.dxf", # 7
"Floor plan.dxf", # 8
"gekko.DXF", # 9
"house design for two family with comman staircasedwg.dxf", # 10
"house design.dxf", # 11
"kit-dev-coldfire-xilinx_5213.dxf", # 12
"Laurana50k.dxf", # 13
"Lock-Off.dxf", # 14
"Mc Cormik-D3262.DXF", # 15
"Mechanical Sample.dxf", # 16
"Nikon_D90_Camera.DXF", # 17
"pic_programmer.dxf", # 18
"Proposed Townhouse.dxf", # 19
"Shapefont.dxf", # 20
"SMA-Controller.dxf", # 21
"Tamiya TT-01.DXF", # 22
"torso_uniform.dxf", # 23
"Tyrannosaurus.DXF", # 24
"WOOD DETAILS.dxf", # 25
]
STD_FILES = [
CADKIT_FILES[1],
CADKIT_FILES[23],
]
def count_entities(msp):
counter = Counter()
for entity in msp:
counter[entity.dxftype()] += 1
return counter
for _name in CADKIT_FILES:
filename = os.path.join(EZDXF_TEST_FILES, CADKIT, _name)
print(f'reading file: {filename}')
start_reading = time.perf_counter()
doc = ezdxf.readfile(filename)
msp = doc.modelspace()
new_entities = count_entities(msp)
new_count = len(msp)
new_timing = time.perf_counter() - start_reading
print(f'loaded {new_count} entities in {new_timing:.3f} sec')
```
#### File: addons/drawing/debug_utils.py
```python
from typing import List
from ezdxf.addons.drawing.backend import Backend
from ezdxf.addons.drawing.type_hints import Color
from ezdxf.math import Vec3
def draw_rect(points: List[Vec3], color: Color, out: Backend):
from ezdxf.addons.drawing import Properties
props = Properties()
props.color = color
for a, b in zip(points, points[1:]):
out.draw_line(a, b, props)
```
#### File: addons/drawing/utils.py
```python
from typing import List
from ezdxf.math import Vec3
def get_tri_or_quad_points(solid, adjust_order=True) -> List[Vec3]:
d = solid.dxf
vertices: List[Vec3] = [d.vtx0, d.vtx1, d.vtx2]
if d.vtx3 != d.vtx2: # when the face is a triangle, vtx2 == vtx3
vertices.append(d.vtx3)
# adjust weird vertex order of SOLID and TRACE but not 3DFACE:
# 0, 1, 2, 3 -> 0, 1, 3, 2
if adjust_order and len(vertices) > 3:
vertices[2], vertices[3] = vertices[3], vertices[2]
if not vertices[0].isclose(vertices[-1]):
vertices.append(vertices[0])
return vertices
```
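A quick sketch of the helper in action, using a SOLID built through the regular ezdxf API (output shape is indicative only):
```python
import ezdxf
doc = ezdxf.new()
msp = doc.modelspace()
# SOLID stores its corners in the 'weird' 0, 1, 3, 2 order handled above
solid = msp.add_solid([(0, 0), (2, 0), (0, 2), (2, 2)])
print(get_tri_or_quad_points(solid))  # closed loop with vtx2/vtx3 swapped back
```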
#### File: addons/dwg/fileheader.py
```python
import struct
from .const import *
from .crc import crc8
codepage_to_encoding = {
37: 'cp874', # Thai,
38: 'cp932', # Japanese
39: 'gbk', # UnifiedChinese
40: 'cp949', # Korean
41: 'cp950', # TradChinese
28: 'cp1250', # CentralEurope
29: 'cp1251', # Cyrillic
30: 'cp1252', # WesternEurope
32: 'cp1253', # Greek
33: 'cp1254', # Turkish
34: 'cp1255', # Hebrew
35: 'cp1256', # Arabic
36: 'cp1257', # Baltic
}
FILE_HEADER_MAGIC = {
3: 0xa598,
4: 0x8101,
5: 0x3cc4,
6: 0x8461,
}
class FileHeader:
def __init__(self, data: Bytes, crc_check=False):
self.crc_check = crc_check
if len(data) < 6:
raise DwgVersionError('Not a DWG file.')
ver = data[:6].decode(errors='ignore')
if ver not in SUPPORTED_VERSIONS:
raise DwgVersionError(f'Not a DWG file or unsupported DWG version, signature: {ver}.')
self.version: str = ver
codepage: int = struct.unpack_from('<h', data, 0x13)[0]
self.encoding = codepage_to_encoding.get(codepage, 'cp1252')
self.maintenance_release_version = data[0xB]
self.sections = dict()
if self.version <= ACAD_2000:
self.r2000_header(data)
else:
raise DwgVersionError(self.version)
def r2000_header(self, data: Bytes):
index = 0x15
section_count: int = struct.unpack_from('<L', data, index)[0]
index += 4
fmt = '<BLL'
record_size = struct.calcsize(fmt)
for record in range(section_count):
# 0: HEADER_ID
# 1: CLASSES_ID
# 2: OBJECTS_ID
num, seeker, size = struct.unpack_from(fmt, data, index)
index += record_size
self.sections[num] = (seeker, size)
if self.crc_check:
# CRC from first byte of file until start of crc value
check = crc8(data[:index], seed=0) ^ FILE_HEADER_MAGIC[len(self.sections)]
crc = struct.unpack_from('<H', data, index)[0]
if crc != check:
raise CRCError('CRC error in file header.')
index += 2
sentinel = data[index: index + SENTINEL_SIZE]
if sentinel != b'\x95\xA0\x4E\x28\x99\x82\x1A\xE5\x5E\x41\xE0\x5F\x9D\x3A\x4D\x00':
raise DwgCorruptedFileHeader('Corrupted DXF R13/14/2000 file header.')
def print(self):
print(f'DWG version: {self.version}')
print(f'encoding: {self.encoding}')
print(f'Records: {len(self.sections)}')
print('Header: seeker {0[0]} size: {0[1]}'.format(self.sections[0]))
print('Classes: seeker {0[0]} size: {0[1]}'.format(self.sections[1]))
print('Objects: seeker {0[0]} size: {0[1]}'.format(self.sections[2]))
```
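A hypothetical usage sketch (the filename is a placeholder; only R13/R14/R2000 files reach the r2000_header() branch, and the header region should fit well within the first kilobyte):
```python
with open('sample_r2000.dwg', 'rb') as fp:  # placeholder filename
    header = FileHeader(fp.read(1024), crc_check=True)
header.print()
```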
#### File: addons/dwg/header_section.py
```python
from typing import Dict, Any, List, Tuple
from abc import abstractmethod
import struct
from ezdxf.lldxf.const import acad_release_to_dxf_version
from ezdxf.tools.binarydata import BitStream
from .const import *
from .crc import crc8
from .fileheader import FileHeader
def load_header_section(specs: FileHeader, data: Bytes, crc_check=False):
if specs.version <= ACAD_2000:
return DwgHeaderSectionR2000(specs, data, crc_check)
else:
return DwgHeaderSectionR2004(specs, data, crc_check)
class DwgSectionLoader:
def __init__(self, specs: FileHeader, data: Bytes, crc_check=False):
self.specs = specs
self.crc_check = crc_check
self.data = self.load_data_section(data)
@abstractmethod
def load_data_section(self, data: Bytes) -> Bytes:
...
class DwgHeaderSectionR2000(DwgSectionLoader):
def load_data_section(self, data: Bytes) -> Bytes:
if self.specs.version > ACAD_2000:
raise DwgVersionError(self.specs.version)
seeker, section_size = self.specs.sections[HEADER_ID]
return data[seeker:seeker + section_size]
def load_header_vars(self) -> Dict:
data = self.data
sentinel = data[:16]
if sentinel != b'\xCF\x7B\x1F\x23\xFD\xDE\x38\xA9\x5F\x7C\x68\xB8\x4E\x6D\x33\x5F':
raise DwgCorruptedHeaderSection('Sentinel for start of HEADER section not found.')
index = 16
size = struct.unpack_from('<L', data, index)[0]
index += 4
bs = BitStream(data[index: index + size], dxfversion=self.specs.version, encoding=self.specs.encoding)
hdr_vars = parse_header(bs)
index += size
if self.crc_check:
check = struct.unpack_from('<H', data, index)[0]
# CRC of data from end of sentinel until start of crc value
crc = crc8(data[16:-18], seed=0xc0c1)
if check != crc:
raise CRCError('CRC error in header section.')
sentinel = data[-16:]
if sentinel != b'\x30\x84\xE0\xDC\x02\x21\xC7\x56\xA0\x83\x97\x47\xB1\x92\xCC\xA0':
raise DwgCorruptedHeaderSection('Sentinel for end of HEADER section not found.')
return hdr_vars
class DwgHeaderSectionR2004(DwgHeaderSectionR2000):
    def load_data_section(self, data: Bytes) -> Bytes:  # override the base hook; R2004+ loading is not implemented yet
raise NotImplementedError()
CMD_SET_VERSION = 'ver'
CMD_SKIP_BITS = 'skip_bits'
CMD_SKIP_NEXT_IF = 'skip_next_if'
CMD_SET_VAR = 'var'
def _min_max_versions(version: str) -> Tuple[str, str]:
min_ver = ACAD_13
max_ver = ACAD_LATEST
if version != 'all':
v = version.split('-')
if len(v) > 1:
min_ver = acad_release_to_dxf_version[v[0].strip()]
max_ver = acad_release_to_dxf_version[v[1].strip()]
else:
v = v[0].strip()
if v[-1] == '+':
min_ver = acad_release_to_dxf_version[v[:-1]]
else:
min_ver = max_ver = acad_release_to_dxf_version[v]
return min_ver, max_ver
def load_commands(desc: str) -> List[Tuple[str, Any]]:
commands = []
lines = desc.split('\n')
for line in lines:
line = line.strip()
if not line or line[0] == '#':
continue
try:
command, param = line.split(':')
except ValueError:
raise ValueError(f'Unpack Error in line: {line}')
command = command.strip()
param = param.split('#')[0].strip()
if command == CMD_SET_VERSION:
commands.append((CMD_SET_VERSION, _min_max_versions(param)))
elif command in {CMD_SKIP_BITS, CMD_SKIP_NEXT_IF}:
commands.append((command, param))
elif command[0] == '$':
commands.append((CMD_SET_VAR, (command, param)))
else:
raise ValueError(f'Unknown command: {command}')
return commands
def parse_bitstream(bs: BitStream, commands: List[Tuple[str, Any]]) -> Dict[str, Any]:
version = bs.dxfversion
min_ver = ACAD_13
max_ver = ACAD_LATEST
hdr_vars = dict()
skip_next_cmd = False
for cmd, params in commands:
if skip_next_cmd:
skip_next_cmd = False
continue
if cmd == CMD_SET_VERSION:
min_ver, max_ver = params
elif cmd == CMD_SKIP_BITS:
bs.skip(int(params))
elif cmd == CMD_SKIP_NEXT_IF:
skip_next_cmd = eval(params, None, {'header': hdr_vars})
elif cmd == CMD_SET_VAR:
if min_ver <= version <= max_ver:
name, code = params
hdr_vars[name] = bs.read_code(code)
else:
raise ValueError(f'Unknown command: {cmd}')
return hdr_vars
def parse_header(bs: BitStream) -> Dict[str, Any]:
commands = load_commands(HEADER_DESCRIPTION)
return parse_bitstream(bs, commands)
HEADER_DESCRIPTION = """
ver: R2007
$SIZE_IN_BITS: RL # Size in bits
ver: R2013+
$REQUIREDVERSIONS: BLL # default value 0, read only
ver: all
$UNKNOWN: BD # Unknown, default value 412148564080.0
$UNKNOWN: BD # Unknown, default value 1.0
$UNKNOWN: BD # Unknown, default value 1.0
$UNKNOWN: BD # Unknown, default value 1.0
$UNKNOWN: TV # Unknown text string, default ""
$UNKNOWN: TV # Unknown text string, default ""
$UNKNOWN: TV # Unknown text string, default ""
$UNKNOWN: TV # Unknown text string, default ""
$UNKNOWN: BL # Unknown long, default value 24L
$UNKNOWN: BL # Unknown long, default value 0L;
ver: R13-R14
$UNKNOWN: BS # Unknown short, default value 0
ver: R13-R2000
$CURRENT_VIEWPORT_ENTITY_HEADER: H # Handle of the current viewport entity header (hard pointer)
ver: all
$DIMASO: B
$DIMSHO: B
ver: R13-R14
$DIMSAV: B # Undocumented
ver: all
$PLINEGEN: B
$ORTHOMODE: B
$REGENMODE: B
$FILLMODE: B
$QTEXTMODE: B
$PSLTSCALE: B
$LIMCHECK: B
ver: R13-R14
$BLIPMODE: B
ver: R2004+
$UNKNOWN: B # Undocumented
ver: all
$USRTIMER: B # (User timer on/off)
$SKPOLY: B
$ANGDIR: B
$SPLFRAME: B
ver: R13-R14
$ATTREQ: B
$ATTDIA: B
ver: all
$MIRRTEXT: B
$WORLDVIEW: B
ver: R13-R14
$WIREFRAME: B # Undocumented.
ver: all
$TILEMODE: B
$PLIMCHECK: B
$VISRETAIN: B
ver: R13-R14
$DELOBJ: B
ver: all
$DISPSILH: B
$PELLIPSE: B # (not present in DXF)
$PROXYGRAPHICS: BS
ver: R13-R14
$DRAGMODE: BS
ver: all
$TREEDEPTH: BS
$LUNITS: BS
$LUPREC: BS
$AUNITS: BS
$AUPREC: BS
ver: R13-R14
$OSMODE: BS
ver: all
$ATTMODE: BS
ver: R13-R14
$COORDS: BS
ver: all
$PDMODE: BS
ver: R13-R14
$PICKSTYLE: BS
ver: R2004+
$UNKNOWN: BL
$UNKNOWN: BL
$UNKNOWN: BL
ver: all
$USERI1: BS
$USERI2: BS
$USERI3: BS
$USERI4: BS
$USERI5: BS
$SPLINESEGS: BS
$SURFU: BS
$SURFV: BS
$SURFTYPE: BS
$SURFTAB1: BS
$SURFTAB2: BS
$SPLINETYPE: BS
$SHADEDGE: BS
$SHADEDIF: BS
$UNITMODE: BS
$MAXACTVP: BS
$ISOLINES: BS
$CMLJUST: BS
$TEXTQLTY: BS
$LTSCALE: BD
$TEXTSIZE: BD
$TRACEWID: BD
$SKETCHINC: BD
$FILLETRAD: BD
$THICKNESS: BD
$ANGBASE: BD
$PDSIZE: BD
$PLINEWID: BD
$USERR1: BD
$USERR2: BD
$USERR3: BD
$USERR4: BD
$USERR5: BD
$CHAMFERA: BD
$CHAMFERB: BD
$CHAMFERC: BD
$CHAMFERD: BD
$FACETRES: BD
$CMLSCALE: BD
$CELTSCALE: BD
ver: R13-R2004
$MENUNAME: TV
ver: all
$TDCREATE: BL # (Julian day)
$TDCREATE: BL # (Milliseconds into the day)
$TDUPDATE: BL # (Julian day)
$TDUPDATE: BL # (Milliseconds into the day)
ver: R2004+
$UNKNOWN: BL
$UNKNOWN: BL
$UNKNOWN: BL
ver: all
$TDINDWG: BL # (Days)
$TDINDWG: BL # (Milliseconds into the day)
$TDUSRTIMER: BL # (Days)
$TDUSRTIMER: BL # (Milliseconds into the day)
$CECOLOR: CMC
# with an 8-bit length specifier preceding the handle bytes (standard hex handle form) (code 0).
# The HANDSEED is not part of the handle stream, but of the normal data stream (relevant for R21 and later).
$HANDSEED: H # The next handle
$CLAYER: H # (hard pointer)
$TEXTSTYLE: H # (hard pointer)
$CELTYPE: H # (hard pointer)
ver: R2007+
$CMATERIAL: H # (hard pointer)
ver: all
$DIMSTYLE: H # (hard pointer)
$CMLSTYLE: H # (hard pointer)
ver: R2000+
$PSVPSCALE: BD
ver: all
$PINSBASE: 3BD # (PSPACE)
$PEXTMIN: 3BD # (PSPACE)
$PEXTMAX: 3BD # (PSPACE)
$PLIMMIN: 2RD # (PSPACE)
$PLIMMAX: 2RD # (PSPACE)
$PELEVATION: BD # (PSPACE)
$PUCSORG: 3BD # (PSPACE)
$PUCSXDIR: 3BD # (PSPACE)
$PUCSYDIR: 3BD # (PSPACE)
$PUCSNAME: H # (PSPACE) (hard pointer)
ver: R2000+
$PUCSORTHOREF: H # (hard pointer)
$PUCSORTHOVIEW: BS
$PUCSBASE: H # (hard pointer)
$PUCSORGTOP: 3BD
$PUCSORGBOTTOM: 3BD
$PUCSORGLEFT: 3BD
$PUCSORGRIGHT: 3BD
$PUCSORGFRONT: 3BD
$PUCSORGBACK: 3BD
ver: all
$INSBASE: 3BD # (MSPACE)
$EXTMIN: 3BD # (MSPACE)
$EXTMAX: 3BD # (MSPACE)
$LIMMIN: 2RD # (MSPACE)
$LIMMAX: 2RD # (MSPACE)
$ELEVATION: BD # (MSPACE)
$UCSORG: 3BD # (MSPACE)
$UCSXDIR: 3BD # (MSPACE)
$UCSYDIR: 3BD # (MSPACE)
$UCSNAME: H # (MSPACE) (hard pointer)
ver: R2000+
$UCSORTHOREF: H # (hard pointer)
$UCSORTHOVIEW: BS
$UCSBASE: H # (hard pointer)
$UCSORGTOP: 3BD
$UCSORGBOTTOM: 3BD
$UCSORGLEFT: 3BD
$UCSORGRIGHT: 3BD
$UCSORGFRONT: 3BD
$UCSORGBACK: 3BD
$DIMPOST: TV
$DIMAPOST: TV
ver: R13-R14
$DIMTOL: B
$DIMLIM: B
$DIMTIH: B
$DIMTOH: B
$DIMSE1: B
$DIMSE2: B
$DIMALT: B
$DIMTOFL: B
$DIMSAH: B
$DIMTIX: B
$DIMSOXD: B
$DIMALTD: RC
$DIMZIN: RC
$DIMSD1: B
$DIMSD2: B
$DIMTOLJ: RC
$DIMJUST: RC
$DIMFIT: RC
$DIMUPT: B
$DIMTZIN: RC
$DIMALTZ: RC
$DIMALTTZ: RC
$DIMTAD: RC
$DIMUNIT: BS
$DIMAUNIT: BS
$DIMDEC: BS
$DIMTDEC: BS
$DIMALTU: BS
$DIMALTTD: BS
$DIMTXSTY: H # (hard pointer)
ver: all
$DIMSCALE: BD
$DIMASZ: BD
$DIMEXO: BD
$DIMDLI: BD
$DIMEXE: BD
$DIMRND: BD
$DIMDLE: BD
$DIMTP: BD
$DIMTM: BD
ver: R2007+
$DIMFXL: BD
$DIMJOGANG: BD
$DIMTFILL: BS
$DIMTFILLCLR: CMC
ver: R2000+
$DIMTOL: B
$DIMLIM: B
$DIMTIH: B
$DIMTOH: B
$DIMSE1: B
$DIMSE2: B
$DIMTAD: BS
$DIMZIN: BS
$DIMAZIN: BS
ver: R2007+
$DIMARCSYM: BS
ver: all
$DIMTXT: BD
$DIMCEN: BD
$DIMTSZ: BD
$DIMALTF: BD
$DIMLFAC: BD
$DIMTVP: BD
$DIMTFAC: BD
$DIMGAP: BD
ver: R13-R14
$DIMPOST: T
$DIMAPOST: T
$DIMBLK: T
$DIMBLK1: T
$DIMBLK2: T
ver: R2000+
$DIMALTRND: BD
$DIMALT: B
$DIMALTD: BS
$DIMTOFL: B
$DIMSAH: B
$DIMTIX: B
$DIMSOXD: B
ver: all
$DIMCLRD: CMC
$DIMCLRE: CMC
$DIMCLRT: CMC
ver: R2000+
$DIMADEC: BS
$DIMDEC: BS
$DIMTDEC: BS
$DIMALTU: BS
$DIMALTTD: BS
$DIMAUNIT: BS
$DIMFRAC: BS
$DIMLUNIT: BS
$DIMDSEP: BS
$DIMTMOVE: BS
$DIMJUST: BS
$DIMSD1: B
$DIMSD2: B
$DIMTOLJ: BS
$DIMTZIN: BS
$DIMALTZ: BS
$DIMALTTZ: BS
$DIMUPT: B
$DIMATFIT: BS
ver: R2007+
$DIMFXLON: B
ver: R2010+
$DIMTXTDIRECTION: B
$DIMALTMZF: BD
$DIMALTMZS: T
$DIMMZF: BD
$DIMMZS: T
ver: R2000+
$DIMTXSTY: H # (hard pointer)
$DIMLDRBLK: H # (hard pointer)
$DIMBLK: H # (hard pointer)
$DIMBLK1: H # (hard pointer)
$DIMBLK2: H # (hard pointer)
ver: R2007+
$DIMLTYPE: H # (hard pointer)
$DIMLTEX1: H # (hard pointer)
$DIMLTEX2: H # (hard pointer)
ver: R2000+
$DIMLWD: BS
$DIMLWE: BS
ver: all
$BLOCK_CONTROL_OBJECT: H # (hard owner) Block Record Table
$LAYER_CONTROL_OBJECT: H # (hard owner) Layer Table
$STYLE_CONTROL_OBJECT: H # (hard owner) Style Table
$LINETYPE_CONTROL_OBJECT: H # (hard owner) Linetype Table
$VIEW_CONTROL_OBJECT: H # (hard owner) View table
$UCS_CONTROL_OBJECT: H # (hard owner) UCS Table
$VPORT_CONTROL_OBJECT: H # (hard owner) Viewport table
$APPID_CONTROL_OBJECT: H # (hard owner) AppID Table
$DIMSTYLE_CONTROL_OBJECT: H # (hard owner) Dimstyle Table
ver: R13-R2000
$VIEWPORT_ENTITY_HEADER_CONTROL_OBJECT: H # (hard owner)
ver: all
$ACAD_GROUP_DICTIONARY: H # (hard pointer)
$ACAD_MLINESTYLE_DICTIONARY: H # (hard pointer)
$ROOT_DICTIONARY: H # (NAMED OBJECTS) (hard owner)
ver: R2000+
$TSTACKALIGN: BS # default = 1 (not present in DXF)
$TSTACKSIZE: BS # default = 70 (not present in DXF)
$HYPERLINKBASE: TV
$STYLESHEET: TV
$LAYOUTS_DICTIONARY: H # (hard pointer)
$PLOTSETTINGS_DICTIONARY: H # (hard pointer)
$PLOTSTYLES_DICTIONARY: H # (hard pointer)
ver: R2004+
$MATERIALS_DICTIONARY: H # (hard pointer)
$COLORS_DICTIONARY: H # (hard pointer)
ver: R2007+
$VISUALSTYLE_DICTIONARY: H # (hard pointer)
ver: R2013+
$UNKNOWN: H # (hard pointer)
ver: R2000+
$R2000_PLUS_FLAGS: BL
# CELWEIGHT Flags & 0x001F
# ENDCAPS Flags & 0x0060
# JOINSTYLE Flags & 0x0180
# LWDISPLAY !(Flags & 0x0200)
# XEDIT !(Flags & 0x0400)
# EXTNAMES Flags & 0x0800
# PSTYLEMODE Flags & 0x2000
# OLESTARTUP Flags & 0x4000
$INSUNITS: BS
$CEPSNTYPE: BS
skip_next_if: header['$CEPSNTYPE'] != 3
$CPSNID: H # (present only if CEPSNTYPE == 3) (hard pointer)
$FINGERPRINTGUID: TV
$VERSIONGUID: TV
ver: R2004+
$SORTENTS: RC
$INDEXCTL: RC
$HIDETEXT: RC
$XCLIPFRAME: RC # before R2010 the value can be 0 or 1 only.
$DIMASSOC: RC
$HALOGAP: RC
$OBSCUREDCOLOR: BS
$INTERSECTIONCOLOR: BS
$OBSCUREDLTYPE: RC
$INTERSECTIONDISPLAY: RC
$PROJECTNAME: TV
ver: all
$PAPER_SPACE_BLOCK_RECORD: H # (hard pointer)
$MODEL_SPACE_BLOCK_RECORD: H # (hard pointer)
$BYLAYER_LTYPE: H # (hard pointer)
$BYBLOCK_LTYPE: H # (hard pointer)
$CONTINUOUS_LTYPE: H # (hard pointer)
ver: R2007+
$CAMERADISPLAY: B
$UNKNOWN: BL
$UNKNOWN: BL
$UNKNOWN: BD
$STEPSPERSEC: BD
$STEPSIZE: BD
$3DDWFPREC: BD
$LENSLENGTH: BD
$CAMERAHEIGHT: BD
$SOLIDHIST: RC
$SHOWHIST: RC
$PSOLWIDTH: BD
$PSOLHEIGHT: BD
$LOFTANG1: BD
$LOFTANG2: BD
$LOFTMAG1: BD
$LOFTMAG2: BD
$LOFTPARAM: BS
$LOFTNORMALS: RC
$LATITUDE: BD
$LONGITUDE: BD
$NORTHDIRECTION: BD
$TIMEZONE: BL
$LIGHTGLYPHDISPLAY: RC
$TILEMODELIGHTSYNCH: RC
$DWFFRAME: RC
$DGNFRAME: RC
$UNKNOWN: B
$INTERFERECOLOR: CMC
$INTERFEREOBJVS: H # (hard pointer)
$INTERFEREVPVS: H # (hard pointer)
$CSHADOW: RC
$UNKNOWN: BD
ver: R14+
$UNKNOWN: BS # short (type 5/6 only) these do not seem to be required,
$UNKNOWN: BS # short (type 5/6 only) even for type 5.
$UNKNOWN: BS # short (type 5/6 only)
$UNKNOWN: BS # short (type 5/6 only)
"""
```
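The block above is a small specification language: `ver:` gates the following `$VAR: CODE` reads to a DXF version range, and `skip_next_if:` conditionally drops the next command. A rough illustration of the parsing half, run on a made-up description (output format sketched in the comment):
```python
spec = '''
ver: R2000+
$LTSCALE: BD
skip_next_if: header['$LTSCALE'] == 0
$TEXTSIZE: BD
'''
for cmd, params in load_commands(spec):
    print(cmd, params)
# -> ('ver', (min_version, max_version)), ('var', ('$LTSCALE', 'BD')), ...
```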
#### File: src/ezdxf/commands.py
```python
from typing import Callable, Optional, Dict
import abc
import sys
import os
import glob
import signal
import logging
from pathlib import Path
from ezdxf import recover
from ezdxf.lldxf import const
from ezdxf.lldxf.validator import is_dxf_file
__all__ = ['get', 'add_parsers']
logger = logging.getLogger('ezdxf')
def get(cmd: str) -> Optional[Callable]:
cls = _commands.get(cmd)
if cls:
return cls.run
return None
def add_parsers(subparsers) -> None:
for cmd in _commands.values(): # in order of registration
cmd.add_parser(subparsers)
class Command:
""" abstract base class for launcher commands """
NAME = "command"
@staticmethod
@abc.abstractmethod
def add_parser(subparsers) -> None:
pass
@staticmethod
@abc.abstractmethod
def run(args) -> None:
pass
_commands: Dict[str, Command] = dict()
def register(cls: Command):
""" Register a launcher sub-command. """
_commands[cls.NAME] = cls
return cls
@register
class PrettyPrint(Command):
""" Launcher sub-command: pp """
NAME = 'pp'
@staticmethod
def add_parser(subparsers):
parser = subparsers.add_parser(
PrettyPrint.NAME,
help="pretty print DXF files as HTML file"
)
parser.add_argument(
'files',
metavar='FILE',
nargs='+',
            help='DXF files to pretty print',
)
parser.add_argument(
'-o', '--open',
action='store_true',
help='open generated HTML file by the default web browser',
)
parser.add_argument(
'-r', '--raw',
action='store_true',
help='raw mode, no DXF structure interpretation',
)
parser.add_argument(
'-x', '--nocompile',
action='store_true',
help="don't compile points coordinates into single tags "
"(only in raw mode)",
)
parser.add_argument(
'-l', '--legacy',
action='store_true',
help="legacy mode, reorder DXF point coordinates",
)
parser.add_argument(
'-s', '--sections',
action='store',
default='hctbeo',
help="choose sections to include and their order, h=HEADER, c=CLASSES, "
"t=TABLES, b=BLOCKS, e=ENTITIES, o=OBJECTS",
)
@staticmethod
def run(args):
from ezdxf.pp import run
run(args)
@register
class Audit(Command):
""" Launcher sub-command: audit """
NAME = 'audit'
@staticmethod
def add_parser(subparsers):
parser = subparsers.add_parser(
Audit.NAME,
help="audit and repair DXF files"
)
parser.add_argument(
'files',
metavar='FILE',
nargs='+',
help='audit DXF files',
)
parser.add_argument(
'-s', '--save',
action='store_true',
help="save recovered files with extension \".rec.dxf\" "
)
@staticmethod
def run(args):
def build_outname(name: str) -> str:
p = Path(name)
return p.parent / (p.stem + ".rec.dxf")
def log_fixes(auditor):
for error in auditor.fixes:
logger.info('fixed:' + error.message)
def log_errors(auditor):
for error in auditor.errors:
logger.error(error.message)
def _audit(filename: str) -> None:
msg = f"auditing file: {filename}"
print(msg)
logger.info(msg)
try:
doc, auditor = recover.readfile(filename)
except IOError:
msg = 'Not a DXF file or a generic I/O error.'
print(msg)
logger.error(msg)
return # keep on processing additional files
except const.DXFStructureError:
msg = 'Invalid or corrupted DXF file.'
print(msg)
logger.error(msg)
return # keep on processing additional files
if auditor.has_errors:
auditor.print_error_report()
log_errors(auditor)
if auditor.has_fixes:
auditor.print_fixed_errors()
log_fixes(auditor)
if auditor.has_errors is False and auditor.has_fixes is False:
print('No errors found.')
else:
print(f'Found {len(auditor.errors)} errors, '
f'applied {len(auditor.fixes)} fixes')
if args.save:
outname = build_outname(filename)
doc.saveas(outname)
print(f"Saved recovered file as: {outname}")
for pattern in args.files:
names = list(glob.glob(pattern))
if len(names) == 0:
msg = f"File(s) '{pattern}' not found."
print(msg)
logger.error(msg)
continue
for filename in names:
if not os.path.exists(filename):
msg = f"File '{filename}' not found."
print(msg)
logger.error(msg)
continue
if not is_dxf_file(filename):
msg = f"File '{filename}' is not a DXF file."
print(msg)
logger.error(msg)
continue
_audit(filename)
def load_document(filename: str):
try:
doc, auditor = recover.readfile(filename)
except IOError:
msg = f'Not a DXF file or a generic I/O error: {filename}'
print(msg)
logger.error(msg)
sys.exit(2)
except const.DXFStructureError:
msg = f'Invalid or corrupted DXF file: {filename}'
print(msg)
logger.error(msg)
sys.exit(3)
if auditor.has_errors:
        # The document has unrecoverable errors, but is most likely still good enough for rendering.
msg = f'Found {len(auditor.errors)} unrecoverable errors.'
print(msg)
logger.error(msg)
if auditor.has_fixes:
msg = f'Fixed {len(auditor.fixes)} errors.'
print(msg)
logger.info(msg)
return doc, auditor
@register
class Draw(Command):
""" Launcher sub-command: draw """
NAME = 'draw'
@staticmethod
def add_parser(subparsers):
parser = subparsers.add_parser(
Draw.NAME,
help="draw and convert DXF files by Matplotlib"
)
parser.add_argument(
'file',
metavar='FILE',
nargs='?',
help='DXF file to view or convert',
)
parser.add_argument(
'--formats',
action='store_true',
help="show all supported export formats and exit"
)
parser.add_argument(
'-o', '--out',
required=False,
help="output filename for export"
)
parser.add_argument(
'--dpi',
type=int,
default=300,
help="target render resolution, default is 300",
)
parser.add_argument(
'--ltype',
default='internal',
choices=['internal', 'ezdxf'],
help="select the line type rendering engine, default is internal",
)
@staticmethod
def run(args):
# Import on demand for a quicker startup:
try:
import matplotlib.pyplot as plt
except ImportError:
print('Matplotlib package not found.')
sys.exit(2)
from ezdxf.addons.drawing import RenderContext, Frontend
from ezdxf.addons.drawing.matplotlib import MatplotlibBackend
# Print all supported export formats:
if args.formats:
fig = plt.figure()
for extension, description in fig.canvas.get_supported_filetypes().items():
print(f'{extension}: {description}')
sys.exit(0)
if args.file:
filename = args.file
else:
print('argument FILE is required')
sys.exit(1)
doc, _ = load_document(filename)
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ctx = RenderContext(doc)
out = MatplotlibBackend(ax, params={'linetype_renderer': args.ltype})
Frontend(ctx, out).draw_layout(doc.modelspace(), finalize=True)
if args.out is not None:
print(f'exporting to "{args.out}"')
fig.savefig(args.out, dpi=args.dpi)
plt.close(fig)
else:
plt.show()
@register
class View(Command):
""" Launcher sub-command: view """
NAME = 'view'
@staticmethod
def add_parser(subparsers):
parser = subparsers.add_parser(
View.NAME,
help="view DXF files by the PyQt viewer"
)
parser.add_argument(
'file',
metavar='FILE',
nargs='?',
help='DXF file to view',
)
parser.add_argument(
'--ltype',
default='internal',
choices=['internal', 'ezdxf'],
help="select the line type rendering engine, default is internal",
)
# disable lineweight at all by default:
parser.add_argument(
'--lwscale',
type=float,
default=0,
help="set custom line weight scaling, default is 0 to disable "
"line weights at all",
)
@staticmethod
def run(args):
# Import on demand for a quicker startup:
try:
from PyQt5 import QtWidgets
except ImportError:
print('PyQt5 package not found.')
sys.exit(1)
from ezdxf.addons.drawing.qtviewer import CadViewer
signal.signal(signal.SIGINT, signal.SIG_DFL) # handle Ctrl+C properly
app = QtWidgets.QApplication(sys.argv)
set_app_icon(app)
viewer = CadViewer(params={
'linetype_renderer': args.ltype,
'lineweight_scaling': args.lwscale,
})
filename = args.file
if filename:
doc, auditor = load_document(filename)
viewer.set_document(doc, auditor)
viewer.draw_layout('Model')
sys.exit(app.exec_())
def set_app_icon(app):
from PyQt5 import QtGui, QtCore
app_icon = QtGui.QIcon()
p = resources_path()
app_icon.addFile(str(p / '16x16.png'), QtCore.QSize(16, 16))
app_icon.addFile(str(p / '24x24.png'), QtCore.QSize(24, 24))
app_icon.addFile(str(p / '32x32.png'), QtCore.QSize(32, 32))
app_icon.addFile(str(p / '48x48.png'), QtCore.QSize(48, 48))
app_icon.addFile(str(p / '64x64.png'), QtCore.QSize(64, 64))
app_icon.addFile(str(p / '256x256.png'), QtCore.QSize(256, 256))
app.setWindowIcon(app_icon)
def resources_path():
from pathlib import Path
return Path(__file__).parent / "resources"
```
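A sketch of how the registry above can be wired up (the real argparse wiring lives in the package's launcher; the DXF filename is a placeholder):
```python
import argparse
from ezdxf import commands
parser = argparse.ArgumentParser(prog='ezdxf')
subparsers = parser.add_subparsers(dest='command')
commands.add_parsers(subparsers)
args = parser.parse_args(['audit', 'gear.dxf'])  # placeholder file
run = commands.get(args.command)
if run:
    run(args)
```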
#### File: ezdxf/entities/attrib.py
```python
from typing import TYPE_CHECKING, Optional, Union
import copy
from ezdxf.lldxf import validator
from ezdxf.math import NULLVEC
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, XType, RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER, DXF2010
from ezdxf.lldxf import const
from ezdxf.tools import set_flag_state
from .dxfentity import base_class, SubclassProcessor
from .dxfgfx import acdb_entity, elevation_to_z_axis
from .text import Text, acdb_text, acdb_text_group_codes
from .factory import register_entity
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Tags, DXFNamespace, DXFEntity
__all__ = ['AttDef', 'Attrib', 'copy_attrib_as_text']
# DXF Reference for ATTRIB is a total mess and incorrect, the AcDbText subclass
# for the ATTRIB entity is the same as for the TEXT entity, but the valign field
# from the 2nd AcDbText subclass of the TEXT entity is stored in the
# AcDbAttribute subclass:
attrib_fields = {
# Version number: 0 = 2010
'version': DXFAttr(280, default=0, dxfversion=DXF2010),
# Tag string (cannot contain spaces):
'tag': DXFAttr(
2, default='',
validator=validator.is_valid_attrib_tag,
fixer=validator.fix_attrib_tag,
),
# 1 = Attribute is invisible (does not appear)
# 2 = This is a constant attribute
# 4 = Verification is required on input of this attribute
# 8 = Attribute is preset (no prompt during insertion)
'flags': DXFAttr(70, default=0),
# Field length (optional) (not currently used)
'field_length': DXFAttr(73, default=0, optional=True),
# Vertical text justification type (optional); see group code 73 in TEXT
'valign': DXFAttr(
74, default=0, optional=True,
validator=validator.is_in_integer_range(0, 4),
fixer=RETURN_DEFAULT,
),
# Lock position flag. Locks the position of the attribute within the block
# reference, example of double use of group codes in one sub class
'lock_position': DXFAttr(
280, default=0, dxfversion=DXF2010, optional=True,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
}
# ATTDEF has an additional field: 'prompt'
# DXF attribute definitions are immutable, a shallow copy is sufficient:
attdef_fields = dict(attrib_fields)
attdef_fields['prompt'] = DXFAttr(
3, default='',
validator=validator.is_valid_one_line_text,
fixer=validator.fix_one_line_text,
)
acdb_attdef = DefSubclass('AcDbAttributeDefinition', attdef_fields)
acdb_attdef_group_codes = group_code_mapping(acdb_attdef)
acdb_attrib = DefSubclass('AcDbAttribute', attrib_fields)
acdb_attrib_group_codes = group_code_mapping(acdb_attrib)
# For XRECORD the tag order is important and group codes appear multiple times,
# therefore this attribute definition needs a special treatment!
acdb_attdef_xrecord = DefSubclass('AcDbXrecord', [
# Duplicate record cloning flag (determines how to merge duplicate entries):
# 1 = Keep existing
('cloning', DXFAttr(280, default=1)),
# MText flag:
# 2 = multiline attribute
# 4 = constant multiline attribute definition
('mtext_flag', DXFAttr(70, default=0)),
# isReallyLocked flag:
# 0 = unlocked
# 1 = locked
('really_locked', DXFAttr(
70, default=0,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
)),
# Number of secondary attributes or attribute definitions:
('secondary_attribs_count', DXFAttr(70, default=0)),
# Hard-pointer id of secondary attribute(s) or attribute definition(s):
('secondary_attribs_handle', DXFAttr(70, default=0)),
# Alignment point of attribute or attribute definition:
('align_point', DXFAttr(10, xtype=XType.point3d, default=NULLVEC)),
('current_annotation_scale', DXFAttr(40, default=0)),
# attribute or attribute definition tag string
('tag', DXFAttr(
2, default='',
validator=validator.is_valid_attrib_tag,
fixer=validator.fix_attrib_tag,
)),
])
# A special MTEXT entity can follow the ATTDEF and ATTRIB entity, which starts
# as a usual DXF entity with (0, 'MTEXT'), so processing can't be done here,
# because for ezdxf this is a separate entity.
#
# The attached MTEXT entity: owner is None and handle is None
# Linked as attribute `attached_mtext`.
# I have not seen this combination of entities in real world examples and it is
# ignored by ezdxf for now.
# Attrib and Attdef can have embedded MTEXT entities located in the
# <Embedded Object> subclass, see issue #258
# QUESTION: How are the attached MTEXT and the embedded MTEXT related?
class BaseAttrib(Text):
XRECORD_DEF = acdb_attdef_xrecord
def __init__(self):
""" Default constructor """
super().__init__()
# TODO: implement embedded MTEXT support
# remove DXFEntity.embedded_objects if done
self.xrecord: Optional['Tags'] = None
self.attached_mtext: Optional['DXFEntity'] = None
def _copy_data(self, entity: 'BaseAttrib') -> None:
""" Copy entity data, xrecord data and attached MTEXT are not stored
in the entity database.
"""
entity.xrecord = copy.deepcopy(self.xrecord)
if self.attached_mtext:
entity.attached_mtext = self.attached_mtext.copy()
# attached mtext entity is not stored in the entity database
# no further action required
def link_entity(self, entity: 'DXFEntity'):
self.attached_mtext = entity
def export_dxf(self, tagwriter: 'TagWriter'):
""" Export attached MTEXT entity. """
super().export_dxf(tagwriter)
if self.attached_mtext:
# todo: export MTEXT attached to ATTRIB
# Attached MTEXT has no handle and owner and can not be exported
# by the usual export process:
# self.attached_mtext.export_dxf(tagwriter)
raise NotImplementedError('Attached MTEXT export')
@property
def is_const(self) -> bool:
""" This is a constant attribute. """
return bool(self.dxf.flags & const.ATTRIB_CONST)
@is_const.setter
def is_const(self, state: bool) -> None:
""" This is a constant attribute. """
self.dxf.flags = set_flag_state(self.dxf.flags, const.ATTRIB_CONST,
state)
@property
def is_invisible(self) -> bool:
""" Attribute is invisible (does not appear). """
return bool(self.dxf.flags & const.ATTRIB_INVISIBLE)
@is_invisible.setter
def is_invisible(self, state: bool) -> None:
""" Attribute is invisible (does not appear). """
self.dxf.flags = set_flag_state(self.dxf.flags, const.ATTRIB_INVISIBLE,
state)
@property
def is_verify(self) -> bool:
""" Verification is required on input of this attribute.
(CAD application feature)
"""
return bool(self.dxf.flags & const.ATTRIB_VERIFY)
@is_verify.setter
def is_verify(self, state: bool) -> None:
""" Verification is required on input of this attribute.
(CAD application feature)
"""
self.dxf.flags = set_flag_state(self.dxf.flags, const.ATTRIB_VERIFY,
state)
@property
def is_preset(self) -> bool:
""" No prompt during insertion. (CAD application feature) """
return bool(self.dxf.flags & const.ATTRIB_IS_PRESET)
@is_preset.setter
def is_preset(self, state: bool) -> None:
""" No prompt during insertion. (CAD application feature) """
self.dxf.flags = set_flag_state(self.dxf.flags, const.ATTRIB_IS_PRESET,
state)
@register_entity
class AttDef(BaseAttrib):
""" DXF ATTDEF entity """
DXFTYPE = 'ATTDEF'
# Don't add acdb_attdef_xrecord here:
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_text, acdb_attdef)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super(Text, self).load_dxf_attribs(processor)
# Do not call Text loader.
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_text_group_codes, 2, recover=True)
processor.fast_load_dxfattribs(
dxf, acdb_attdef_group_codes, 3, recover=True)
self.xrecord = processor.find_subclass(self.XRECORD_DEF.name)
if processor.r12:
# Transform elevation attribute from R11 to z-axis values:
elevation_to_z_axis(dxf, ('insert', 'align_point'))
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
# Text() writes 2x AcDbText which is not suitable for AttDef()
self.export_acdb_entity(tagwriter)
self.export_acdb_text(tagwriter)
self.export_acdb_attdef(tagwriter)
if self.xrecord:
tagwriter.write_tags(self.xrecord)
def export_acdb_attdef(self, tagwriter: 'TagWriter') -> None:
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_attdef.name)
self.dxf.export_dxf_attribs(tagwriter, [
'version', 'prompt', 'tag', 'flags', 'field_length', 'valign',
'lock_position',
])
@register_entity
class Attrib(BaseAttrib):
""" DXF ATTRIB entity """
DXFTYPE = 'ATTRIB'
# Don't add acdb_attdef_xrecord here:
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_text, acdb_attrib)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super(Text, self).load_dxf_attribs(processor)
# Do not call Text loader.
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_text_group_codes, 2, recover=True)
processor.fast_load_dxfattribs(
dxf, acdb_attrib_group_codes, 3, recover=True)
self.xrecord = processor.find_subclass(self.XRECORD_DEF.name)
if processor.r12:
# Transform elevation attribute from R11 to z-axis values:
elevation_to_z_axis(dxf, ('insert', 'align_point'))
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
# Text() writes 2x AcDbText which is not suitable for Attrib()
self.export_acdb_entity(tagwriter)
self.export_acdb_attrib_text(tagwriter)
self.export_acdb_attrib(tagwriter)
if self.xrecord:
tagwriter.write_tags(self.xrecord)
def export_acdb_attrib_text(self, tagwriter: 'TagWriter') -> None:
# Despite the similarities to TEXT, it is different to
# Text.export_acdb_text():
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_text.name)
self.dxf.export_dxf_attribs(tagwriter, [
'insert', 'height', 'text', 'thickness', 'rotation', 'oblique',
'style', 'width', 'halign', 'align_point', 'text_generation_flag',
'extrusion',
])
def export_acdb_attrib(self, tagwriter: 'TagWriter') -> None:
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_attrib.name)
self.dxf.export_dxf_attribs(tagwriter, [
'version', 'tag', 'flags', 'field_length', 'valign',
'lock_position',
])
IGNORE_FROM_ATTRIB = {
'handle', 'owner', 'version', 'prompt', 'tag', 'flags', 'field_length',
'lock_position'
}
def copy_attrib_as_text(attrib: BaseAttrib):
""" Returns the content of the ATTRIB/ATTDEF entity as a new virtual TEXT
entity.
"""
# TODO: MTEXT feature of DXF R2018+ is not supported yet!
dxfattribs = attrib.dxfattribs(drop=IGNORE_FROM_ATTRIB)
return Text.new(dxfattribs=dxfattribs, doc=attrib.doc)
```
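A short usage sketch for the entities above. It assumes ezdxf's documented layout factory method `add_attdef()` and the public `ezdxf.new()` API; tag and text values are illustrative only:
```python
import ezdxf
from ezdxf.entities.attrib import copy_attrib_as_text

doc = ezdxf.new()
msp = doc.modelspace()

# Create an attribute definition and toggle flags via the BaseAttrib properties:
attdef = msp.add_attdef(tag='PART_NO', insert=(0, 0), text='4711')
attdef.is_preset = True      # no prompt during block insertion
attdef.is_invisible = False  # attribute remains visible

# Convert the ATTDEF content into an independent virtual TEXT entity;
# attribute specific fields (tag, prompt, flags, ...) are dropped:
text = copy_attrib_as_text(attdef)
print(text.dxf.text)  # '4711'
```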
#### File: ezdxf/entities/dictionary.py
```python
from typing import (
TYPE_CHECKING, KeysView, ItemsView, Any, Union, Dict, Optional,
)
import logging
from ezdxf.lldxf import validator
from ezdxf.lldxf.const import (
SUBCLASS_MARKER, DXFKeyError, DXFValueError,
)
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, RETURN_DEFAULT, group_code_mapping
)
from ezdxf.lldxf.types import is_valid_handle
from ezdxf.audit import AuditError
from ezdxf.entities import factory
from .dxfentity import base_class, SubclassProcessor, DXFEntity
from .dxfobj import DXFObject
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, Drawing, DXFNamespace, Auditor
__all__ = ['Dictionary', 'DictionaryWithDefault', 'DictionaryVar']
acdb_dictionary = DefSubclass('AcDbDictionary', {
# If set to 1, indicates that elements of the dictionary are to be treated
# as hard-owned:
'hard_owned': DXFAttr(
280, default=0, optional=True,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# Duplicate record cloning flag (determines how to merge duplicate entries):
# 0 = not applicable
# 1 = keep existing
# 2 = use clone
# 3 = <xref>$0$<name>
# 4 = $0$<name>
# 5 = Unmangle name
'cloning': DXFAttr(
281, default=1,
validator=validator.is_in_integer_range(0, 6),
fixer=RETURN_DEFAULT,
),
# 3: entry name
# 350: entry handle, some DICTIONARY objects have 360 as handle group code,
# this is accepted by AutoCAD but not documented by the DXF reference!
# ezdxf replaces group code 360 by 350.
})
acdb_dictionary_group_codes = group_code_mapping(acdb_dictionary)
KEY_CODE = 3
VALUE_CODE = 350
# Some DICTIONARY use group code 360:
SEARCH_CODES = (VALUE_CODE, 360)
@factory.register_entity
class Dictionary(DXFObject):
""" AutoCAD maintains items such as mline styles and group definitions as
objects in dictionaries. Other applications are free to create and use
their own dictionaries as they see fit. The prefix "ACAD_" is reserved
for use by AutoCAD applications.
Dictionary entries are (key, DXFEntity) pairs. DXFEntity could be a string,
because at loading time not all objects are already stored in the EntityDB,
and have to be acquired later.
"""
DXFTYPE = 'DICTIONARY'
DXFATTRIBS = DXFAttributes(base_class, acdb_dictionary)
def __init__(self):
super().__init__()
self._data: Dict[str, Union[str, DXFEntity]] = dict()
self._value_code = VALUE_CODE
def _copy_data(self, entity: 'Dictionary') -> None:
""" Copy hard owned entities but do not store the copies in the entity
database; storing the copies is a second step, this is just the actual copying.
"""
entity._value_code = self._value_code
if self.dxf.hard_owned:
# Reactors are removed from the cloned DXF objects.
entity._data = {key: e.copy() for key, e in self.items()}
else:
entity._data = {key: e for key, e in self.items()}
def post_bind_hook(self) -> None:
""" Called by binding a new or copied dictionary to the document,
bind hard owned sub-entities to the same document and add them to the
objects section.
"""
if not self.dxf.hard_owned:
return
# copied or new dictionary:
doc = self.doc
owner_handle = self.dxf.handle
for _, entity in self.items():
entity.dxf.owner = owner_handle
factory.bind(entity, doc)
# For a correct DXF export add entities to the objects section:
doc.objects.add_object(entity)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.fast_load_dxfattribs(
dxf, acdb_dictionary_group_codes, 1, log=False)
self.load_dict(tags)
return dxf
def load_dict(self, tags):
entry_handle = None
dict_key = None
value_code = VALUE_CODE
for code, value in tags:
if code in SEARCH_CODES:
# Store the handle first, because at this point NOT all objects
# are stored in the EntityDB; at first access the handle is
# converted to a DXFEntity object.
value_code = code
entry_handle = value
elif code == KEY_CODE:
dict_key = value
if dict_key and entry_handle:
# Store entity as handle string:
self._data[dict_key] = entry_handle
entry_handle = None
dict_key = None
# Use same value code as loaded:
self._value_code = value_code
def post_load_hook(self, doc: 'Drawing') -> None:
super().post_load_hook(doc)
db = doc.entitydb
def items():
for key, handle in self.items():
entity = db.get(handle)
if entity is not None and entity.is_alive:
yield key, entity
if len(self):
for k, v in list(items()):
self.__setitem__(k, v)
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_dictionary.name)
self.dxf.export_dxf_attribs(tagwriter, ['hard_owned', 'cloning'])
self.export_dict(tagwriter)
def export_dict(self, tagwriter: 'TagWriter'):
# key: dict key string
# value: DXFEntity or handle as string
# Ignore invalid handles at export, because removing can create an empty
# dictionary, which is more a problem for AutoCAD than invalid handles,
# and removing the whole dictionary is maybe also a problem.
for key, value in self._data.items():
tagwriter.write_tag2(KEY_CODE, key)
# Value can be a handle string or a DXFEntity object:
if isinstance(value, DXFEntity):
if value.is_alive:
value = value.dxf.handle
else:
logger.debug(
f'Key "{key}" points to a destroyed entity '
f'in {str(self)}, target replaced by "0" handle.'
)
value = '0'
# Use same value code as loaded:
tagwriter.write_tag2(self._value_code, value)
@property
def is_hard_owner(self) -> bool:
""" Returns ``True`` if :class:`Dictionary` is hard owner of entities.
Hard owned entities will be destroyed by deleting the dictionary.
"""
return bool(self.dxf.hard_owned)
def keys(self) -> KeysView:
""" Returns :class:`KeysView` of all dictionary keys. """
return self._data.keys()
def items(self) -> ItemsView:
""" Returns :class:`ItemsView` for all dictionary entries as
(:attr:`key`, :class:`DXFEntity`) pairs.
"""
for key in self.keys():
yield key, self.get(key) # maybe handle -> DXFEntity
def __getitem__(self, key: str) -> 'DXFEntity':
""" Return the value for `key`, raises a :class:`DXFKeyError` if `key`
does not exist.
"""
return self.get(key)
def __setitem__(self, key: str, value: 'DXFEntity') -> None:
""" Add item as ``(key, value)`` pair to dictionary. """
return self.add(key, value)
def __delitem__(self, key: str) -> None:
""" Delete entry `key` from the dictionary, raises :class:`DXFKeyError`
if key does not exist.
"""
return self.remove(key)
def __contains__(self, key: str) -> bool:
""" Returns ``True`` if `key` exist. """
return key in self._data
def __len__(self) -> int:
""" Returns count of items. """
return len(self._data)
count = __len__
def get(self, key: str, default: Any = DXFKeyError) -> 'DXFEntity':
""" Returns :class:`DXFEntity` for `key`, if `key` exist,
else `default` or raises a :class:`DXFKeyError` for
`default` = :class:`DXFKeyError`.
"""
try:
return self._data[key]
except KeyError:
if default is DXFKeyError:
raise DXFKeyError(f"KeyError: '{key}'")
else:
return default
def add(self, key: str, value: 'DXFEntity') -> None:
""" Add entry ``(key, value)``. """
if isinstance(value, str):
if not is_valid_handle(value):
raise DXFValueError(
f'Invalid entity handle #{value} for key {key}')
self._data[key] = value
def remove(self, key: str) -> None:
""" Delete entry `key`. Raises :class:`DXFKeyError`, if `key` does not
exist. Also deletes hard owned DXF objects from the OBJECTS section.
"""
data = self._data
if key not in data:
raise DXFKeyError(key)
if self.is_hard_owner:
entity = self.get(key)
# Presumption: hard owned DXF objects always reside in the OBJECTS
# section.
self.doc.objects.delete_entity(entity)
del data[key]
def discard(self, key: str) -> None:
""" Delete entry `key` if exists. Does NOT raise an exception if `key`
not exist and does not delete hard owned DXF objects.
"""
try:
del self._data[key]
except KeyError:
pass
def clear(self) -> None:
""" Delete all entries from :class:`Dictionary`, deletes hard owned
DXF objects from OBJECTS section.
"""
if self.is_hard_owner:
self._delete_hard_owned_entries()
self._data.clear()
def _delete_hard_owned_entries(self) -> None:
# Presumption: hard owned DXF objects always reside in the OBJECTS section
objects = self.doc.objects
for key, entity in self.items():
objects.delete_entity(entity)
def add_new_dict(self, key: str, hard_owned: bool = False) -> 'Dictionary':
""" Create a new sub :class:`Dictionary`.
Args:
key: name of the sub dictionary
hard_owned: entries of the new dictionary are hard owned
"""
dxf_dict = self.doc.objects.add_dictionary(owner=self.dxf.handle,
hard_owned=hard_owned)
self.add(key, dxf_dict)
return dxf_dict
def add_dict_var(self, key: str, value: str) -> 'DictionaryVar':
""" Add new :class:`DictionaryVar`.
Args:
key: entry name as string
value: entry value as string
"""
new_var = self.doc.objects.add_dictionary_var(
owner=self.dxf.handle,
value=value
)
self.add(key, new_var)
return new_var
def set_or_add_dict_var(self, key: str, value: str) -> 'DictionaryVar':
""" Set or add new :class:`DictionaryVar`.
Args:
key: entry name as string
value: entry value as string
"""
if key not in self:
dict_var = self.doc.objects.add_dictionary_var(
owner=self.dxf.handle,
value=value
)
self.add(key, dict_var)
else:
dict_var = self.get(key)
dict_var.dxf.value = str(value)
return dict_var
def get_required_dict(self, key: str) -> 'Dictionary':
""" Get entry `key` or create a new :class:`Dictionary`,
if `key` does not exist.
"""
try:
dxf_dict = self.get(key)
except DXFKeyError:
dxf_dict = self.add_new_dict(key)
return dxf_dict
def audit(self, auditor: 'Auditor') -> None:
super().audit(auditor)
self._check_invalid_entries(auditor)
def _check_invalid_entries(self, auditor: 'Auditor'):
trash = [] # do not delete content while iterating
append = trash.append
db = auditor.entitydb
for key, entry in self._data.items():
if isinstance(entry, str):
if entry not in db:
append(key)
elif entry.is_alive:
if entry.dxf.handle not in db:
append(key)
else: # entry is destroyed
append(key)
for key in trash:
del self._data[key]
auditor.fixed_error(
code=AuditError.INVALID_DICTIONARY_ENTRY,
message=f'Removed entry "{key}" with invalid handle in {str(self)}',
dxf_entity=self,
data=key,
)
def destroy(self) -> None:
if not self.is_alive:
return
if self.is_hard_owner:
self._delete_hard_owned_entries()
super().destroy()
acdb_dict_with_default = DefSubclass('AcDbDictionaryWithDefault', {
'default': DXFAttr(340),
})
acdb_dict_with_default_group_codes = group_code_mapping(acdb_dict_with_default)
@factory.register_entity
class DictionaryWithDefault(Dictionary):
DXFTYPE = 'ACDBDICTIONARYWDFLT'
DXFATTRIBS = DXFAttributes(base_class, acdb_dictionary,
acdb_dict_with_default)
def __init__(self):
super().__init__()
self._default: Optional[DXFEntity] = None
def _copy_data(self, entity: 'Dictionary') -> None:
entity._default = self._default
def post_load_hook(self, doc: 'Drawing') -> None:
# Set _default to None if the default object does not exist - audit()
# replaces a non-existing default object by a placeholder object.
# AutoCAD ignores non-existing default objects!
self._default = doc.entitydb.get(self.dxf.default)
super().post_load_hook(doc)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_dict_with_default_group_codes, 2)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_dict_with_default.name)
self.dxf.export_dxf_attribs(tagwriter, 'default')
def get(self, key: str, default: Any = DXFKeyError) -> DXFEntity:
# The `default` argument is ignored, it exists only for API compatibility.
""" Returns :class:`DXFEntity` for `key` or the predefined dictionary
wide :attr:`dxf.default` entity if `key` does not exist or ``None``
if the default value does not exist either.
"""
return super().get(key, default=self._default)
def set_default(self, default: DXFEntity) -> None:
""" Set dictionary wide default entry.
Args:
default: default entry as :class:`DXFEntity`
"""
self._default = default
self.dxf.default = self._default.dxf.handle
def audit(self, auditor: 'Auditor') -> None:
def create_missing_default_object():
placeholder = self.doc.objects.add_placeholder(
owner=self.dxf.handle)
self.set_default(placeholder)
auditor.fixed_error(
code=AuditError.CREATED_MISSING_OBJECT,
message=f'Created missing default object in {str(self)}.'
)
if self._default is None or not self._default.is_alive:
if auditor.entitydb.locked:
auditor.add_post_audit_job(create_missing_default_object)
else:
create_missing_default_object()
super().audit(auditor)
acdb_dict_var = DefSubclass('DictionaryVariables', {
'schema': DXFAttr(280, default=0),
# Object schema number (currently set to 0)
'value': DXFAttr(1, default=''),
})
acdb_dict_var_group_codes = group_code_mapping(acdb_dict_var)
@factory.register_entity
class DictionaryVar(DXFObject):
"""
DICTIONARYVAR objects are used by AutoCAD as a means to store named values
in the database for setvar / getvar purposes without the need to add entries
to the DXF HEADER section. System variables that are stored as
DICTIONARYVAR objects are the following:
- DEFAULTVIEWCATEGORY
- DIMADEC
- DIMASSOC
- DIMDSEP
- DRAWORDERCTL
- FIELDEVAL
- HALOGAP
- HIDETEXT
- INDEXCTL
- INTERSECTIONCOLOR
- INTERSECTIONDISPLAY
- MSOLESCALE
- OBSCOLOR
- OBSLTYPE
- OLEFRAME
- PROJECTNAME
- SORTENTS
- UPDATETHUMBNAIL
- XCLIPFRAME
"""
DXFTYPE = 'DICTIONARYVAR'
DXFATTRIBS = DXFAttributes(base_class, acdb_dict_var)
def load_dxf_attribs(self,
processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(dxf, acdb_dict_var_group_codes, 1)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_dict_var.name)
self.dxf.export_dxf_attribs(tagwriter, ['schema', 'value'])
```
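A brief sketch of the Dictionary API defined above, operating on the root dictionary of a new document; the key names are illustrative only:
```python
import ezdxf

doc = ezdxf.new()
rootdict = doc.rootdict  # root DICTIONARY of the OBJECTS section

# get_required_dict() returns an existing entry or creates a new sub-dictionary:
demo = rootdict.get_required_dict('EZDXF_DEMO')  # key name is an assumption

# set_or_add_dict_var() stores a named string value as a DICTIONARYVAR object:
var = demo.set_or_add_dict_var('ANSWER', '42')
print(var.dxf.value)     # '42'
print('ANSWER' in demo)  # True
print(len(demo))         # 1
```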
#### File: ezdxf/entities/dxfobj.py
```python
from typing import TYPE_CHECKING, Iterable, Dict, Tuple
import logging
import array
from ezdxf.lldxf import validator
from ezdxf.lldxf.const import DXF2000, DXFStructureError, SUBCLASS_MARKER
from ezdxf.lldxf.tags import Tags
from ezdxf.lldxf.types import dxftag, DXFTag, DXFBinaryTag
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, RETURN_DEFAULT, group_code_mapping
)
from ezdxf.tools import take2
from .dxfentity import DXFEntity, base_class, SubclassProcessor
from .factory import register_entity
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import Auditor, DXFNamespace, TagWriter
__all__ = [
'DXFObject', 'Placeholder', 'XRecord', 'VBAProject', 'SortEntsTable',
'Field'
]
class DXFObject(DXFEntity):
""" Non graphical entities stored in the OBJECTS section. """
MIN_DXF_VERSION_FOR_EXPORT = DXF2000
def audit(self, auditor: 'Auditor') -> None:
""" Validity check. (internal API) """
super().audit(auditor)
auditor.check_owner_exist(self)
@register_entity
class Placeholder(DXFObject):
DXFTYPE = 'ACDBPLACEHOLDER'
acdb_xrecord = DefSubclass('AcDbXrecord', {
# 0 = not applicable
# 1 = keep existing
# 2 = use clone
# 3 = <xref>$0$<name>
# 4 = $0$<name>
# 5 = Unmangle name
'cloning': DXFAttr(
280, default=1,
validator=validator.is_in_integer_range(0, 6),
fixer=RETURN_DEFAULT,
),
})
def totags(tags: Iterable) -> Iterable[DXFTag]:
for tag in tags:
if isinstance(tag, DXFTag):
yield tag
else:
yield dxftag(tag[0], tag[1])
@register_entity
class XRecord(DXFObject):
""" DXF XRECORD entity """
DXFTYPE = 'XRECORD'
DXFATTRIBS = DXFAttributes(base_class, acdb_xrecord)
def __init__(self):
super().__init__()
self.tags = Tags()
def _copy_data(self, entity: 'XRecord') -> None:
entity.tags = Tags(entity.tags)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
try:
tags = processor.subclasses[1]
except IndexError:
raise DXFStructureError(
f'Missing subclass AcDbXrecord in XRecord (#{dxf.handle})')
start_index = 1
if len(tags) > 1:
# First tag is group code 280, but not for DXF R13/R14.
# SUT: doc may be None, but then doc also can not
# be R13/R14 - ezdxf does not create R13/R14
if self.doc is None or self.doc.dxfversion >= DXF2000:
code, value = tags[1]
if code == 280:
dxf.cloning = value
start_index = 2
else: # just log recoverable error
logger.info(
f'XRecord (#{dxf.handle}): expected group code 280 '
f'as first tag in AcDbXrecord'
)
self.tags = Tags(tags[start_index:])
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_xrecord.name)
tagwriter.write_tag2(280, self.dxf.cloning)
tagwriter.write_tags(Tags(totags(self.tags)))
acdb_vba_project = DefSubclass('AcDbVbaProject', {
# 90: Number of bytes of binary chunk data (contained in the group code
# 310 records that follow)
# 310: DXF: Binary object data (multiple entries containing VBA project
# data)
})
@register_entity
class VBAProject(DXFObject):
""" DXF VBA_PROJECT entity """
DXFTYPE = 'VBA_PROJECT'
DXFATTRIBS = DXFAttributes(base_class, acdb_vba_project)
def __init__(self):
super().__init__()
self.data = b''
def _copy_data(self, entity: 'VBAProject') -> None:
entity.data = self.data  # bytes are immutable, no copy required
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
self.load_byte_data(processor.subclasses[1])
return dxf
def load_byte_data(self, tags: 'Tags') -> None:
byte_array = array.array('B')
# Translation from String to binary data happens in tag_compiler():
for byte_data in (tag.value for tag in tags if tag.code == 310):
byte_array.extend(byte_data)
self.data = byte_array.tobytes()
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_vba_project.name)
tagwriter.write_tag2(90, len(self.data))
self.export_data(tagwriter)
def export_data(self, tagwriter: 'TagWriter'):
data = self.data
while data:
tagwriter.write_tag(DXFBinaryTag(310, data[:127]))
data = data[127:]
def clear(self) -> None:
self.data = b''
acdb_sort_ents_table = DefSubclass('AcDbSortentsTable', {
# Soft-pointer ID/handle to owner (currently only the *MODEL_SPACE or
# *PAPER_SPACE blocks) in ezdxf the block_record handle for a layout is
# also called layout_key:
'block_record_handle': DXFAttr(330),
# 331: Soft-pointer ID/handle to an entity (zero or more entries may exist)
# 5: Sort handle (zero or more entries may exist)
})
acdb_sort_ents_table_group_codes = group_code_mapping(acdb_sort_ents_table)
@register_entity
class SortEntsTable(DXFObject):
""" DXF SORTENTSTABLE entity - sort entities table """
# should work with AC1015/R2000 but causes problems with TrueView/AutoCAD
# LT 2019: "expected was-a-zombie-flag"
# No problems with AC1018/R2004 and later
#
# If the header variable $SORTENTS Regen flag (bit-code value 16) is set,
# AutoCAD regenerates entities in ascending handle order.
#
# When the DRAWORDER command is used, a SORTENTSTABLE object is attached to
# the *Model_Space or *Paper_Space block's extension dictionary under the
# name ACAD_SORTENTS. The SORTENTSTABLE object related to this dictionary
# associates a different handle with each entity, which redefines the order
# in which the entities are regenerated.
#
# $SORTENTS (280): Controls the object sorting methods (bitcode):
# 0 = Disables SORTENTS
# 1 = Sorts for object selection
# 2 = Sorts for object snap
# 4 = Sorts for redraws; obsolete
# 8 = Sorts for MSLIDE command slide creation; obsolete
# 16 = Sorts for REGEN commands
# 32 = Sorts for plotting
# 64 = Sorts for PostScript output; obsolete
DXFTYPE = 'SORTENTSTABLE'
DXFATTRIBS = DXFAttributes(base_class, acdb_sort_ents_table)
def __init__(self):
super().__init__()
self.table: Dict[str, str] = dict()
def _copy_data(self, entity: 'SortEntsTable') -> None:
entity.table = dict(self.table)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.fast_load_dxfattribs(
dxf, acdb_sort_ents_table_group_codes, 1, log=False)
self.load_table(tags)
return dxf
def load_table(self, tags: 'Tags') -> None:
for handle, sort_handle in take2(tags):
if handle.code != 331:
raise DXFStructureError(
f'Invalid handle code {handle.code}, expected 331')
if sort_handle.code != 5:
raise DXFStructureError(
f'Invalid sort handle code {sort_handle.code}, expected 5')
self.table[handle.value] = sort_handle.value
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_sort_ents_table.name)
tagwriter.write_tag2(330, self.dxf.block_record_handle)
self.export_table(tagwriter)
def export_table(self, tagwriter: 'TagWriter'):
for handle, sort_handle in self.table.items():
tagwriter.write_tag2(331, handle)
tagwriter.write_tag2(5, sort_handle)
def __len__(self) -> int:
return len(self.table)
def __iter__(self) -> Iterable:
""" Yields all redraw associations as (object_handle, sort_handle)
tuples.
"""
return iter(self.table.items())
def append(self, handle: str, sort_handle: str) -> None:
""" Append redraw association (handle, sort_handle).
Args:
handle: DXF entity handle (uppercase hex value without leading '0x')
sort_handle: sort handle (uppercase hex value without leading '0x')
"""
self.table[handle] = sort_handle
def clear(self):
""" Remove all handles from redraw order table. """
self.table = dict()
def set_handles(self, handles: Iterable[Tuple[str, str]]) -> None:
""" Set all redraw associations from iterable `handles`, after removing
all existing associations.
Args:
handles: iterable yielding (object_handle, sort_handle) tuples
"""
# The sort_handle doesn't have to be unique, some or all handles can
# share the same sort_handle and sort_handles can use existing handles
# too.
#
# The '0' handle can be used, but this sort_handle will be drawn as
# latest (on top of all other entities) and not as first as expected.
# Invalid entity handles will be ignored by AutoCAD.
self.table = dict(handles)
def remove_invalid_handles(self) -> None:
""" Remove all handles which do not exists in the drawing database. """
entitydb = self.doc.entitydb
self.table = {
handle: sort_handle for handle, sort_handle in self.table.items()
if handle in entitydb
}
def remove_handle(self, handle: str) -> None:
""" Remove handle of DXF entity from redraw order table.
Args:
handle: DXF entity handle (uppercase hex value without leading '0x')
"""
try:
del self.table[handle]
except KeyError:
pass
acdb_field = DefSubclass('AcDbField', {
'evaluator_id': DXFAttr(1),
'field_code': DXFAttr(2),
# Overflow of field code string
'field_code_overflow': DXFAttr(3),
# Number of child fields
'n_child_fields': DXFAttr(90),
# 360: Child field ID (AcDbHardOwnershipId); repeats for number of children
# 97: Number of object IDs used in the field code
# 331: Object ID used in the field code (AcDbSoftPointerId); repeats for
# the number of object IDs used in the field code
# 93: Number of the data set in the field
# 6: Key string for the field data; a key-field pair is repeated for the
# number of data sets in the field
# 7: Key string for the evaluated cache; this key is hard-coded
# as ACFD_FIELD_VALUE
# 90: Data type of field value
# 91: Long value (if data type of field value is long)
# 140: Double value (if data type of field value is double)
# 330: ID value, AcDbSoftPointerId (if data type of field value is ID)
# 92: Binary data buffer size (if data type of field value is binary)
# 310: Binary data (if data type of field value is binary)
# 301: Format string
# 9: Overflow of Format string
# 98: Length of format string
})
# todo: implement FIELD
# register when done
class Field(DXFObject):
""" DXF FIELD entity """
DXFTYPE = 'FIELD'
DXFATTRIBS = DXFAttributes(base_class, acdb_field)
```
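A minimal sketch for the XRECORD payload handling above. It assumes the objects section factory method `add_xrecord()` as provided by current ezdxf versions; the tag values are illustrative only:
```python
import ezdxf
from ezdxf.lldxf.tags import Tags

doc = ezdxf.new()
# Create a new XRECORD owned by the root dictionary:
xrecord = doc.objects.add_xrecord(owner=doc.rootdict.dxf.handle)

# The payload is a free-form tag list; plain (code, value) tuples are
# accepted, because totags() converts them to DXFTag instances at export:
xrecord.tags = Tags([(1, 'custom text'), (40, 3.1415), (90, 256)])
```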
#### File: ezdxf/entities/mesh.py
```python
from typing import TYPE_CHECKING, Iterable, Sequence, Tuple, Union, List, Dict
import array
import copy
from itertools import chain
from contextlib import contextmanager
from ezdxf.lldxf import validator
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, RETURN_DEFAULT, group_code_mapping
)
from ezdxf.lldxf.const import (
SUBCLASS_MARKER, DXF2000, DXFValueError, DXFStructureError,
)
from ezdxf.lldxf.packedtags import VertexArray, TagArray, TagList
from ezdxf.tools import take2
from .dxfentity import base_class, SubclassProcessor
from .dxfgfx import DXFGraphic, acdb_entity
from .factory import register_entity
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter, DXFNamespace, Vertex, Tags, Matrix44,
)
__all__ = ['Mesh', 'MeshData']
acdb_mesh = DefSubclass('AcDbSubDMesh', {
'version': DXFAttr(71, default=2),
'blend_crease': DXFAttr(
72, default=0,
validator=validator.is_integer_bool,
fixer=RETURN_DEFAULT,
),
# 0 is no smoothing
'subdivision_levels': DXFAttr(
91, default=0,
validator=validator.is_greater_or_equal_zero,
fixer=RETURN_DEFAULT,
),
# 92: Vertex count of level 0
# 10: Vertex position, multiple entries
# 93: Size of face list of level 0
# 90: Face list item, >=3 possible
# 90: length of face list
# 90: 1st vertex index
# 90: 2nd vertex index ...
# 94: Edge count of level 0
# 90: Vertex index of 1st edge
# 90: Vertex index of 2nd edge
# 95: Edge crease count of level 0
# 95: same as 94, or how else is the 'edge crease value' associated with the edge index?
# 140: Edge crease value
#
# Overriding properties: how does this work?
# 90: Count of sub-entity which property has been overridden
# 91: Sub-entity marker
# 92: Count of property was overridden
# 90: Property type
# 0 = Color
# 1 = Material
# 2 = Transparency
# 3 = Material mapper
})
acdb_mesh_group_codes = group_code_mapping(acdb_mesh)
class EdgeArray(TagArray):
DTYPE = 'L'
def __len__(self) -> int:
return len(self.values) // 2
def __iter__(self) -> Iterable[Tuple[int, int]]:
for edge in take2(self.values):
yield edge
def set_data(self, edges: Iterable[Tuple[int, int]]) -> None:
self.values = array.array(self.DTYPE, chain.from_iterable(edges))
def export_dxf(self, tagwriter: 'TagWriter'):
# count = count of edges not tags!
tagwriter.write_tag2(94, len(self.values) // 2)
for index in self.values:
tagwriter.write_tag2(90, index)
class FaceList(TagList):
def __len__(self) -> int:
return len(self.values)
def __iter__(self) -> Iterable[array.array]:
return iter(self.values)
def export_dxf(self, tagwriter: 'TagWriter'):
# count = count of tags not faces!
tagwriter.write_tag2(93, self.tag_count())
for face in self.values:
tagwriter.write_tag2(90, len(face))
for index in face:
tagwriter.write_tag2(90, index)
def tag_count(self) -> int:
return len(self.values) + sum(len(f) for f in self.values)
def set_data(self, faces: Iterable[Sequence[int]]) -> None:
_faces = []
for face in faces:
_faces.append(face_to_array(face))
self.values = _faces
def face_to_array(face: Sequence[int]) -> array.array:
max_index = max(face)
if max_index < 256:
dtype = 'B'
elif max_index < 65536:
dtype = 'I'
else:
dtype = 'L'
return array.array(dtype, face)
def create_vertex_array(tags: 'Tags', start_index: int) -> 'VertexArray':
vertex_tags = tags.collect_consecutive_tags(codes=(10,), start=start_index)
return VertexArray(data=chain.from_iterable(t.value for t in vertex_tags))
def create_face_list(tags: 'Tags', start_index: int) -> 'FaceList':
faces = FaceList()
faces_list = faces.values
face = []
counter = 0
for tag in tags.collect_consecutive_tags(codes=(90,), start=start_index):
if not counter:
# leading counter tag
counter = tag.value
if face:
# group code 90 = 32 bit integer
faces_list.append(face_to_array(face))
face = []
else:
# followed by count face tags
counter -= 1
face.append(tag.value)
# add last face
if face:
# group code 90 = 32 bit integer
faces_list.append(face_to_array(face))
return faces
def create_edge_array(tags: 'Tags', start_index: int) -> 'EdgeArray':
return EdgeArray(data=collect_values(
tags, start_index, code=90)) # int values
def collect_values(tags: 'Tags',
start_index: int,
code: int) -> Iterable[Union[float, int]]:
values = tags.collect_consecutive_tags(codes=(code,), start=start_index)
return (t.value for t in values)
def create_crease_array(tags: 'Tags', start_index: int) -> 'array.array':
return array.array('f', collect_values(
tags, start_index, code=140)) # float values
COUNT_ERROR_MSG = "MESH (#{}) without {} count."
@register_entity
class Mesh(DXFGraphic):
""" DXF MESH entity """
DXFTYPE = 'MESH'
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_mesh)
MIN_DXF_VERSION_FOR_EXPORT = DXF2000
def __init__(self):
super().__init__()
self._vertices = VertexArray() # vertices stored as array.array('d')
self._faces = FaceList() # face lists data
self._edges = EdgeArray() # edge indices stored as array.array('L')
self._creases = array.array('f') # creases stored as array.array('f')
def _copy_data(self, entity: 'Mesh') -> None:
""" Copy data: vertices, faces, edges, creases. """
entity._vertices = copy.deepcopy(self._vertices)
entity._faces = copy.deepcopy(self._faces)
entity._edges = copy.deepcopy(self._edges)
entity._creases = copy.deepcopy(self._creases)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.subclass_by_index(2)
# Load mesh data and remove their tags from subclass
self.load_mesh_data(tags, dxf.handle)
# Load remaining data into name space
processor.fast_load_dxfattribs(
dxf, acdb_mesh_group_codes, 2, recover=True)
return dxf
def load_mesh_data(self, mesh_tags: 'Tags', handle: str) -> None:
def process_vertices():
try:
vertex_count_index = mesh_tags.tag_index(92)
except DXFValueError:
raise DXFStructureError(
COUNT_ERROR_MSG.format(handle, 'vertex'))
vertices = create_vertex_array(mesh_tags, vertex_count_index + 1)
# Remove vertex count tag and all vertex tags
end_index = vertex_count_index + 1 + len(vertices)
del mesh_tags[vertex_count_index:end_index]
return vertices
def process_faces():
try:
face_count_index = mesh_tags.tag_index(93)
except DXFValueError:
raise DXFStructureError(COUNT_ERROR_MSG.format(handle, 'face'))
else:
# Remove face count tag and all face tags
faces = create_face_list(mesh_tags, face_count_index + 1)
end_index = face_count_index + 1 + faces.tag_count()
del mesh_tags[face_count_index:end_index]
return faces
def process_edges():
try:
edge_count_index = mesh_tags.tag_index(94)
except DXFValueError:
raise DXFStructureError(COUNT_ERROR_MSG.format(handle, 'edge'))
else:
edges = create_edge_array(mesh_tags, edge_count_index + 1)
# Remove edge count tag and all edge tags
end_index = edge_count_index + 1 + len(edges.values)
del mesh_tags[edge_count_index:end_index]
return edges
def process_creases():
try:
crease_count_index = mesh_tags.tag_index(95)
except DXFValueError:
raise DXFStructureError(
COUNT_ERROR_MSG.format(handle, 'crease'))
else:
creases = create_crease_array(mesh_tags, crease_count_index + 1)
# Remove crease count tag and all crease tags
end_index = crease_count_index + 1 + len(creases)
del mesh_tags[crease_count_index:end_index]
return creases
self._vertices = process_vertices()
self._faces = process_faces()
self._edges = process_edges()
self._creases = process_creases()
def export_entity(self, tagwriter: 'TagWriter') -> None:
""" Export entity specific data as DXF tags. """
super().export_entity(tagwriter)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_mesh.name)
self.dxf.export_dxf_attribs(tagwriter, [
'version', 'blend_crease', 'subdivision_levels'
])
self.export_mesh_data(tagwriter)
self.export_override_data(tagwriter)
def export_mesh_data(self, tagwriter: 'TagWriter'):
tagwriter.write_tag2(92, len(self.vertices))
self._vertices.export_dxf(tagwriter, code=10)
self._faces.export_dxf(tagwriter)
self._edges.export_dxf(tagwriter)
tagwriter.write_tag2(95, len(self.creases))
for crease_value in self.creases:
tagwriter.write_tag2(140, crease_value)
def export_override_data(self, tagwriter: 'TagWriter'):
tagwriter.write_tag2(90, 0)
@property
def creases(self) -> 'array.array':
""" Creases as :class:`array.array`. (read/write)"""
return self._creases
@creases.setter
def creases(self, values: Iterable[float]) -> None:
self._creases = array.array('f', values)
@property
def vertices(self):
""" Vertices as list like :class:`~ezdxf.lldxf.packedtags.VertexArray`.
(read/write)
"""
return self._vertices
@vertices.setter
def vertices(self, points: Iterable['Vertex']) -> None:
self._vertices = VertexArray(chain.from_iterable(points))
@property
def edges(self):
""" Edges as list like :class:`~ezdxf.lldxf.packedtags.TagArray`.
(read/write)
"""
return self._edges
@edges.setter
def edges(self, edges: Iterable[Tuple[int, int]]) -> None:
self._edges.set_data(edges)
@property
def faces(self):
""" Faces as list like :class:`~ezdxf.lldxf.packedtags.TagList`.
(read/write)
"""
return self._faces
@faces.setter
def faces(self, faces: Iterable[Sequence[int]]) -> None:
self._faces.set_data(faces)
def get_data(self) -> 'MeshData':
return MeshData(self)
def set_data(self, data: 'MeshData') -> None:
self.vertices = data.vertices
self._faces.set_data(data.faces)
self._edges.set_data(data.edges)
self.creases = data.edge_crease_values
@contextmanager
def edit_data(self) -> 'MeshData':
""" Context manager various mesh data, returns :class:`MeshData`.
Despite that vertices, edge and faces since `ezdxf` v0.8.9 are
accessible as packed data types, the usage of :class:`MeshData`
by context manager :meth:`edit_data` is still recommended.
"""
data = self.get_data()
yield data
self.set_data(data)
def transform(self, m: 'Matrix44') -> 'Mesh':
""" Transform the MESH entity by transformation matrix `m` inplace. """
self._vertices.transform(m)
return self
class MeshData:
def __init__(self, mesh):
self.vertices: List[Tuple[float, float, float]] = list(mesh.vertices)
self.faces: List[array.array] = list(mesh.faces)
self.edges: List[Tuple[int, int]] = list(mesh.edges)
self.edge_crease_values: array.array = mesh.creases
def add_face(self, vertices: Iterable[Sequence[float]]) -> Sequence[int]:
""" Add a face by coordinates, vertices is a list of ``(x, y, z)``
tuples.
"""
return self.add_entity(vertices, self.faces)
def add_edge(self, vertices: Sequence[Sequence[float]]) -> Sequence[int]:
""" Add an edge by coordinates, vertices is a list of two ``(x, y, z)``
tuples.
"""
if len(vertices) != 2:
raise DXFValueError(
"Parameter vertices has to be a list/tuple of 2 vertices "
"[(x1, y1, z1), (x2, y2, z2)].")
return self.add_entity(vertices, self.edges)
def add_entity(self, vertices: Iterable[Sequence[float]],
entity_list: List) -> Sequence[int]:
indices = [self.add_vertex(vertex) for vertex in vertices]
entity_list.append(indices)
return indices
def add_vertex(self, vertex: Sequence[float]) -> int:
if len(vertex) != 3:
raise DXFValueError(
'Parameter vertex has to be a 3-tuple (x, y, z).')
index = len(self.vertices)
self.vertices.append(vertex)
return index
def optimize(self, precision: int = 6):
"""
Try to reduce vertex count by merging near vertices. `precision`
defines the number of decimal places to which coordinates have to be equal
to merge two vertices.
"""
def remove_doublette_vertices() -> Dict[int, int]:
def prepare_vertices() -> Iterable[Tuple[float, float, float, int]]:
for index, vertex in enumerate(self.vertices):
x, y, z = vertex
yield (round(x, precision),
round(y, precision),
round(z, precision),
index)
sorted_vertex_list = sorted(prepare_vertices())
original_vertices = self.vertices
self.vertices = []
index_map: Dict[int, int] = {}
cmp_vertex = None
index = 0
while len(sorted_vertex_list):
vertex_entry = sorted_vertex_list.pop()
original_index = vertex_entry[3]
vertex = original_vertices[original_index]
if vertex != cmp_vertex:
# this is not a doublette
index = len(self.vertices)
self.vertices.append(vertex)
index_map[original_index] = index
cmp_vertex = vertex
else: # it is a doublette
index_map[original_index] = index
return index_map
def remap_faces() -> None:
self.faces = remap_indices(self.faces)
def remap_edges() -> None:
self.edges = remap_indices(self.edges)
def remap_indices(entity_list: Sequence[Sequence[int]]) -> List[Tuple]:
mapped_indices = [] # type: List[Tuple]
for entity in entity_list:
index_list = [index_map[index] for index in entity]
mapped_indices.append(tuple(index_list))
return mapped_indices
index_map = remove_doublette_vertices()
remap_faces()
remap_edges()
```
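A small sketch of the MESH workflow using the `edit_data()` context manager defined above; it assumes the layout factory method `add_mesh()`, and the coordinates are illustrative only:
```python
import ezdxf

doc = ezdxf.new('R2010')  # MESH requires DXF R2000 or later for export
msp = doc.modelspace()
mesh = msp.add_mesh()

with mesh.edit_data() as data:
    # add_face() creates the vertices and returns their indices:
    data.add_face([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)])
    data.add_face([(1, 0, 0), (2, 0, 0), (2, 1, 0), (1, 1, 0)])
    # Merge the near-duplicate vertices shared by both faces:
    data.optimize()

print(len(mesh.vertices))  # 6 vertices after merging the shared pair
```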
#### File: ezdxf/entities/mline.py
```python
from typing import TYPE_CHECKING, Dict, Iterable, List, Optional
from collections import OrderedDict, namedtuple
import math
from ezdxf.audit import AuditError
from ezdxf.entities.factory import register_entity
from ezdxf.lldxf import const, validator
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, XType, RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.tags import Tags, group_tags
from ezdxf.math import NULLVEC, X_AXIS, Y_AXIS, Z_AXIS, Vertex, Vec3, UCS
from .dxfentity import base_class, SubclassProcessor
from .dxfobj import DXFObject
from .dxfgfx import DXFGraphic, acdb_entity
from .objectcollection import ObjectCollection
import logging
if TYPE_CHECKING:
from ezdxf.eztypes import (
TagWriter, Drawing, DXFNamespace, EntityQuery, BaseLayout, Matrix44,
Auditor,
)
__all__ = ['MLine', 'MLineVertex', 'MLineStyle', 'MLineStyleCollection']
# Usage example: CADKitSamples\Lock-Off.dxf
logger = logging.getLogger('ezdxf')
def filter_close_vertices(vertices: Iterable[Vec3],
abs_tol: float = 1e-12) -> Iterable[Vec3]:
prev = None
for vertex in vertices:
if prev is None:
yield vertex
prev = vertex
else:
if not vertex.isclose(prev, abs_tol=abs_tol):
yield vertex
prev = vertex
acdb_mline = DefSubclass('AcDbMline', OrderedDict({
'style_name': DXFAttr(2, default='Standard'),
'style_handle': DXFAttr(340),
'scale_factor': DXFAttr(
40, default=1,
validator=validator.is_not_zero,
fixer=RETURN_DEFAULT,
),
# Justification
# 0 = Top (Right)
# 1 = Zero (Center)
# 2 = Bottom (Left)
'justification': DXFAttr(
70, default=0,
validator=validator.is_in_integer_range(0, 3),
fixer=RETURN_DEFAULT,
),
# Flags (bit-coded values):
# 1 = Has at least one vertex (code 72 is greater than 0)
# 2 = Closed
# 4 = Suppress start caps
# 8 = Suppress end caps
'flags': DXFAttr(71, default=1),
# Number of MLINE vertices
'count': DXFAttr(72, xtype=XType.callback, getter='__len__'),
# Number of elements in MLINESTYLE definition
'style_element_count': DXFAttr(73, default=2),
# start location in WCS!
'start_location': DXFAttr(10, xtype=XType.callback,
getter='start_location'),
# Normal vector of the entity plane, but all vertices in WCS!
'extrusion': DXFAttr(
210, xtype=XType.point3d, default=Z_AXIS,
validator=validator.is_not_null_vector,
fixer=RETURN_DEFAULT,
),
# MLine data:
# 11: vertex coordinates
# Multiple entries; one entry for each vertex.
# 12: Direction vector of segment starting at this vertex
# Multiple entries; one for each vertex.
# 13: Direction vector of miter at this vertex
# Multiple entries: one for each vertex.
# 74: Number of parameters for this element,
# repeats for each element in segment
# 41: Element parameters,
# repeats based on previous code 74
# 75: Number of area fill parameters for this element,
# repeats for each element in segment
# 42: Area fill parameters,
# repeats based on previous code 75
}))
acdb_mline_group_codes = group_code_mapping(acdb_mline)
# For information about line- and fill parametrization see comments in class
# MLineVertex().
#
# The 2 group codes in mline entities and mlinestyle objects are redundant
# fields. These groups should not be modified under any circumstances, although
# it is safe to read them and use their values. The correct fields to modify
# are as follows:
#
# Mline
# The 340 group in the same object, which indicates the proper MLINESTYLE
# object.
#
# Mlinestyle
# The 3 group value in the MLINESTYLE dictionary, which precedes the 350 group
# that has the handle or entity name of
# the current mlinestyle.
# Facts and assumptions not clearly defined by the DXF reference:
# - the reference line is defined by the group code 11 points (fact)
# - all line segments are parallel to the reference line (assumption)
# - all line vertices are located in the same plane, the orientation of the plane
# is defined by the extrusion vector (assumption)
# - the scale factor is applied to all geometries
# - the start- and end angle (MLineStyle) is also applied to the first and last
# miter direction vector
# - the last two points mean: all geometries and direction vectors can be used
# as stored in the DXF file; no additional scaling or rotation is necessary
# for the MLINE rendering. Disadvantage: minor changes of DXF attributes
# require a refresh of the MLineVertices.
# Ezdxf does not support the creation of line-break (gap) features, but will
# preserve this data if the MLINE stays unchanged.
# Editing the MLINE entity by ezdxf removes the line-break features (gaps).
class MLineVertex:
def __init__(self):
self.location: Vec3 = NULLVEC
self.line_direction: Vec3 = X_AXIS
self.miter_direction: Vec3 = Y_AXIS
# Line parametrization (74/41)
# ----------------------------
# The line parameterization is a list of float values.
# The list may contain zero or more items.
#
# The first value (miter-offset) is the distance from the vertex
# location along the miter direction vector to the point where the
# line element's path intersects the miter vector.
#
# The next value (line-start-offset) is the distance along the line
# direction from the miter/line path intersection point to the actual
# start of the line element.
#
# The next value (dash-length) is the distance from the start of the
# line element (dash) to the first break (or gap) in the line element.
# The successive values continue to list the start and stop points of
# the line element in this segment of the mline.
# Linetypes do not affect the line parametrization.
#
#
# 1. line element: [miter-offset, line-start-offset, dash, gap, dash, ...]
# 2. line element: [...]
# ...
self.line_params: List[List[float]] = []
""" The line parameterization is a list of float values.
The list may contain zero or more items.
"""
# Fill parametrization (75/42)
# ----------------------------
#
# The fill parameterization is also a list of float values.
# Similar to the line parameterization, it describes the
# parameterization of the fill area for this mline segment.
# The values are interpreted identically to the line parameters and when
# taken as a whole for all line elements in the mline segment, they
# define the boundary of the fill area for the mline segment.
#
# A common example of the use of the fill mechanism is when an
# unfilled mline crosses over a filled mline and "mledit" is used to
# cause the filled mline to appear unfilled in the crossing area.
# This would result in two fill parameters for each line element in the
# affected mline segment; one for the fill stop and one for the fill
# start.
#
# [dash-length, gap-length, ...]?
self.fill_params: List[List[float]] = []
def __copy__(self) -> 'MLineVertex':
vtx = self.__class__()
vtx.location = self.location
vtx.line_direction = self.line_direction
vtx.miter_direction = self.miter_direction
vtx.line_params = list(self.line_params)
vtx.fill_params = list(self.fill_params)
return vtx
copy = __copy__
@classmethod
def load(cls, tags: Tags) -> 'MLineVertex':
vtx = MLineVertex()
line_params = []
line_params_count = 0
fill_params = []
fill_params_count = 0
for code, value in tags:
if code == 11:
vtx.location = Vec3(value)
elif code == 12:
vtx.line_direction = Vec3(value)
elif code == 13:
vtx.miter_direction = Vec3(value)
elif code == 74:
line_params_count = value
if line_params_count == 0:
vtx.line_params.append(tuple())
else:
line_params = []
elif code == 41:
line_params.append(value)
line_params_count -= 1
if line_params_count == 0:
vtx.line_params.append(tuple(line_params))
line_params = []
elif code == 75:
fill_params_count = value
if fill_params_count == 0:
vtx.fill_params.append(tuple())
else:
fill_params = []
elif code == 42:
fill_params.append(value)
fill_params_count -= 1
if fill_params_count == 0:
vtx.fill_params.append(tuple(fill_params))
return vtx
def export_dxf(self, tagwriter: 'TagWriter'):
tagwriter.write_vertex(11, self.location)
tagwriter.write_vertex(12, self.line_direction)
tagwriter.write_vertex(13, self.miter_direction)
for line_params, fill_params in zip(self.line_params, self.fill_params):
tagwriter.write_tag2(74, len(line_params))
for param in line_params:
tagwriter.write_tag2(41, param)
tagwriter.write_tag2(75, len(fill_params))
for param in fill_params:
tagwriter.write_tag2(42, param)
@classmethod
def new(cls, start: Vertex, line_direction: Vertex, miter_direction: Vertex,
line_params: Iterable = None,
fill_params: Iterable = None) -> 'MLineVertex':
vtx = MLineVertex()
vtx.location = Vec3(start)
vtx.line_direction = Vec3(line_direction)
vtx.miter_direction = Vec3(miter_direction)
vtx.line_params = list(line_params or [])
vtx.fill_params = list(fill_params or [])
if len(vtx.line_params) != len(vtx.fill_params):
raise const.DXFValueError(
'Count mismatch of line- and fill parameters')
return vtx
def transform(self, m: 'Matrix44') -> 'MLineVertex':
""" Transform MLineVertex by transformation matrix `m` inplace. """
self.location = m.transform(self.location)
self.line_direction = m.transform_direction(self.line_direction)
self.miter_direction = m.transform_direction(self.miter_direction)
return self
@register_entity
class MLine(DXFGraphic):
DXFTYPE = 'MLINE'
DXFATTRIBS = DXFAttributes(base_class, acdb_entity, acdb_mline)
TOP = const.MLINE_TOP
ZERO = const.MLINE_ZERO
BOTTOM = const.MLINE_BOTTOM
HAS_VERTICES = const.MLINE_HAS_VERTICES
CLOSED = const.MLINE_CLOSED
SUPPRESS_START_CAPS = const.MLINE_SUPPRESS_START_CAPS
SUPPRESS_END_CAPS = const.MLINE_SUPPRESS_END_CAPS
def __init__(self):
super().__init__()
# The MLINE geometry stored in vertices is the final geometry;
# scaling factor, justification and MLineStyle settings are already
# applied. This is why the geometry has to be updated every time a
# change is applied.
self.vertices: List[MLineVertex] = []
def __len__(self):
""" Count of MLINE vertices. """
return len(self.vertices)
def _copy_data(self, entity: 'MLine') -> None:
entity.vertices = [v.copy() for v in self.vertices]
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.fast_load_dxfattribs(
dxf, acdb_mline_group_codes, 2, log=False)
self.load_vertices(tags)
return dxf
def load_vertices(self, tags: Tags) -> None:
self.vertices.extend(
MLineVertex.load(tags) for tags in group_tags(tags, splitcode=11)
)
def preprocess_export(self, tagwriter: 'TagWriter') -> bool:
# Do not export MLINE entities with fewer than 2 vertices
return len(self.vertices) > 1
# todo: check if line- and fill parametrization is compatible with
# MLINE style, requires same count of elements!
def export_entity(self, tagwriter: 'TagWriter') -> None:
# ezdxf does not export MLINE entities without vertices,
# see method preprocess_export()
self.set_flag_state(self.HAS_VERTICES, True)
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_mline.name)
self.dxf.export_dxf_attribs(tagwriter, acdb_mline.attribs.keys())
self.export_vertices(tagwriter)
def export_vertices(self, tagwriter: 'TagWriter') -> None:
for vertex in self.vertices:
vertex.export_dxf(tagwriter)
@property
def is_closed(self) -> bool:
""" Returns ``True`` if MLINE is closed.
Compatibility interface to :class:`Polyline`
"""
return self.get_flag_state(self.CLOSED)
def close(self, state: bool = True) -> None:
""" Get/set closed state of MLINE and update geometry accordingly.
Compatibility interface to :class:`Polyline`
"""
state = bool(state)
if state != self.is_closed:
self.set_flag_state(self.CLOSED, state)
self.update_geometry()
@property
def start_caps(self) -> bool:
""" Get/Set start caps state. ``True`` to enable start caps and
``False`` tu suppress start caps. """
return not self.get_flag_state(self.SUPPRESS_START_CAPS)
@start_caps.setter
def start_caps(self, value: bool) -> None:
""" Set start caps state. """
self.set_flag_state(self.SUPPRESS_START_CAPS, not bool(value))
@property
def end_caps(self) -> bool:
""" Get/Set end caps state. ``True`` to enable end caps and
``False`` to suppress end caps."""
return not self.get_flag_state(self.SUPPRESS_END_CAPS)
@end_caps.setter
def end_caps(self, value: bool) -> None:
""" Set start caps state. """
self.set_flag_state(self.SUPPRESS_END_CAPS, not bool(value))
def set_scale_factor(self, value: float) -> None:
""" Set the scale factor and update geometry accordingly. """
value = float(value)
if not math.isclose(self.dxf.scale_factor, value):
self.dxf.scale_factor = value
self.update_geometry()
def set_justification(self, value: int) -> None:
""" Set MLINE justification and update geometry accordingly.
See :attr:`dxf.justification` for valid settings.
"""
value = int(value)
if self.dxf.justification != value:
self.dxf.justification = value
self.update_geometry()
@property
def style(self) -> Optional['MLineStyle']:
""" Get associated MLINESTYLE. """
if self.doc is None:
return None
_style = self.doc.entitydb.get(self.dxf.style_handle)
if _style is None:
_style = self.doc.mline_styles.get(self.dxf.style_name)
return _style
def set_style(self, name: str) -> None:
""" Set MLINESTYLE by name and update geometry accordingly.
The MLINESTYLE definition must exist.
"""
if self.doc is None:
logger.debug("Can't change style of unbounded MLINE entity.")
return
try:
style = self.doc.mline_styles.get(name)
except const.DXFKeyError:
raise const.DXFValueError(f'Undefined MLINE style: {name}')
# Line- and fill parametrization depends on the count of
# elements, a change in the number of elements triggers a
# reset of the parametrization:
old_style = self.style
new_element_count = len(style.elements)
reset = False
if old_style:
# Do not trust the stored "style_element_count" value
reset = len(self.style.elements) != new_element_count
self.dxf.style_name = name
self.dxf.style_handle = style.dxf.handle
self.dxf.style_element_count = new_element_count
if reset:
self.update_geometry()
def start_location(self) -> Vec3:
""" Returns the start location of the reference line. Callback function
for :attr:`dxf.start_location`.
"""
if len(self.vertices):
return self.vertices[0].location
else:
return NULLVEC
def get_locations(self) -> List[Vec3]:
""" Returns the vertices of the reference line. """
return [v.location for v in self.vertices]
def extend(self, vertices: Iterable['Vertex']) -> None:
""" Append multiple vertices to the reference line.
It is possible to work with 3D vertices, but all vertices have to be in
the same plane and the normal vector of this plane is stored as
extrusion vector in the MLINE entity.
"""
vertices = Vec3.list(vertices)
if not vertices:
return
all_vertices = []
if len(self):
all_vertices.extend(self.get_locations())
all_vertices.extend(vertices)
self.generate_geometry(all_vertices)
def update_geometry(self) -> None:
""" Regenerate the MLINE geometry based on current settings. """
self.generate_geometry(self.get_locations())
def generate_geometry(self, vertices: List[Vec3]) -> None:
""" Regenerate the MLINE geometry for new reference line defined by
`vertices`.
"""
vertices = list(filter_close_vertices(vertices, abs_tol=1e-6))
if len(vertices) == 0:
self.clear()
return
elif len(vertices) == 1:
self.vertices = [MLineVertex.new(vertices[0], X_AXIS, Y_AXIS)]
return
style = self.style
if len(style.elements) == 0:
raise const.DXFStructureError(
f'No line elements defined in {str(style)}.')
def miter(dir1: Vec3, dir2: Vec3):
return ((dir1 + dir2) * 0.5).normalize().orthogonal()
ucs = UCS.from_z_axis_and_point_in_xz(
origin=vertices[0],
point=vertices[1],
axis=self.dxf.extrusion,
)
# Transform given vertices into UCS and project them into the
# UCS-xy-plane by setting the z-axis to 0:
vertices = [v.replace(z=0) for v in ucs.points_from_wcs(vertices)]
start_angle = style.dxf.start_angle
end_angle = style.dxf.end_angle
line_directions = [
(v2 - v1).normalize() for v1, v2 in
zip(vertices, vertices[1:])
]
if self.is_closed:
line_directions.append((vertices[0] - vertices[-1]).normalize())
closing_miter = miter(line_directions[0], line_directions[-1])
miter_directions = [closing_miter]
else:
closing_miter = None
line_directions.append(line_directions[-1])
miter_directions = [line_directions[0].rotate_deg(start_angle)]
for d1, d2 in zip(line_directions, line_directions[1:]):
miter_directions.append(miter(d1, d2))
if closing_miter is None:
miter_directions.pop()
miter_directions.append(line_directions[-1].rotate_deg(end_angle))
else:
miter_directions.append(closing_miter)
self.vertices = [
MLineVertex.new(v, d, m)
for v, d, m in zip(vertices, line_directions, miter_directions)
]
self._update_parametrization()
# reverse transformation into WCS
for v in self.vertices:
v.transform(ucs.matrix)
def _update_parametrization(self):
scale = self.dxf.scale_factor
style = self.style
justification = self.dxf.justification
offsets = [e.offset for e in style.elements]
min_offset = min(offsets)
max_offset = max(offsets)
shift = 0
if justification == self.TOP:
shift = -max_offset
elif justification == self.BOTTOM:
shift = -min_offset
for vertex in self.vertices:
angle = vertex.line_direction.angle_between(vertex.miter_direction)
try:
stretch = scale / math.sin(angle)
except ZeroDivisionError:
stretch = 1.0
vertex.line_params = [
((element.offset + shift) * stretch, 0.0) for element in
style.elements
]
vertex.fill_params = [tuple() for _ in style.elements]
def clear(self) -> None:
""" Remove all MLINE vertices. """
self.vertices.clear()
def remove_dependencies(self, other: 'Drawing' = None) -> None:
""" Remove all dependencies from current document.
(internal API)
"""
if not self.is_alive:
return
super().remove_dependencies(other)
self.dxf.style_handle = '0'
if other:
style = other.mline_styles.get(self.dxf.style_name)
if style:
self.dxf.style_handle = style.dxf.handle
return
self.dxf.style_name = 'Standard'
def transform(self, m: 'Matrix44') -> 'DXFGraphic':
""" Transform MLINE entity by transformation matrix `m` inplace.
"""
for vertex in self.vertices:
vertex.transform(m)
self.dxf.extrusion = m.transform_direction(self.dxf.extrusion)
scale = self.dxf.scale_factor
scale_vec = m.transform_direction(Vec3(scale, scale, scale))
if math.isclose(scale_vec.x, scale_vec.y, abs_tol=1e-6) and \
math.isclose(scale_vec.y, scale_vec.z, abs_tol=1e-6):
self.dxf.scale_factor = sum(scale_vec) / 3 # average error
# Non-uniform scaling will not be applied to the scale_factor!
self.update_geometry()
return self
def virtual_entities(self) -> Iterable[DXFGraphic]:
""" Yields 'virtual' parts of MLINE as LINE, ARC and HATCH entities.
These entities are located at their original positions, but are not stored
in the entity database, have no handle and are not assigned to any
layout.
"""
from ezdxf.render.mline import virtual_entities
return virtual_entities(self)
def explode(self, target_layout: 'BaseLayout' = None) -> 'EntityQuery':
""" Explode parts of MLINE as LINE, ARC and HATCH entities into target
layout, if target layout is ``None``, the target layout is the layout
of the MLINE.
Returns an :class:`~ezdxf.query.EntityQuery` container with all DXF parts.
Args:
target_layout: target layout for DXF parts, ``None`` for same layout
as source entity.
"""
from ezdxf.explode import explode_entity
return explode_entity(self, target_layout)
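    # Usage sketch (illustrative comment, not part of the original source):
    #   for e in mline.virtual_entities():
    #       print(e.dxftype())  # 'LINE', 'ARC' or 'HATCH'
    #   mline.explode()  # replace the MLINE by its parts in the same layout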
def audit(self, auditor: 'Auditor') -> None:
""" Validity check. """
def reset_mline_style(name='Standard'):
auditor.fixed_error(
code=AuditError.RESET_MLINE_STYLE,
message=f'Reset MLINESTYLE to "{name}" in {str(self)}.',
dxf_entity=self,
)
self.dxf.style_name = name
style = doc.mline_styles.get(name)
self.dxf.style_handle = style.dxf.handle
super().audit(auditor)
doc = auditor.doc
if doc is None:
return
# Audit associated MLINESTYLE name and handle:
style = doc.entitydb.get(self.dxf.style_handle)
if style is None: # handle is invalid, get style by name
style = doc.mline_styles.get(self.dxf.style_name, None)
if style is None:
reset_mline_style()
else: # fix MLINESTYLE handle:
auditor.fixed_error(
code=AuditError.INVALID_MLINESTYLE_HANDLE,
message=f'Fixed invalid style handle in {str(self)}.',
dxf_entity=self,
)
self.dxf.style_handle = style.dxf.handle
else: # update MLINESTYLE name silently
self.dxf.style_name = style.dxf.name
# Get current (maybe fixed) MLINESTYLE:
style = self.style
# Update style element count silently:
element_count = len(style.elements)
self.dxf.style_element_count = element_count
# Audit vertices:
for vertex in self.vertices:
if NULLVEC.isclose(vertex.line_direction):
break
if NULLVEC.isclose(vertex.miter_direction):
break
if len(vertex.line_params) != element_count:
break
# Ignore fill parameters.
else: # no break
return
# Invalid vertices found:
auditor.fixed_error(
code=AuditError.INVALID_MLINE_VERTEX,
message=f'Execute geometry update for {str(self)}.',
dxf_entity=self,
)
self.update_geometry()
acdb_mline_style = DefSubclass('AcDbMlineStyle', {
'name': DXFAttr(2, default='Standard'),
# Flags (bit-coded):
# 1 = Fill on
# 2 = Display miters
# 16 = Start square end (line) cap
# 32 = Start inner arcs cap
# 64 = Start round (outer arcs) cap
# 256 = End square (line) cap
# 512 = End inner arcs cap
# 1024 = End round (outer arcs) cap
'flags': DXFAttr(70, default=0),
# Style description (string, 255 characters maximum):
'description': DXFAttr(3, default=''),
# Fill color (integer, default = 256):
'fill_color': DXFAttr(
62, default=256,
validator=validator.is_valid_aci_color,
fixer=RETURN_DEFAULT,
),
# Start angle (real, default is 90 degrees):
'start_angle': DXFAttr(51, default=90),
# End angle (real, default is 90 degrees):
'end_angle': DXFAttr(52, default=90),
# 71: Number of elements
# 49: Element offset (real, no default).
# Multiple entries can exist; one entry for each element
# 62: Element color (integer, default = 0).
# Multiple entries can exist; one entry for each element
# 6: Element linetype (string, default = BYLAYER).
# Multiple entries can exist; one entry for each element
})
acdb_mline_style_group_codes = group_code_mapping(acdb_mline_style)
MLineStyleElement = namedtuple('MLineStyleElement', 'offset color linetype')
class MLineStyleElements:
def __init__(self, tags: Tags = None):
self.elements: List[MLineStyleElement] = []
if tags:
for e in self.parse_tags(tags):
data = MLineStyleElement(e.get('offset', 1.), e.get('color', 0),
e.get('linetype', 'BYLAYER'))
self.elements.append(data)
def __len__(self):
return len(self.elements)
def __getitem__(self, item):
return self.elements[item]
def __iter__(self):
return iter(self.elements)
def export_dxf(self, tagwriter: 'TagWriter'):
write_tag = tagwriter.write_tag2
write_tag(71, len(self.elements))
for offset, color, linetype in self.elements:
write_tag(49, offset)
write_tag(62, color)
write_tag(6, linetype)
def append(self, offset: float, color: int = 0,
linetype: str = 'BYLAYER') -> None:
""" Append a new line element.
Args:
offset: normal offset from the reference line: if justification is
``MLINE_ZERO``, positive values are above and negative values
are below the reference line.
color: :ref:`ACI` value
linetype: linetype name
"""
self.elements.append(MLineStyleElement(
float(offset), int(color), str(linetype)))
@staticmethod
def parse_tags(tags: Tags) -> Iterable[Dict]:
collector = None
for code, value in tags:
if code == 49:
if collector is not None:
yield collector
collector = {'offset': value}
elif code == 62:
collector['color'] = value
elif code == 6:
collector['linetype'] = value
if collector is not None:
yield collector
def ordered_indices(self) -> List[int]:
offsets = [e.offset for e in self.elements]
return [offsets.index(value) for value in sorted(offsets)]
@register_entity
class MLineStyle(DXFObject):
DXFTYPE = 'MLINESTYLE'
DXFATTRIBS = DXFAttributes(base_class, acdb_mline_style)
FILL = const.MLINESTYLE_FILL
MITER = const.MLINESTYLE_MITER
START_SQUARE = const.MLINESTYLE_START_SQARE
START_INNER_ARC = const.MLINESTYLE_START_INNER_ARC
START_ROUND = const.MLINESTYLE_START_ROUND
END_SQUARE = const.MLINESTYLE_END_SQUARE
END_INNER_ARC = const.MLINESTYLE_END_INNER_ARC
END_ROUND = const.MLINESTYLE_END_ROUND
def __init__(self):
super().__init__()
self.elements = MLineStyleElements()
def copy(self):
raise const.DXFTypeError('Copying of MLINESTYLE not supported.')
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
tags = processor.subclass_by_index(1)
try:
# Find index of the count tag:
index71 = tags.tag_index(71)
except const.DXFValueError:
# The count tag does not exist: DXF structure error?
pass
else:
self.elements = MLineStyleElements(tags[index71 + 1:])
# Remove processed tags:
del tags[index71:]
processor.fast_load_dxfattribs(
dxf, acdb_mline_style_group_codes, tags)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
tagwriter.write_tag2(const.SUBCLASS_MARKER, acdb_mline_style.name)
self.dxf.export_dxf_attribs(tagwriter, acdb_mline_style.attribs.keys())
self.elements.export_dxf(tagwriter)
def update_all(self):
""" Update all MLINE entities using this MLINESTYLE.
The update is required if elements were added or removed or the offset
of any element was changed.
"""
if self.doc:
handle = self.dxf.handle
mlines = (
e for e in self.doc.entitydb.values()
if e.dxftype() == 'MLINE'
)
for mline in mlines:
if mline.dxf.style_handle == handle:
mline.update_geometry()
def ordered_indices(self) -> List[int]:
return self.elements.ordered_indices()
def audit(self, auditor: 'Auditor') -> None:
super().audit(auditor)
if len(self.elements) == 0:
auditor.add_error(
code=AuditError.INVALID_MLINESTYLE_ELEMENT_COUNT,
message=f"No line elements defined in {str(self)}.",
dxf_entity=self
)
class MLineStyleCollection(ObjectCollection):
def __init__(self, doc: 'Drawing'):
super().__init__(doc, dict_name='ACAD_MLINESTYLE',
object_type='MLINESTYLE')
self.create_required_entries()
def create_required_entries(self) -> None:
if 'Standard' not in self.object_dict:
entity: MLineStyle = self.new('Standard')
entity.elements.append(.5, 256)
entity.elements.append(-.5, 256)
```
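The MLINE and MLINESTYLE classes above are normally used through the document interface. A minimal usage sketch, assuming the public ezdxf API (`ezdxf.new()`, `doc.mline_styles`, `msp.add_mline()`); the style name and offsets are illustrative:
```python
# Sketch: define a custom MLINESTYLE and draw an MLINE with it.
import ezdxf

doc = ezdxf.new('R2000')
msp = doc.modelspace()

# Two-element style: one line above and one below the reference line.
style = doc.mline_styles.new('Rails')
style.elements.append(offset=0.5, color=1)   # red line above
style.elements.append(offset=-0.5, color=5)  # blue line below

# Add an open MLINE along a reference line and assign the new style;
# set_style() updates the geometry if the element count changed.
mline = msp.add_mline([(0, 0), (10, 0), (10, 10)])
mline.set_style('Rails')
mline.set_scale_factor(2.0)  # triggers update_geometry()
doc.saveas('mline_demo.dxf')
```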
#### File: ezdxf/entities/ucs.py
```python
from typing import TYPE_CHECKING
import logging
from ezdxf.lldxf.attributes import (
DXFAttr, DXFAttributes, DefSubclass, XType, RETURN_DEFAULT,
group_code_mapping,
)
from ezdxf.lldxf.const import DXF12, SUBCLASS_MARKER
from ezdxf.lldxf import validator
from ezdxf.math import UCS, NULLVEC, X_AXIS, Y_AXIS
from ezdxf.entities.dxfentity import base_class, SubclassProcessor, DXFEntity
from ezdxf.entities.layer import acdb_symbol_table_record
from .factory import register_entity
logger = logging.getLogger('ezdxf')
if TYPE_CHECKING:
from ezdxf.eztypes import TagWriter, DXFNamespace
__all__ = ['UCSTable']
acdb_ucs = DefSubclass('AcDbUCSTableRecord', {
'name': DXFAttr(2, validator=validator.is_valid_table_name),
'flags': DXFAttr(70, default=0),
'origin': DXFAttr(10, xtype=XType.point3d, default=NULLVEC),
'xaxis': DXFAttr(
11, xtype=XType.point3d, default=X_AXIS,
validator=validator.is_not_null_vector,
fixer=RETURN_DEFAULT,
),
'yaxis': DXFAttr(
12, xtype=XType.point3d, default=Y_AXIS,
validator=validator.is_not_null_vector,
fixer=RETURN_DEFAULT,
),
})
acdb_ucs_group_codes = group_code_mapping(acdb_ucs)
@register_entity
class UCSTable(DXFEntity):
""" DXF UCS table entity """
DXFTYPE = 'UCS'
DXFATTRIBS = DXFAttributes(base_class, acdb_symbol_table_record, acdb_ucs)
def load_dxf_attribs(
self, processor: SubclassProcessor = None) -> 'DXFNamespace':
dxf = super().load_dxf_attribs(processor)
if processor:
processor.fast_load_dxfattribs(
dxf, acdb_ucs_group_codes, subclass=2)
return dxf
def export_entity(self, tagwriter: 'TagWriter') -> None:
super().export_entity(tagwriter)
if tagwriter.dxfversion > DXF12:
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_symbol_table_record.name)
tagwriter.write_tag2(SUBCLASS_MARKER, acdb_ucs.name)
self.dxf.export_dxf_attribs(tagwriter, [
'name', 'flags', 'origin', 'xaxis', 'yaxis'
])
def ucs(self) -> UCS:
""" Returns an :class:`ezdxf.math.UCS` object for this UCS table entry.
"""
return UCS(
origin=self.dxf.origin,
ux=self.dxf.xaxis,
uy=self.dxf.yaxis,
)
```
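The UCS table entry is mainly a bridge to the :class:`ezdxf.math.UCS` class. A short sketch, assuming the public ezdxf API (`doc.ucs` as the UCS table); the entry name and axes are illustrative:
```python
# Sketch: store a UCS table entry and use it for coordinate transformation.
import ezdxf

doc = ezdxf.new('R2000')
entry = doc.ucs.new('TiltedPlane', dxfattribs={
    'origin': (5, 5, 0),
    'xaxis': (1, 0, 0),
    'yaxis': (0, 0.7071, 0.7071),  # y-axis tilted 45 deg out of the xy-plane
})
ucs = entry.ucs()  # ezdxf.math.UCS object
wcs_point = ucs.to_wcs((1, 1, 0))  # UCS coordinates -> WCS coordinates
```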
#### File: ezdxf/layouts/layouts.py
```python
from typing import TYPE_CHECKING, Dict, Iterable, List, cast, Optional
import logging
from ezdxf.lldxf.const import DXFKeyError, DXFValueError, DXFInternalEzdxfError
from ezdxf.lldxf.const import (
MODEL_SPACE_R2000, PAPER_SPACE_R2000,
TMP_PAPER_SPACE_NAME,
)
from ezdxf.lldxf.validator import is_valid_table_name
from .layout import Layout, Modelspace, Paperspace
from ezdxf.entities import DXFEntity
if TYPE_CHECKING:
from ezdxf.eztypes import Dictionary, Drawing, Auditor
logger = logging.getLogger('ezdxf')
def key(name: str) -> str:
""" AutoCAD uses case insensitive layout names, but stores the name case
sensitive. """
return name.upper()
MODEL = key('Model')
class Layouts:
def __init__(self, doc: 'Drawing'):
""" Default constructor. (internal API) """
self.doc = doc
# Store layout names in normalized form: key(name)
self._layouts: Dict[str, Layout] = {}
# key: layout name as original case sensitive string; value: DXFLayout()
self._dxf_layouts: 'Dictionary' = cast('Dictionary',
self.doc.rootdict['ACAD_LAYOUT'])
@classmethod
def setup(cls, doc: 'Drawing'):
""" Constructor from scratch. (internal API) """
layouts = Layouts(doc)
layouts.setup_modelspace()
layouts.setup_paperspace()
return layouts
def __len__(self) -> int:
""" Returns count of existing layouts, including the modelspace
layout. """
return len(self._layouts)
def __contains__(self, name: str) -> bool:
""" Returns ``True`` if layout `name` exist. """
assert isinstance(name, str), type(str)
return key(name) in self._layouts
def __iter__(self) -> Iterable['Layout']:
""" Returns iterable of all layouts as :class:`~ezdxf.layouts.Layout`
objects, including the modelspace layout.
"""
return iter(self._layouts.values())
def _add_layout(self, name: str, layout: Layout):
layout.dxf.name = name
self._layouts[key(name)] = layout
self._dxf_layouts[name] = layout.dxf_layout
def _discard(self, layout: 'Layout'):
name = layout.name
self._dxf_layouts.discard(name)
del self._layouts[key(name)]
def setup_modelspace(self):
""" Modelspace setup. (internal API) """
self._new_special(Modelspace, 'Model', MODEL_SPACE_R2000,
dxfattribs={'taborder': 0})
def setup_paperspace(self):
""" First layout setup. (internal API) """
self._new_special(Paperspace, 'Layout1', PAPER_SPACE_R2000,
dxfattribs={'taborder': 1})
def _new_special(self, cls, name: str, block_name: str,
dxfattribs: dict) -> 'Layout':
if name in self._layouts:
raise DXFValueError(f'Layout "{name}" already exists')
dxfattribs['owner'] = self._dxf_layouts.dxf.handle
layout = cls.new(name, block_name, self.doc, dxfattribs=dxfattribs)
self._add_layout(name, layout)
return layout
def unique_paperspace_name(self) -> str:
""" Returns a unique paperspace name. (internal API)"""
blocks = self.doc.blocks
count = 0
while "*Paper_Space%d" % count in blocks:
count += 1
return "*Paper_Space%d" % count
def new(self, name: str, dxfattribs: dict = None) -> Paperspace:
""" Returns a new :class:`~ezdxf.layouts.Paperspace` layout.
Args:
name: layout name as shown in tabs in :term:`CAD` applications
dxfattribs: additional DXF attributes for the
:class:`~ezdxf.entities.layout.DXFLayout` entity
Raises:
DXFValueError: Invalid characters in layout name.
DXFValueError: Layout `name` already exists.
"""
assert isinstance(name, str), type(name)
if not is_valid_table_name(name):
raise DXFValueError('Layout name contains invalid characters.')
if name in self:
raise DXFValueError(f'Layout "{name}" already exists.')
dxfattribs = dict(dxfattribs or {}) # copy attribs
dxfattribs['owner'] = self._dxf_layouts.dxf.handle
dxfattribs.setdefault('taborder', len(self._layouts) + 1)
block_name = self.unique_paperspace_name()
layout = Paperspace.new(name, block_name, self.doc,
dxfattribs=dxfattribs)
# Default extents are ok!
# Reset limits to (0, 0) and (paper width, paper height)
layout.reset_limits()
self._add_layout(name, layout)
return layout
@classmethod
def load(cls, doc: 'Drawing') -> 'Layouts':
""" Constructor if loading from file. (internal API) """
layouts = cls(doc)
layouts.setup_from_rootdict()
# DXF R12: block/block_record for *Model_Space and *Paper_Space
# already exist:
if len(layouts) < 2: # restore missing DXF Layouts
layouts.restore('Model', MODEL_SPACE_R2000, taborder=0)
layouts.restore('Layout1', PAPER_SPACE_R2000, taborder=1)
return layouts
def restore(self, name: str, block_record_name: str, taborder: int) -> None:
""" Restore layout from block if DXFLayout does not exist.
(internal API) """
if name in self:
return
block_layout = self.doc.blocks.get(block_record_name)
self._new_from_block_layout(name, block_layout, taborder)
def _new_from_block_layout(self, name, block_layout,
taborder: int) -> 'Layout':
dxfattribs = {
'owner': self._dxf_layouts.dxf.handle,
'name': name,
'block_record_handle': block_layout.block_record_handle,
'taborder': taborder,
}
dxf_layout = cast('DXFLayout', self.doc.objects.new_entity(
'LAYOUT', dxfattribs=dxfattribs))
if key(name) == MODEL:
layout = Modelspace.load(dxf_layout, self.doc)
else:
layout = Paperspace.load(dxf_layout, self.doc)
self._add_layout(name, layout)
return layout
def setup_from_rootdict(self) -> None:
""" Setup layout manger from root dictionary. (internal API) """
for name, dxf_layout in self._dxf_layouts.items():
if key(name) == MODEL:
layout = Modelspace(dxf_layout, self.doc)
else:
layout = Paperspace(dxf_layout, self.doc)
# assert name == layout.dxf.name
self._layouts[key(name)] = layout
def modelspace(self) -> Modelspace:
""" Returns the :class:`~ezdxf.layouts.Modelspace` layout. """
return cast(Modelspace, self.get('Model'))
def names(self) -> List[str]:
""" Returns a list of all layout names, all names in original case
sensitive form. """
return [layout.name for layout in self._layouts.values()]
def get(self, name: Optional[str]) -> 'Layout':
""" Returns :class:`~ezdxf.layouts.Layout` by `name`, case insensitive
"Model" == "MODEL".
Args:
name: layout name as shown in tab, e.g. ``'Model'`` for modelspace
"""
name = name or self.names_in_taborder()[1] # first paperspace layout
return self._layouts[key(name)]
def rename(self, old_name: str, new_name: str) -> None:
""" Rename a layout from `old_name` to `new_name`.
Can not rename layout ``'Model'`` and the new name of a layout must
not exist.
Args:
old_name: actual layout name, case insensitive
new_name: new layout name, case insensitive
Raises:
DXFValueError: try to rename ``'Model'``
DXFValueError: Layout `new_name` already exists.
"""
assert isinstance(old_name, str), type(old_name)
assert isinstance(new_name, str), type(new_name)
if key(old_name) == MODEL:
raise DXFValueError('Can not rename model space.')
if new_name in self:
raise DXFValueError(f'Layout "{new_name}" already exists.')
if old_name not in self:
raise DXFValueError(f'Layout "{old_name}" does not exist.')
layout = self.get(old_name)
self._discard(layout)
layout.rename(new_name)
self._add_layout(new_name, layout)
def names_in_taborder(self) -> List[str]:
""" Returns all layout names in tab order as shown in :term:`CAD`
applications. """
names = [(layout.dxf.taborder, layout.name) for layout in
self._layouts.values()]
return [name for order, name in sorted(names)]
def get_layout_for_entity(self, entity: 'DXFEntity') -> 'Layout':
""" Returns the owner layout for a DXF `entity`. """
owner = entity.dxf.owner
if owner is None:
raise DXFKeyError('No associated layout, owner is None.')
return self.get_layout_by_key(entity.dxf.owner)
def get_layout_by_key(self, layout_key: str) -> 'Layout':
""" Returns a layout by its `layout_key`. (internal API) """
assert isinstance(layout_key, str), type(layout_key)
try:
block_record = self.doc.entitydb[layout_key]
dxf_layout = self.doc.entitydb[block_record.dxf.layout]
except KeyError:
raise DXFKeyError(f'Layout with key "{layout_key}" does not exist.')
return self.get(dxf_layout.dxf.name)
def get_active_layout_key(self):
""" Returns layout kay for the active paperspace layout.
(internal API) """
active_layout_block_record = self.doc.block_records.get(
PAPER_SPACE_R2000)
return active_layout_block_record.dxf.handle
def set_active_layout(self, name: str) -> None:
""" Set layout `name` as active paperspace layout. """
assert isinstance(name, str), type(name)
if key(name) == MODEL: # reserved layout name
raise DXFValueError('Can not set model space as active layout')
# raises KeyError if layout 'name' does not exist
new_active_layout = self.get(name)
old_active_layout_key = self.get_active_layout_key()
if old_active_layout_key == new_active_layout.layout_key:
return # layout 'name' is already the active layout
blocks = self.doc.blocks
new_active_paper_space_name = new_active_layout.block_record_name
blocks.rename_block(PAPER_SPACE_R2000, TMP_PAPER_SPACE_NAME)
blocks.rename_block(new_active_paper_space_name, PAPER_SPACE_R2000)
blocks.rename_block(TMP_PAPER_SPACE_NAME, new_active_paper_space_name)
def delete(self, name: str) -> None:
""" Delete layout `name` and destroy all entities in that layout.
Args:
name (str): layout name as shown in tabs
Raises:
DXFKeyError: if layout `name` does not exist
DXFValueError: deleting modelspace layout is not possible
DXFValueError: deleting last paperspace layout is not possible
"""
assert isinstance(name, str), type(name)
if key(name) == MODEL:
raise DXFValueError("Can not delete modelspace layout.")
layout = self.get(name)
if len(self) < 3:
raise DXFValueError("Can not delete last paperspace layout.")
if layout.layout_key == self.get_active_layout_key():
# Layout `name` is the active layout:
for layout_name in self._layouts:
# Set any other paperspace layout as active layout
if layout_name not in (key(name), MODEL):
self.set_active_layout(layout_name)
break
self._discard(layout)
layout.destroy()
def active_layout(self) -> Paperspace:
""" Returns the active paperspace layout. """
for layout in self:
if layout.is_active_paperspace:
return cast(Paperspace, layout)
raise DXFInternalEzdxfError('No active paperspace layout found.')
def audit(self, auditor: 'Auditor'):
from ezdxf.audit import AuditError
doc = auditor.doc
# Find/remove orphaned LAYOUT objects:
layouts = (o for o in doc.objects if o.dxftype() == 'LAYOUT')
for layout in layouts:
name = layout.dxf.get('name')
if name not in self:
auditor.fixed_error(
code=AuditError.ORPHANED_LAYOUT_ENTITY,
message=f'Removed orphaned {str(layout)} "{name}"'
)
doc.objects.delete_entity(layout)
# Find/remove orphaned paperspace BLOCK_RECORDS named: *Paper_Space...
psp_br_handles = {
br.dxf.handle for br in doc.block_records if
br.dxf.name.lower().startswith('*paper_space')
}
psp_layout_br_handles = {
layout.dxf.block_record_handle for layout in
self._layouts.values() if key(layout.name) != MODEL
}
mismatch = psp_br_handles.difference(psp_layout_br_handles)
if len(mismatch):
for handle in mismatch:
br = doc.entitydb.get(handle)
name = br.dxf.get('name')
auditor.fixed_error(
code=AuditError.ORPHANED_PAPER_SPACE_BLOCK_RECORD_ENTITY,
message=f'Removed orphaned layout {str(br)} "{name}"'
)
if name in doc.blocks:
doc.blocks.delete_block(name)
else:
doc.block_records.remove(name)
```
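The `Layouts` manager is exposed as `doc.layouts`; a minimal workflow sketch, assuming the public ezdxf API (layout names are illustrative):
```python
# Sketch: manage paperspace layouts through the Layouts manager.
import ezdxf

doc = ezdxf.new('R2000')
layouts = doc.layouts

plan = layouts.new('Plan')             # create a new paperspace layout
layouts.rename('Plan', 'Floor Plan')   # lookup is case insensitive
layouts.set_active_layout('Floor Plan')
print(layouts.names_in_taborder())     # e.g. ['Model', 'Layout1', 'Floor Plan']
layouts.delete('Floor Plan')           # destroys all entities in that layout
```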
#### File: ezdxf/math/bspline.py
```python
from typing import (
List, Iterable, Sequence, TYPE_CHECKING, Dict, Tuple, Union,
)
import math
from ezdxf.math import (
Vec3, NULLVEC, Basis, Evaluator, create_t_vector,
estimate_end_tangent_magnitude, estimate_tangents,
LUDecomposition, Matrix, BandedMatrixLU, compact_banded_matrix,
detect_banded_matrix, quadratic_equation,
linspace, distance_point_line_3d, arc_angle_span_deg,
)
from ezdxf.lldxf.const import DXFValueError
from ezdxf import PYPY
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
from ezdxf.math import (
ConstructionArc, ConstructionEllipse, Matrix44, Bezier4P,
)
# Acceleration of banded diagonal matrix solver kicks in at:
# N=15 for CPython on Windows and Linux
# N=60 for pypy3 on Windows and Linux
USE_BANDED_MATRIX_SOLVER_CPYTHON_LIMIT = 15
USE_BANDED_MATRIX_SOLVER_PYPY_LIMIT = 60
__all__ = [
# High level functions:
'fit_points_to_cad_cv', 'global_bspline_interpolation',
'local_cubic_bspline_interpolation', 'rational_bspline_from_arc',
'rational_bspline_from_ellipse', 'fit_points_to_cubic_bezier',
'open_uniform_bspline', 'closed_uniform_bspline',
# B-spline representation with derivatives support:
'BSpline',
# Low level interpolation function:
'unconstrained_global_bspline_interpolation',
'global_bspline_interpolation_end_tangents',
'global_bspline_interpolation_first_derivatives',
'local_cubic_bspline_interpolation_from_tangents',
# Low level knot parametrization functions:
'knots_from_parametrization', 'averaged_knots_unconstrained',
'averaged_knots_constrained',
'natural_knots_unconstrained', 'natural_knots_constrained', 'double_knots',
# Low level knot function:
'required_knot_values', 'uniform_knot_vector', 'open_uniform_knot_vector',
'required_fit_points', 'required_control_points',
]
def fit_points_to_cad_cv(fit_points: Iterable['Vertex'],
tangents: Iterable['Vertex'] = None,
estimate: str = '5-p') -> 'BSpline':
""" Returns a cubic :class:`BSpline` from fit points as close as possible
to common CAD applications like BricsCAD.
There exist infinite numerical correct solution for this setup, but some
facts are known:
- Global curve interpolation with start- and end derivatives, e.g. 6 fit points
creates 8 control vertices in BricsCAD
- Degree of B-spline is always 3, the stored degree is ignored,
this is only valid for B-splines defined by fit points
- Knot parametrization method is "chord"
- Knot distribution is "natural"
The last missing parameter is the start- and end tangents estimation method
used by BricsCAD, if these tangents are stored in the DXF file provide them
as argument `tangents` as 2-tuple (start, end) and the interpolated control
vertices will match the BricsCAD calculation, except for floating point
imprecision.
If the end tangents are not given, the start- and end tangent directions
will be estimated. The argument `estimate` lets you choose from different
estimation methods (first 3 letters are significant):
- "3-points": 3 point interpolation
- "5-points": 5 point interpolation
- "bezier": tangents from an interpolated cubic bezier curve
- "diff": finite difference
The estimation method "5-p" yields the closest match to the BricsCAD
rendering, but sometimes "bez" creates a better result.
If I figure out how BricsCAD estimates the end tangent directions, the
argument `estimate` gets an additional value for that case. The existing
estimation methods will perform the same way as now, except for bug fixes.
But the default value may change, therefore set the argument `estimate` to a
specific value to always get the same result in the future.
Args:
fit_points: points the spline is passing through
tangents: start- and end tangent, default is autodetect
estimate: tangent direction estimation method
.. versionchanged:: 0.16
removed unused arguments `degree` and `method`
"""
# See also Spline class in ezdxf/entities/spline.py:
# degree has no effect. A spline with degree=3 is always constructed when
# interpolating a series of fit points.
points = Vec3.list(fit_points)
if len(points) < 2:
raise ValueError("two ore more points required ")
m1, m2 = estimate_end_tangent_magnitude(points, method='chord')
if tangents is None:
tangents = estimate_tangents(points, method=estimate, normalize=False)
start_tangent = tangents[0].normalize(m1)
end_tangent = tangents[-1].normalize(m2)
else:
tangents = Vec3.list(tangents)
start_tangent = Vec3(tangents[0]).normalize(m1)
end_tangent = Vec3(tangents[-1]).normalize(m2)
return global_bspline_interpolation(
points,
degree=3,
tangents=(start_tangent, end_tangent),
method='chord',
)
def fit_points_to_cubic_bezier(fit_points: Iterable['Vertex']) -> 'BSpline':
""" Returns a cubic :class:`BSpline` from fit points **without** end
tangents.
This function uses cubic Bézier interpolation to create multiple Bézier
curves and combines them into a single B-spline; this works better for
short simple splines than :func:`fit_points_to_cad_cv`, but is worse
for longer and more complex splines.
Args:
fit_points: points the spline is passing through
.. versionadded:: 0.16
"""
points = Vec3.list(fit_points)
if len(points) < 2:
raise ValueError("two ore more points required ")
from ezdxf.math import cubic_bezier_interpolation, bezier_to_bspline
bezier_curves = cubic_bezier_interpolation(points)
return bezier_to_bspline(bezier_curves)
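# Usage sketch (illustrative comment, not part of the original source):
#   from ezdxf.math import fit_points_to_cad_cv, fit_points_to_cubic_bezier
#   points = [(0, 0), (2, 3), (5, 1), (8, 4)]
#   s1 = fit_points_to_cad_cv(points)        # close match to CAD rendering
#   s2 = fit_points_to_cubic_bezier(points)  # better for short simple splines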
def global_bspline_interpolation(
fit_points: Iterable['Vertex'],
degree: int = 3,
tangents: Iterable['Vertex'] = None,
method: str = 'chord') -> 'BSpline':
""" `B-spline`_ interpolation by the `Global Curve Interpolation`_.
Given are the fit points and the degree of the B-spline.
The function provides 4 methods for generating the parameter vector t:
- "uniform": creates a uniform t vector, from 0 to 1 evenly spaced, see
`uniform`_ method
- "chord", "distance": creates a t vector with values proportional to the
fit point distances, see `chord length`_ method
- "centripetal", "sqrt_chord": creates a t vector with values proportional
to the fit point sqrt(distances), see `centripetal`_ method
- "arc": creates a t vector with values proportional to the arc length
between fit points.
It is possible to constrain the curve by tangents: by start- and end
tangent if only two tangents are given, or by one tangent for each fit
point. If tangents are given, they represent 1st derivatives and should be
scaled if they are unit vectors; if only start- and end tangents are given,
the function :func:`~ezdxf.math.estimate_end_tangent_magnitude` helps with
an educated guess; if all tangents are given, scaling by chord length is a
reasonable choice (Piegl & Tiller).
Args:
fit_points: fit points of B-spline, as list of :class:`Vec3` compatible
objects
tangents: if only two vectors are given, take the first and the last
vector as start- and end tangent constraints or if for all fit
points a tangent is given use all tangents as interpolation
constraints (optional)
degree: degree of B-spline
method: calculation method for parameter vector t
Returns:
:class:`BSpline`
"""
fit_points = Vec3.list(fit_points)
count = len(fit_points)
order = degree + 1
if tangents:
# two control points for tangents will be added
count += 2
if order > count and tangents is None:
raise ValueError(f'More fit points required for degree {degree}')
t_vector = list(create_t_vector(fit_points, method))
# natural knot generation for uneven degrees else averaged
knot_generation_method = 'natural' if degree % 2 else 'average'
if tangents is not None:
tangents = Vec3.list(tangents)
if len(tangents) == 2:
control_points, knots = global_bspline_interpolation_end_tangents(
fit_points, tangents[0], tangents[1], degree, t_vector,
knot_generation_method)
elif len(tangents) == len(fit_points):
control_points, knots = global_bspline_interpolation_first_derivatives(
fit_points, tangents, degree, t_vector)
else:
raise ValueError(
'Invalid count of tangents, two tangents as start- and end '
'tangent constraints or one tangent for each fit point.'
)
else:
control_points, knots = unconstrained_global_bspline_interpolation(
fit_points, degree, t_vector,
knot_generation_method)
bspline = BSpline(control_points, order=order, knots=knots)
return bspline
def local_cubic_bspline_interpolation(
fit_points: Iterable['Vertex'],
method: str = '5-points',
tangents: Iterable['Vertex'] = None) -> 'BSpline':
""" `B-spline`_ interpolation by 'Local Cubic Curve Interpolation', which
creates B-spline from fit points and estimated tangent direction at start-,
end- and passing points.
Source: Piegl & Tiller: "The NURBS Book" - chapter 9.3.4
Available tangent estimation methods:
- "3-points": 3 point interpolation
- "5-points": 5 point interpolation
- "bezier": cubic bezier curve interpolation
- "diff": finite difference
or pass pre-calculated tangents, which overrides tangent estimation.
Args:
fit_points: all B-spline fit points as :class:`Vec3` compatible objects
method: tangent estimation method
tangents: tangents as :class:`Vec3` compatible objects (optional)
Returns:
:class:`BSpline`
"""
from .parametrize import estimate_tangents
fit_points = Vec3.list(fit_points)
if tangents:
tangents = Vec3.list(tangents)
else:
tangents = estimate_tangents(fit_points, method)
control_points, knots = local_cubic_bspline_interpolation_from_tangents(
fit_points, tangents)
return BSpline(control_points, order=4, knots=knots)
def required_knot_values(count: int, order: int) -> int:
""" Returns the count of required knot values for a B-spline of `order` and
`count` control points.
Args:
count: count of control points, in text-books referred to as "n + 1"
order: order of B-Spline, in text-books referred to as "k"
Relationship:
"p" is the degree of the B-spline, text-book notation.
- k = p + 1
- 2 ≤ k ≤ n + 1
"""
k = int(order)
n = int(count) - 1
p = k - 1
if not (2 <= k <= (n + 1)):
raise DXFValueError('Invalid count/order combination')
# n + p + 2 = count + order
return n + p + 2
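# Worked example (illustrative): a cubic B-spline (order k=4, degree p=3)
# with 7 control points (n=6) requires n + p + 2 = 11 knot values, which
# equals count + order = 7 + 4 = 11.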
def required_fit_points(order: int, tangents=True) -> int:
""" Returns the count of required fit points to calculate the spline
control points.
Args:
order: spline order (degree + 1)
tangents: start- and end tangent are given or estimated
"""
if tangents:
# If tangents are given or estimated two points for start- and end
# tangent will be added automatically for the global bspline
# interpolation. see function fit_points_to_cad_cv()
order -= 2
# required condition: order > count, see global_bspline_interpolation()
return max(order, 2)
def required_control_points(order: int) -> int:
""" Returns the required count of control points for a valid B-spline.
Args:
order: spline order (degree + 1)
Required condition: 2 <= order <= count, therefore: count >= order
"""
return max(order, 2)
def normalize_knots(knots: Sequence[float]) -> List[float]:
""" Normalize knot vector into range [0, 1]. """
min_val = knots[0]
max_val = knots[-1] - min_val
return [(v - min_val) / max_val for v in knots]
def uniform_knot_vector(count: int, order: int, normalize=False) -> List[float]:
""" Returns an uniform knot vector for a B-spline of `order` and `count`
control points.
`order` = degree + 1
Args:
count: count of control points
order: spline order
normalize: normalize values in range [0, 1] if ``True``
"""
if normalize:
max_value = float(count + order - 1)
else:
max_value = 1.0
return [knot_value / max_value for knot_value in range(count + order)]
def open_uniform_knot_vector(
count: int, order: int, normalize=False) -> List[float]:
""" Returns an open (clamped) uniform knot vector for a B-spline of `order`
and `count` control points.
`order` = degree + 1
Args:
count: count of control points
order: spline order
normalize: normalize values in range [0, 1] if ``True``
"""
k = count - order
if normalize:
max_value = float(count - order + 1)
tail = [1.0] * order
else:
max_value = 1.0
tail = [1.0 + k] * order
knots = [0.0] * order
knots.extend((1.0 + v) / max_value for v in range(k))
knots.extend(tail)
return knots
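# Worked example (illustrative):
#   uniform_knot_vector(4, 4)      -> [0., 1., 2., 3., 4., 5., 6., 7.] (unclamped)
#   open_uniform_knot_vector(4, 4) -> [0., 0., 0., 0., 1., 1., 1., 1.] (clamped)
# The open (clamped) form repeats the first and last knot value `order`
# times, which forces the curve to start and end at the outer control points.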
def knots_from_parametrization(
n: int, p: int, t: Iterable[float], method='average',
constrained=False) -> List[float]:
""" Returns a 'clamped' knot vector for B-splines. All knot values are
normalized in the range [0, 1].
Args:
n: count fit points - 1
p: degree of spline
t: parametrization vector, length(t_vector) == n, normalized [0, 1]
method: "average", "natural"
constrained: ``True`` for B-spline constrained by end derivatives
Returns:
List of n+p+2 knot values as floats
"""
order = int(p + 1)
if order > (n + 1):
raise DXFValueError(
'Invalid n/p combination, more fit points required.')
t = [float(v) for v in t]
if t[0] != 0.0 or t[-1] != 1.0:
raise ValueError('Parametrization vector t has to be normalized.')
if method == 'average':
return averaged_knots_constrained(n, p, t) \
if constrained \
else averaged_knots_unconstrained(n, p, t)
elif method == 'natural':
return natural_knots_constrained(n, p, t) \
if constrained \
else natural_knots_unconstrained(n, p, t)
else:
raise ValueError(f'Unknown knot generation method: {method}')
def averaged_knots_unconstrained(
n: int, p: int, t: Sequence[float]) -> List[float]:
""" Returns an averaged knot vector from parametrization vector `t` for an
unconstrained B-spline.
Args:
n: count of control points - 1
p: degree
t: parametrization vector, normalized [0, 1]
"""
assert t[0] == 0.0
assert t[-1] == 1.0
knots = [0.0] * (p + 1)
knots.extend(sum(t[j: j + p]) / p for j in range(1, n - p + 1))
if knots[-1] > 1.0:
raise ValueError('Normalized [0, 1] values required')
knots.extend([1.0] * (p + 1))
return knots
def averaged_knots_constrained(
n: int, p: int, t: Sequence[float]) -> List[float]:
""" Returns an averaged knot vector from parametrization vector `t` for a
constrained B-spline.
Args:
n: count of control points - 1
p: degree
t: parametrization vector, normalized [0, 1]
"""
assert t[0] == 0.0
assert t[-1] == 1.0
knots = [0.0] * (p + 1)
knots.extend(sum(t[j: j + p - 1]) / p for j in range(n - p))
knots.extend([1.0] * (p + 1))
return knots
def natural_knots_unconstrained(
n: int, p: int, t: Sequence[float]) -> List[float]:
""" Returns a 'natural' knot vector from parametrization vector `t` for an
unconstrained B-spline.
Args:
n: count of control points - 1
p: degree
t: parametrization vector, normalized [0, 1]
"""
assert t[0] == 0.0
assert t[-1] == 1.0
knots = [0.0] * (p + 1)
knots.extend(t[2: n - p + 2])
knots.extend([1.0] * (p + 1))
return knots
def natural_knots_constrained(
n: int, p: int, t: Sequence[float]) -> List[float]:
""" Returns a 'natural' knot vector from parametrization vector `t` for a
constrained B-spline.
Args:
n: count of control points - 1
p: degree
t: parametrization vector, normalized [0, 1]
"""
assert t[0] == 0.0
assert t[-1] == 1.0
knots = [0.0] * (p + 1)
knots.extend(t[1: n - p + 1])
knots.extend([1.0] * (p + 1))
return knots
def double_knots(n: int, p: int, t: Sequence[float]) -> List[float]:
""" Returns a knot vector from parametrization vector `t` for B-spline
constrained by first derivatives at all fit points.
Args:
n: count of fit points - 1
p: degree of spline
t: parametrization vector, first value has to be 0.0 and last value has
to be 1.0
"""
assert t[0] == 0.0
assert t[-1] == 1.0
u = [0.0] * (p + 1)
prev_t = 0.0
u1 = []
for t1 in t[1:-1]:
if p == 2:
# add one knot between prev_t and t
u1.append((prev_t + t1) / 2.0)
u1.append(t1)
else:
if prev_t == 0.0: # first knot
u1.append(t1 / 2)
else:
# add one knot at the 1st third and one knot
# at the 2nd third between prev_t and t.
u1.append((2 * prev_t + t1) / 3.0)
u1.append((prev_t + 2 * t1) / 3.0)
prev_t = t1
u.extend(u1[:n * 2 - p])
u.append((t[-2] + 1.0) / 2.0) # last knot
u.extend([1.0] * (p + 1))
return u
def _get_best_solver(matrix: Union[List, Matrix], degree: int):
""" Returns best suited linear equation solver depending on matrix
configuration and python interpreter.
"""
A = matrix if isinstance(matrix, Matrix) else Matrix(matrix=matrix)
if PYPY:
limit = USE_BANDED_MATRIX_SOLVER_PYPY_LIMIT
else:
limit = USE_BANDED_MATRIX_SOLVER_CPYTHON_LIMIT
if A.nrows < limit: # use default equation solver
lu = LUDecomposition(A)
else:
# Theory: band parameters m1, m2 are at maximum degree-1, for
# B-spline interpolation and approximation:
# m1 = m2 = degree-1
# But the speed gain is not that big and just to be sure:
m1, m2 = detect_banded_matrix(A, check_all=False)
A = compact_banded_matrix(A, m1, m2)
lu = BandedMatrixLU(A, m1, m2)
return lu
def unconstrained_global_bspline_interpolation(
fit_points: Sequence['Vertex'],
degree: int,
t_vector: Sequence[float],
knot_generation_method: str = 'average') -> Tuple[
List[Vec3], List[float]]:
""" Interpolates the control points for a B-spline by global interpolation
from fit points without any constraints.
Source: Piegl & Tiller: "The NURBS Book" - chapter 9.2.1
Args:
fit_points: points the B-spline has to pass
degree: degree of spline >= 2
t_vector: parametrization vector, first value has to be 0 and last
value has to be 1
knot_generation_method: knot generation method from parametrization
vector, "average" or "natural"
Returns:
2-tuple of control points as list of Vec3 objects and the knot vector
as list of floats
"""
# Source: http://pages.mtu.edu/~shene/COURSES/cs3621/NOTES/INT-APP/CURVE-INT-global.html
knots = knots_from_parametrization(len(fit_points) - 1, degree, t_vector,
knot_generation_method,
constrained=False)
N = Basis(knots=knots, order=degree + 1, count=len(fit_points))
solver = _get_best_solver([N.basis_vector(t) for t in t_vector], degree)
control_points = solver.solve_matrix(fit_points)
return Vec3.list(control_points.rows()), knots
def global_bspline_interpolation_end_tangents(
fit_points: List[Vec3],
start_tangent: Vec3,
end_tangent: Vec3,
degree: int,
t_vector: Sequence[float],
knot_generation_method: str = 'average') -> Tuple[
List[Vec3], List[float]]:
""" Interpolates the control points for a B-spline by global interpolation
from fit points and 1st derivatives for start- and end point as constraints.
These 'tangents' are 1st derivatives and not unit vectors; if an estimation
of the magnitudes is required, use the :func:`estimate_end_tangent_magnitude`
function.
Source: Piegl & Tiller: "The NURBS Book" - chapter 9.2.2
Args:
fit_points: points the B-spline has to pass
start_tangent: 1st derivative as start constraint
end_tangent: 1st derivative as end constraint
degree: degree of spline >= 2
t_vector: parametrization vector, first value has to be 0 and last
value has to be 1
knot_generation_method: knot generation method from parametrization
vector, "average" or "natural"
Returns:
2-tuple of control points as list of Vec3 objects and the knot vector
as list of floats
"""
n = len(fit_points) - 1
p = degree
if degree > 3:
# todo: 'average' produces weird results for degree > 3, 'natural' is
# better but also not good
knot_generation_method = 'natural'
knots = knots_from_parametrization(n + 2, p, t_vector,
knot_generation_method, constrained=True)
N = Basis(knots=knots, order=p + 1, count=n + 3)
rows = [N.basis_vector(u) for u in t_vector]
spacing = [0.0] * (n + 1)
rows.insert(1, [-1.0, +1.0] + spacing)
rows.insert(-1, spacing + [-1.0, +1.0])
fit_points.insert(1, start_tangent * (knots[p + 1] / p))
fit_points.insert(-1, end_tangent * ((1.0 - knots[-(p + 2)]) / p))
solver = _get_best_solver(rows, degree)
control_points = solver.solve_matrix(fit_points)
return Vec3.list(control_points.rows()), knots
def global_bspline_interpolation_first_derivatives(
fit_points: List[Vec3],
derivatives: List[Vec3],
degree: int,
t_vector: Sequence[float]) -> Tuple[List[Vec3], List[float]]:
""" Interpolates the control points for a B-spline by a global
interpolation from fit points and 1st derivatives as constraints.
Source: Piegl & Tiller: "The NURBS Book" - chapter 9.2.4
Args:
fit_points: points the B-spline has to pass
derivatives: 1st derivatives as constraints, not unit vectors!
Scaling by chord length is a reasonable choice (Piegl & Tiller).
degree: degree of spline >= 2
t_vector: parametrization vector, first value has to be 0 and last
value has to be 1
Returns:
2-tuple of control points as list of Vec3 objects and the knot vector
as list of floats
"""
def nbasis(t: float):
span = N.find_span(t)
front = span - p
back = count + p + 1 - span
for basis in N.basis_funcs_derivatives(span, t, n=1):
yield [0.0] * front + basis + [0.0] * back
p = degree
n = len(fit_points) - 1
knots = double_knots(n, p, t_vector)
count = len(fit_points) * 2
N = Basis(knots=knots, order=p + 1, count=count)
A = [
[1.0] + [0.0] * (count - 1), # Q0
[-1.0, +1.0] + [0.0] * (count - 2), # D0
]
for f in (nbasis(t) for t in t_vector[1:-1]):
A.extend(f) # Qi, Di
# swapped equations!
A.append([0.0] * (count - 2) + [-1.0, +1.0]) # Dn
A.append([0.0] * (count - 1) + [+1.0]) # Qn
# Build the right-hand side matrix B
B = []
for rows in zip(fit_points, derivatives):
B.extend(rows) # Qi, Di
# also swap last rows!
B[-1], B[-2] = B[-2], B[-1] # Dn, Qn
# modify equation for derivatives D0 and Dn
B[1] *= knots[p + 1] / p
B[-2] *= (1.0 - knots[-(p + 2)]) / p
solver = _get_best_solver(A, degree)
control_points = solver.solve_matrix(B)
return Vec3.list(control_points.rows()), knots
def local_cubic_bspline_interpolation_from_tangents(
fit_points: List[Vec3],
tangents: List[Vec3]) -> Tuple[List[Vec3], List[float]]:
""" Interpolates the control points for a cubic B-spline by local
interpolation from fit points and tangents as unit vectors for each fit
point. Use the :func:`estimate_tangents` function to estimate end tangents.
Source: Piegl & Tiller: "The NURBS Book" - chapter 9.3.4
Args:
fit_points: curve definition points - curve has to pass all given fit
points
tangents: one tangent vector for each fit point as unit vectors
Returns:
2-tuple of control points as list of Vec3 objects and the knot vector
as list of floats
"""
assert len(fit_points) == len(tangents)
assert len(fit_points) > 2
degree = 3
order = degree + 1
control_points = [fit_points[0]]
u = 0.0
params = []
for i in range(len(fit_points) - 1):
p0 = fit_points[i]
p3 = fit_points[i + 1]
t0 = tangents[i]
t3 = tangents[i + 1]
a = 16.0 - (t0 + t3).magnitude_square
b = 12.0 * (p3 - p0).dot(t0 + t3)
c = -36.0 * (p3 - p0).magnitude_square
alpha_plus, alpha_minus = quadratic_equation(a, b, c)
p1 = p0 + alpha_plus * t0 / 3.0
p2 = p3 - alpha_plus * t3 / 3.0
control_points.extend((p1, p2))
u += 3.0 * (p1 - p0).magnitude
params.append(u)
control_points.append(fit_points[-1])
knots = [0.0] * order
max_u = params[-1]
for v in params[:-1]:
knot = v / max_u
knots.extend((knot, knot))
knots.extend([1.0] * 4)
assert len(knots) == required_knot_values(len(control_points), order)
return control_points, knots
class BSpline:
""" Representation of a `B-spline`_ curve. The default configuration of
the knot vector is a uniform open `knot`_ vector ("clamped").
Factory functions:
- :func:`fit_points_to_cad_cv`
- :func:`fit_points_to_cubic_bezier`
- :func:`open_uniform_bspline`
- :func:`closed_uniform_bspline`
- :func:`rational_bspline_from_arc`
- :func:`rational_bspline_from_ellipse`
- :func:`global_bspline_interpolation`
- :func:`local_cubic_bspline_interpolation`
Args:
control_points: iterable of control points as :class:`Vec3` compatible
objects
order: spline order (degree + 1)
knots: iterable of knot values
weights: iterable of weight values
"""
__slots__ = ('_control_points', '_basis', '_clamped')
def __init__(self, control_points: Iterable['Vertex'],
order: int = 4,
knots: Iterable[float] = None,
weights: Iterable[float] = None):
self._control_points = Vec3.tuple(control_points)
count = len(self._control_points)
order = int(order)
if order > count:
raise DXFValueError(
f'More control points required for order {order}')
if knots is None:
knots = open_uniform_knot_vector(count, order, normalize=True)
else:
knots = tuple(knots)
required_knot_count = count + order
if len(knots) != required_knot_count:
raise ValueError(
f"{required_knot_count} knot values required, got {len(knots)}.")
if knots[0] != 0.0:
knots = normalize_knots(knots)
self._basis = Basis(knots, order, count, weights=weights)
self._clamped = not any(knots[:order])
def __str__(self):
return f'BSpline degree={self.degree}, {self.count} ' \
f'control points, {len(self.knots())} knot values, ' \
f'{len(self.weights())} weights'
@property
def control_points(self) -> Sequence[Vec3]:
""" Control points as tuple of :class:`~ezdxf.math.Vec3` """
return self._control_points
@property
def count(self) -> int:
""" Count of control points, (n + 1 in text book notation). """
return len(self._control_points)
@property
def max_t(self) -> float:
""" Biggest `knot`_ value. """
return self._basis.max_t
@property
def order(self) -> int:
""" Order (k) of B-spline = p + 1 """
return self._basis.order
@property
def degree(self) -> int:
""" Degree (p) of B-spline = order - 1 """
return self._basis.degree
@property
def evaluator(self) -> Evaluator:
return Evaluator(self._basis, self._control_points)
@property
def is_rational(self):
""" Returns ``True`` if curve is a rational B-spline. (has weights) """
return self._basis.is_rational
@property
def is_clamped(self):
""" Returns ``True`` if curve is a clamped (open) B-spline. """
return self._clamped
@staticmethod
def from_fit_points(points: Iterable['Vertex'], degree=3,
method='chord') -> 'BSpline':
""" Returns :class:`BSpline` defined by fit points. """
return global_bspline_interpolation(points, degree, method=method)
@staticmethod
def ellipse_approximation(ellipse: 'ConstructionEllipse',
num: int = 16) -> 'BSpline':
""" Returns an ellipse approximation as :class:`BSpline` with `num`
control points.
"""
return global_bspline_interpolation(
ellipse.vertices(ellipse.params(num)), degree=2)
@staticmethod
def arc_approximation(arc: 'ConstructionArc', num: int = 16) -> 'BSpline':
""" Returns an arc approximation as :class:`BSpline` with `num`
control points.
"""
return global_bspline_interpolation(arc.vertices(arc.angles(num)),
degree=2)
@staticmethod
def from_ellipse(ellipse: 'ConstructionEllipse') -> 'BSpline':
""" Returns the ellipse as :class:`BSpline` of 2nd degree with as few
control points as possible.
"""
return rational_bspline_from_ellipse(ellipse, segments=1)
@staticmethod
def from_arc(arc: 'ConstructionArc') -> 'BSpline':
""" Returns the arc as :class:`BSpline` of 2nd degree with as few control
points as possible.
"""
return rational_bspline_from_arc(arc.center, arc.radius, arc.start_angle,
arc.end_angle, segments=1)
@staticmethod
def from_nurbs_python_curve(curve) -> 'BSpline':
""" Interface to the `NURBS-Python <https://pypi.org/project/geomdl/>`_
package.
Returns a :class:`BSpline` object from a :class:`geomdl.BSpline.Curve`
object.
"""
return BSpline(
control_points=curve.ctrlpts,
order=curve.order,
knots=curve.knotvector,
weights=curve.weights,
)
def reverse(self) -> 'BSpline':
""" Returns a new :class:`BSpline` object with reversed control point
order.
"""
def reverse_knots():
for k in reversed(normalize_knots(self.knots())):
yield 1.0 - k
return self.__class__(
control_points=reversed(self.control_points),
order=self.order,
knots=reverse_knots(),
weights=reversed(self.weights()) if self.is_rational else None,
)
def knots(self) -> Tuple[float, ...]:
""" Returns a tuple of `knot`_ values as floats, the knot vector
**always** has order + count values (n + p + 2 in text book notation).
"""
return self._basis.knots
def weights(self) -> Tuple[float, ...]:
""" Returns a tuple of weights values as floats, one for each control
point or an empty tuple.
"""
return self._basis.weights
def approximate(self, segments: int = 20) -> Iterable[Vec3]:
""" Approximates curve by vertices as :class:`Vec3` objects, vertices
count = segments + 1.
"""
return self.evaluator.points(self.params(segments))
def params(self, segments: int) -> Iterable[float]:
""" Yield evenly spaced parameters for given segment count. """
# works for clamped and unclamped curves
knots = self.knots()
lower_bound = knots[self.order - 1]
upper_bound = knots[self.count]
return linspace(lower_bound, upper_bound, segments + 1)
def flattening(self, distance: float,
segments: int = 4) -> Iterable[Vec3]:
""" Adaptive recursive flattening. The argument `segments` is the
minimum count of approximation segments between two knots, if the
distance from the center of the approximation segment to the curve is
bigger than `distance` the segment will be subdivided.
Args:
distance: maximum distance from the projected curve point onto the
segment chord.
segments: minimum segment count between two knots
.. versionadded:: 0.15
"""
def subdiv(s: Vec3, e: Vec3, start_t: float, end_t: float):
mid_t = (start_t + end_t) * 0.5
m = evaluator.point(mid_t)
try:
_dist = distance_point_line_3d(m, s, e)
except ZeroDivisionError: # s == e
_dist = 0
if _dist < distance:
yield e
else:
yield from subdiv(s, m, start_t, mid_t)
yield from subdiv(m, e, mid_t, end_t)
evaluator = self.evaluator
knots = self.knots()
if self.is_clamped:
lower_bound = 0.0
else:
lower_bound = knots[self.order - 1]
knots = knots[:self.count + 1]
knots = sorted(set(knots))  # remove duplicate knot values, keep ascending order
t = lower_bound
start_point = evaluator.point(t)
yield start_point
for t1 in knots[1:]:
delta = (t1 - t) / segments
while t < t1:
next_t = t + delta
if math.isclose(next_t, t1):
next_t = t1
end_point = evaluator.point(next_t)
yield from subdiv(start_point, end_point, t, next_t)
t = next_t
start_point = end_point
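    # Usage sketch (illustrative comment, not part of the original source):
    #   vertices = list(spline.flattening(distance=0.01))
    #   msp.add_lwpolyline(vertices)  # polyline approximation of the curve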
def point(self, t: float) -> Vec3:
""" Returns point for parameter `t`.
Args:
t: parameter in range [0, max_t]
"""
return self.evaluator.point(t)
def points(self, t: Iterable[float]) -> Iterable[Vec3]:
""" Yields points for parameter vector `t`.
Args:
t: parameters in range [0, max_t]
"""
return self.evaluator.points(t)
def derivative(self, t: float, n: int = 2) -> List[Vec3]:
""" Return point and derivatives up to `n` <= degree for parameter `t`.
e.g. n=1 returns point and 1st derivative.
Args:
t: parameter in range [0, max_t]
n: compute all derivatives up to n <= degree
Returns:
n+1 values as :class:`Vec3` objects
"""
return self.evaluator.derivative(t, n)
def derivatives(self, t: Iterable[float], n: int = 2) -> Iterable[
List[Vec3]]:
""" Yields points and derivatives up to `n` <= degree for parameter
vector `t`.
e.g. n=1 returns point and 1st derivative.
Args:
t: parameters in range [0, max_t]
n: compute all derivatives up to n <= degree
Returns:
List of n+1 values as :class:`Vec3` objects
"""
return self.evaluator.derivatives(t, n)
def insert_knot(self, t: float) -> 'BSpline':
""" Insert an additional knot, without altering the shape of the curve.
Returns a new :class:`BSpline` object.
Args:
t: position of new knot 0 < t < max_t
"""
if self._basis.is_rational:
raise TypeError('Rational B-splines not supported.')
knots = list(self._basis.knots)
cpoints = list(self._control_points)
p = self.degree
def new_point(index: int) -> Vec3:
a = (t - knots[index]) / (knots[index + p] - knots[index])
return cpoints[index - 1] * (1 - a) + cpoints[index] * a
if t <= 0. or t >= self.max_t:
raise DXFValueError('Invalid position t')
k = self._basis.find_span(t)
if k < p:
raise DXFValueError('Invalid position t')
cpoints[k - p + 1:k] = [new_point(i) for i in range(k - p + 1, k + 1)]
knots.insert(k + 1, t) # knot[k] <= t < knot[k+1]
return BSpline(cpoints, self.order, knots)
def knot_refinement(self, u: Iterable[float]) -> 'BSpline':
""" Insert multiple knots, without altering the shape of the curve.
Returns a new :class:`BSpline` object.
Args:
u: vector of new knots t and for each t: 0 < t < max_t
"""
spline = self
for t in u:
spline = spline.insert_knot(t)
return spline
def transform(self, m: 'Matrix44') -> 'BSpline':
""" Returns a new :class:`BSpline` object transformed by a
:class:`Matrix44` transformation matrix.
"""
cpoints = m.transform_vertices(self.control_points)
return BSpline(cpoints, self.order, self.knots(), self.weights())
def to_nurbs_python_curve(self):
""" Returns a :class:`geomdl.BSpline.Curve` object, if the
`NURBS-Python <https://pypi.org/project/geomdl/>`_ package is installed.
"""
if self._basis.is_rational:
from geomdl.NURBS import Curve
else:
from geomdl.BSpline import Curve
curve = Curve()
curve.degree = self.degree
curve.ctrlpts = [v.xyz for v in self.control_points]
curve.knotvector = self.knots()
curve.weights = self.weights()
return curve
def bezier_decomposition(self) -> Iterable[List[Vec3]]:
""" Decompose a non-rational B-spline into multiple Bézier curves.
This is the preferred method to represent the most common non-rational
B-splines of 3rd degree by cubic Bézier curves, which are often supported
by render backends.
Returns:
Yields control points of Bézier curves, each Bézier segment
has degree+1 control points e.g. B-spline of 3rd degree yields
cubic Bézier curves of 4 control points.
"""
# Source: "The NURBS Book": Algorithm A5.6
if self._basis.is_rational:
raise TypeError('Rational B-splines not supported.')
if not self.is_clamped:
raise TypeError('Clamped B-Spline required.')
n = self.count - 1
p = self.degree
knots = self._basis.knots # U
control_points = self._control_points # Pw
alphas = [0.0] * len(knots)
m = n + p + 1
a = p
b = p + 1
bezier_points = list(control_points[0: p + 1]) # Qw
while b < m:
next_bezier_points = [NULLVEC] * (p + 1)
i = b
while b < m and math.isclose(knots[b + 1], knots[b]):
b += 1
mult = b - i + 1
if mult < p:
numer = knots[b] - knots[a]
for j in range(p, mult, -1):
alphas[j - mult - 1] = numer / (knots[a + j] - knots[a])
r = p - mult
for j in range(1, r + 1):
save = r - j
s = mult + j
for k in range(p, s - 1, -1):
alpha = alphas[k - s]
bezier_points[k] = bezier_points[k] * alpha + \
bezier_points[k - 1] * (1.0 - alpha)
if b < m:
next_bezier_points[save] = bezier_points[p]
yield bezier_points
if b < m:
for i in range(p - mult, p + 1):
next_bezier_points[i] = control_points[b - p + i]
a = b
b += 1
bezier_points = next_bezier_points
def cubic_bezier_approximation(
self, level: int = 3, segments: int = None) -> Iterable['Bezier4P']:
""" Approximate arbitrary B-splines (degree != 3 and/or rational) by
multiple segments of cubic Bézier curves. The choice of cubic Bézier
curves is based on the wide support for these curves by many render
backends. For cubic non-rational B-splines, which are probably the most
commonly used B-splines, :meth:`bezier_decomposition` is the better choice.
1. approximation by `level`: an educated guess, the first level of
approximation segments is based on the count of control points
and their distribution along the B-spline, every additional level
is a subdivision of the previous level.
E.g. a B-Spline of 8 control points has 7 segments at the first level,
14 at the 2nd level and 28 at the 3rd level, a level >= 3 is recommended.
2. approximation by a given count of evenly distributed approximation
segments.
Args:
level: subdivision level of approximation segments (ignored if
argument `segments` is not ``None``)
segments: absolute count of approximation segments
Returns:
Yields control points of cubic Bézier curves as :class:`Bezier4P`
objects
"""
if segments is None:
points = list(self.points(self.approximation_params(level)))
else:
points = list(self.approximate(segments))
from .bezier_interpolation import cubic_bezier_interpolation
return cubic_bezier_interpolation(points)
def approximation_params(self, level: int = 3) -> List[float]:
""" Returns an educated guess, the first level of approximation
segments is based on the count of control points and their distribution
along the B-spline, every additional level is a subdivision of the
previous level.
E.g. a B-Spline of 8 control points has 7 segments at the first level,
14 at the 2nd level and 28 at the 3rd level.
"""
params = list(create_t_vector(self._control_points, 'chord'))
if self.max_t != 1.0:
max_t = self.max_t
params = [p * max_t for p in params]
for _ in range(level - 1):
params = list(subdivide_params(params))
return params
def subdivide_params(p: List[float]) -> Iterable[float]:
for i in range(len(p) - 1):
yield p[i]
yield (p[i] + p[i + 1]) / 2.0
yield p[-1]
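# Editor's note: subdivide_params([0.0, 1.0, 2.0]) yields
# [0.0, 0.5, 1.0, 1.5, 2.0]; each subdivision level doubles the segment count.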
def open_uniform_bspline(control_points: Iterable['Vertex'], order: int = 4,
weights: Iterable[float] = None) -> BSpline:
""" Creates an open uniform (periodic) `B-spline`_ curve (`open curve`_).
This is an unclamped curve, which means the curve does not pass through
any of the control points.
Args:
control_points: iterable of control points as :class:`Vec3` compatible
objects
order: spline order (degree + 1)
weights: iterable of weight values
"""
control_points = Vec3.tuple(control_points)
knots = uniform_knot_vector(len(control_points), order, normalize=False)
return BSpline(control_points, order=order, knots=knots, weights=weights)
def closed_uniform_bspline(control_points: Iterable['Vertex'], order: int = 4,
weights: Iterable[float] = None) -> BSpline:
""" Creates an closed uniform (periodic) `B-spline`_ curve (`open curve`_).
This B-spline does not pass any of the control points.
Args:
control_points: iterable of control points as :class:`Vec3` compatible
objects
order: spline order (degree + 1)
weights: iterable of weight values
"""
control_points = Vec3.list(control_points)
control_points.extend(control_points[:order - 1])
if weights is not None:
weights = list(weights)
weights.extend(weights[:order - 1])
return open_uniform_bspline(control_points, order, weights)
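# Editor's usage sketch (hypothetical values, not part of the original module);
# both factory functions take the same arguments and neither resulting curve
# passes through its control points:
#   open_spline = open_uniform_bspline([(0, 0), (2, 4), (5, 4), (7, 0)], order=3)
#   closed_spline = closed_uniform_bspline([(0, 0), (2, 4), (5, 4), (7, 0)], order=3)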
def rational_bspline_from_arc(
center: Vec3 = (0, 0), radius: float = 1, start_angle: float = 0,
end_angle: float = 360,
segments: int = 1) -> BSpline:
""" Returns a rational B-splines for a circular 2D arc.
Args:
center: circle center as :class:`Vec3` compatible object
radius: circle radius
start_angle: start angle in degrees
end_angle: end angle in degrees
segments: count of spline segments, at least one segment for each
quarter (90 deg), default is 1, for as few as needed.
"""
center = Vec3(center)
radius = float(radius)
start_rad = math.radians(start_angle % 360)
end_rad = start_rad + math.radians(
arc_angle_span_deg(start_angle, end_angle)
)
control_points, weights, knots = nurbs_arc_parameters(
start_rad, end_rad, segments)
return BSpline(
control_points=(center + (p * radius) for p in control_points),
weights=weights,
knots=knots,
order=3,
)
PI_2 = math.pi / 2.0
def rational_bspline_from_ellipse(ellipse: 'ConstructionEllipse',
segments: int = 1) -> BSpline:
""" Returns a rational B-splines for an elliptic arc.
Args:
ellipse: ellipse parameters as :class:`~ezdxf.math.ConstructionEllipse`
object
segments: count of spline segments, at least one segment for each
quarter (π/2), default is 1, for as few as needed.
"""
start_angle = ellipse.start_param % math.tau
end_angle = start_angle + ellipse.param_span
def transform_control_points() -> Iterable[Vec3]:
center = Vec3(ellipse.center)
x_axis = ellipse.major_axis
y_axis = ellipse.minor_axis
for p in control_points:
yield center + x_axis * p.x + y_axis * p.y
control_points, weights, knots = nurbs_arc_parameters(start_angle,
end_angle, segments)
return BSpline(
control_points=transform_control_points(),
weights=weights,
knots=knots,
order=3,
)
def nurbs_arc_parameters(start_angle: float, end_angle: float,
segments: int = 1):
""" Returns a rational B-spline parameters for a circular 2D arc with center
at (0, 0) and a radius of 1.
Args:
start_angle: start angle in radians
end_angle: end angle in radians
segments: count of segments, at least one segment for each quarter (π/2)
Returns:
control_points, weights, knots
"""
# Source: https://www.researchgate.net/publication/283497458_ONE_METHOD_FOR_REPRESENTING_AN_ARC_OF_ELLIPSE_BY_A_NURBS_CURVE/citation/download
if segments < 1:
raise ValueError('Invalid argument segments (must be >= 1).')
delta_angle = end_angle - start_angle
arc_count = max(math.ceil(delta_angle / PI_2), segments)
segment_angle = delta_angle / arc_count
segment_angle_2 = segment_angle / 2
arc_weight = math.cos(segment_angle_2)
# First control point
control_points = [Vec3(math.cos(start_angle), math.sin(start_angle))]
weights = [1.0]
angle = start_angle
d = 1.0 / math.cos(segment_angle / 2.0)
for _ in range(arc_count):
# next control point between points on arc
angle += segment_angle_2
control_points.append(Vec3(math.cos(angle) * d, math.sin(angle) * d))
weights.append(arc_weight)
# next control point on arc
angle += segment_angle_2
control_points.append(Vec3(math.cos(angle), math.sin(angle)))
weights.append(1.0)
# Knot vector calculation for B-spline of order=3
# Clamped B-Spline starts with `order` 0.0 knots and
# ends with `order` 1.0 knots
knots = [0.0, 0.0, 0.0]
step = 1.0 / ((max(len(control_points) + 1, 4) - 4) / 2.0 + 1.0)
g = step
while g < 1.0:
knots.extend((g, g))
g += step
knots.extend(
[1.0] * (required_knot_values(len(control_points), 3) - len(knots)))
return control_points, weights, knots
def bspline_basis(u: float, index: int, degree: int,
knots: Sequence[float]) -> float:
""" B-spline basis_vector function.
Simple recursive implementation for testing and comparison.
Args:
u: curve parameter in range [0, max(knots)]
index: index of control point
degree: degree of B-spline
knots: knots vector
Returns:
float: basis function value N_i,p(u)
"""
cache: Dict[Tuple[int, int], float] = {}
u = float(u)
def N(i: int, p: int) -> float:
try:
return cache[(i, p)]
except KeyError:
if p == 0:
retval = 1 if knots[i] <= u < knots[i + 1] else 0.
else:
denominator = knots[i + p] - knots[i]
f1 = ((u - knots[i]) / denominator * N(i, p - 1)
      if denominator else 0.)
denominator = knots[i + p + 1] - knots[i + 1]
f2 = ((knots[i + p + 1] - u) / denominator * N(i + 1, p - 1)
      if denominator else 0.)
retval = f1 + f2
cache[(i, p)] = retval
return retval
return N(int(index), int(degree))
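# Editor's note: for the clamped cubic knot vector [0, 0, 0, 0, 1, 1, 1, 1] the
# basis functions equal the cubic Bernstein polynomials, e.g.
# bspline_basis(0.5, index=1, degree=3, knots=[0, 0, 0, 0, 1, 1, 1, 1])
# == 3 * 0.5 * (1 - 0.5) ** 2 == 0.375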
def bspline_basis_vector(u: float, count: int, degree: int,
knots: Sequence[float]) -> List[float]:
""" Create basis_vector vector at parameter u.
Used with the bspline_basis() for testing and comparison.
Args:
u: curve parameter in range [0, max(knots)]
count: control point count (n + 1)
degree: degree of B-spline (order = degree + 1)
knots: knot vector
Returns:
List[float]: basis vector, len(basis) == count
"""
assert len(knots) == (count + degree + 1)
basis: List[float] = [bspline_basis(u, index, degree, knots)
                      for index in range(count)]
# Basis functions use half-open spans [u_i, u_i+1), therefore all basis values
# evaluate to 0 at u == max(knots); set the last one to 1 to include the end point.
if math.isclose(u, knots[-1]):
basis[-1] = 1.
return basis
```
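A minimal usage sketch for the `BSpline` interface above (an editor's addition; it assumes `BSpline` and `rational_bspline_from_arc` are importable from `ezdxf.math`, as in recent ezdxf releases):
```python
from ezdxf.math import BSpline, rational_bspline_from_arc

# Clamped cubic B-spline (order = degree + 1 = 4) from 5 control points:
spline = BSpline([(0, 0), (1, 2), (3, 3), (5, 1), (6, 0)], order=4)

# Adaptive flattening: chord deviation from the true curve stays below 0.01.
vertices = list(spline.flattening(distance=0.01, segments=4))

# Knot insertion changes the parametrization but not the shape:
refined = spline.insert_knot(spline.max_t / 2)

# A clamped non-rational spline decomposes into cubic Bezier segments,
# each defined by degree + 1 = 4 control points:
for bezier_points in spline.bezier_decomposition():
    assert len(bezier_points) == 4

# Rational B-spline for a quarter circle of radius 2:
arc = rational_bspline_from_arc(center=(0, 0), radius=2, start_angle=0,
                                end_angle=90)
```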
#### File: ezdxf/math/_bspline.py
```python
from typing import List, Iterable, Sequence, Tuple
import math
import bisect
# The pure Python implementation can't import from ._ctypes or ezdxf.math!
from ._vector import Vec3, NULLVEC
from .linalg import binomial_coefficient
__all__ = ['Basis', 'Evaluator']
class Basis:
""" Immutable Basis function class. """
__slots__ = ('_knots', '_weights', '_order', '_count')
def __init__(self, knots: Iterable[float], order: int, count: int,
weights: Sequence[float] = None):
self._knots = tuple(knots)
self._weights = tuple(weights or [])
self._order: int = int(order)
self._count: int = int(count)
# validation checks:
len_weights = len(self._weights)
if len_weights != 0 and len_weights != self._count:
raise ValueError('invalid weight count')
if len(self._knots) != self._order + self._count:
raise ValueError('invalid knot count')
@property
def max_t(self) -> float:
return self._knots[-1]
@property
def order(self) -> int:
return self._order
@property
def degree(self) -> int:
return self._order - 1
@property
def knots(self) -> Tuple[float, ...]:
return self._knots
@property
def weights(self) -> Tuple[float, ...]:
return self._weights
@property
def is_rational(self) -> bool:
""" Returns ``True`` if curve is a rational B-spline. (has weights) """
return bool(self._weights)
def basis_vector(self, t: float) -> List[float]:
""" Returns the expanded basis vector. """
span = self.find_span(t)
p = self._order - 1
front = span - p
back = self._count - span - 1
basis = self.basis_funcs(span, t)
return ([0.0] * front) + basis + ([0.0] * back)
def find_span(self, u: float) -> int:
""" Determine the knot span index. """
# Linear search is more reliable than the binary search of Algorithm A2.1
# from "The NURBS Book" by <NAME>.
knots = self._knots
count = self._count # text book: n+1
if u >= knots[count]: # special case
return count - 1 # n
p = self._order - 1
# common clamped spline:
if knots[p] == 0.0: # use binary search
# This is fast and works most of the time,
# but Test 621 : test_weired_closed_spline()
# goes into an infinite loop, because of
# a weird knot configuration.
return bisect.bisect_right(knots, u, p, count) - 1
else: # use linear search
span = 0
while knots[span] <= u and span < count:
span += 1
return span - 1
def basis_funcs(self, span: int, u: float) -> List[float]:
# Source: The NURBS Book: Algorithm A2.2
order = self._order
knots = self._knots
N = [0.0] * order
left = list(N)
right = list(N)
N[0] = 1.0
for j in range(1, order):
left[j] = u - knots[max(0, span + 1 - j)]
right[j] = knots[span + j] - u
saved = 0.0
for r in range(j):
temp = N[r] / (right[r + 1] + left[j - r])
N[r] = saved + right[r + 1] * temp
saved = left[j - r] * temp
N[j] = saved
if self.is_rational:
return self.span_weighting(N, span)
else:
return N
def span_weighting(self, nbasis: List[float], span: int) -> List[float]:
size = len(nbasis)
weights = self._weights[span - self._order + 1: span + 1]
products = [nb * w for nb, w in zip(nbasis, weights)]
s = sum(products)
return [0.0] * size if s == 0.0 else [p / s for p in products]
def basis_funcs_derivatives(self, span: int, u: float, n: int = 1):
# Source: The NURBS Book: Algorithm A2.3
order = self._order
p = order - 1
n = min(n, p)
knots = self._knots
left = [1.0] * order
right = [1.0] * order
ndu = [[1.0] * order for _ in range(order)]
for j in range(1, order):
left[j] = u - knots[max(0, span + 1 - j)]
right[j] = knots[span + j] - u
saved = 0.0
for r in range(j):
# lower triangle
ndu[j][r] = right[r + 1] + left[j - r]
temp = ndu[r][j - 1] / ndu[j][r]
# upper triangle
ndu[r][j] = saved + (right[r + 1] * temp)
saved = left[j - r] * temp
ndu[j][j] = saved
# load the basis_vector functions
derivatives = [[0.0] * order for _ in range(order)]
for j in range(order):
derivatives[0][j] = ndu[j][p]
# loop over function index
a = [[1.0] * order, [1.0] * order]
for r in range(order):
s1 = 0
s2 = 1
# alternate rows in array a
a[0][0] = 1.0
# loop to compute kth derivative
for k in range(1, n + 1):
d = 0.0
rk = r - k
pk = p - k
if r >= k:
a[s2][0] = a[s1][0] / ndu[pk + 1][rk]
d = a[s2][0] * ndu[rk][pk]
if rk >= -1:
j1 = 1
else:
j1 = -rk
if (r - 1) <= pk:
j2 = k - 1
else:
j2 = p - r
for j in range(j1, j2 + 1):
a[s2][j] = (a[s1][j] - a[s1][j - 1]) / ndu[pk + 1][rk + j]
d += (a[s2][j] * ndu[rk + j][pk])
if r <= pk:
a[s2][k] = -a[s1][k - 1] / ndu[pk + 1][r]
d += (a[s2][k] * ndu[r][pk])
derivatives[k][r] = d
# Switch rows
s1, s2 = s2, s1
# Multiply through by the correct factors
r = float(p)
for k in range(1, n + 1):
for j in range(order):
derivatives[k][j] *= r
r *= (p - k)
return derivatives[:n + 1]
class Evaluator:
""" B-spline curve point and curve derivative evaluator. """
__slots__ = ['_basis', '_control_points']
def __init__(self, basis: Basis, control_points: Sequence[Vec3]):
self._basis = basis
self._control_points = control_points
def point(self, u: float) -> Vec3:
# Source: The NURBS Book: Algorithm A3.1
basis = self._basis
control_points = self._control_points
if math.isclose(u, basis.max_t):
u = basis.max_t
p = basis.degree
span = basis.find_span(u)
N = basis.basis_funcs(span, u)
return Vec3.sum(
N[i] * control_points[span - p + i] for i in range(p + 1))
def points(self, t: Iterable[float]) -> Iterable[Vec3]:
for u in t:
yield self.point(u)
def derivative(self, u: float, n: int = 1) -> List[Vec3]:
""" Return point and derivatives up to n <= degree for parameter u. """
# Source: The NURBS Book: Algorithm A3.2
basis = self._basis
control_points = self._control_points
if math.isclose(u, basis.max_t):
u = basis.max_t
p = basis.degree
span = basis.find_span(u)
basis_funcs_ders = basis.basis_funcs_derivatives(span, u, n)
if basis.is_rational:
# Homogeneous point representation required:
# (x*w, y*w, z*w, w)
CKw = []
wders = []
weights = basis.weights
for k in range(n + 1):
v = NULLVEC
wder = 0.0
for j in range(p + 1):
index = span - p + j
bas_func_weight = basis_funcs_ders[k][j] * weights[index]
# control_point * weight * bas_func_der = (x*w, y*w, z*w) * bas_func_der
v += control_points[index] * bas_func_weight
wder += bas_func_weight
CKw.append(v)
wders.append(wder)
# Source: The NURBS Book: Algorithm A4.2
CK = []
for k in range(n + 1):
v = CKw[k]
for i in range(1, k + 1):
v -= binomial_coefficient(k, i) * wders[i] * CK[k - i]
CK.append(v / wders[0])
else:
CK = [
Vec3.sum(
basis_funcs_ders[k][j] * control_points[span - p + j]
for j in range(p + 1))
for k in range(n + 1)
]
return CK
def derivatives(
self, t: Iterable[float], n: int = 1) -> Iterable[List[Vec3]]:
for u in t:
yield self.derivative(u, n)
```
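A short sketch exercising `Basis` and `Evaluator` directly (an editor's addition; these classes live in the internal module shown above, so application code would normally go through `BSpline` instead):
```python
from ezdxf.math import Vec3
from ezdxf.math._bspline import Basis, Evaluator

control_points = Vec3.list([(0, 0), (1, 2), (3, 2), (4, 0)])
# Clamped knot vector: len(knots) == order + count == 3 + 4
basis = Basis(knots=[0, 0, 0, 0.5, 1, 1, 1], order=3, count=4)
evaluator = Evaluator(basis, control_points)

start = evaluator.point(0.0)   # equals the first control point (clamped)
mid = evaluator.point(0.5)
point, tangent = evaluator.derivative(0.5, n=1)  # point and 1st derivative
```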
#### File: ezdxf/math/_matrix44.py
```python
from typing import Sequence, Iterable, List, Tuple, TYPE_CHECKING
import math
from math import sin, cos, tan
from itertools import chain
# The pure Python implementation can't import from ._ctypes or ezdxf.math!
from ._vector import Vec3, X_AXIS, Y_AXIS, Z_AXIS, NULLVEC
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex
__all__ = ['Matrix44']
# removed array.array because array is optimized for space not speed, and space
# optimization is not needed
def floats(items: Iterable) -> List[float]:
return [float(v) for v in items]
class Matrix44:
""" This is a pure Python implementation for 4x4 `transformation matrices`_ ,
to avoid dependency to big numerical packages like :mod:`numpy`, before binary
wheels, installation of these packages wasn't always easy on Windows.
The utility functions for constructing transformations and transforming
vectors and points assumes that vectors are stored as row vectors, meaning
when multiplied, transformations are applied left to right (e.g. vAB
transforms v by A then by B).
Matrix44 initialization:
- ``Matrix44()`` returns the identity matrix.
- ``Matrix44(values)`` values is an iterable with the 16 components of
the matrix.
- ``Matrix44(row1, row2, row3, row4)`` four rows, each row with four
values.
.. _transformation matrices: https://en.wikipedia.org/wiki/Transformation_matrix
"""
_identity = (
1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0
)
__slots__ = ('_matrix',)
def __init__(self, *args):
"""
Matrix44() is the identity matrix.
Matrix44(values) values is an iterable with the 16 components of the matrix.
Matrix44(row1, row2, row3, row4) four rows, each row with four values.
"""
nargs = len(args)
if nargs == 0:
self._matrix = floats(Matrix44._identity)
elif nargs == 1:
self._matrix = floats(args[0])
elif nargs == 4:
self._matrix = floats(chain(*args))
else:
raise ValueError("Invalid count of arguments (4 row vectors or one "
"list with 16 values).")
if len(self._matrix) != 16:
raise ValueError("Invalid matrix count")
def __repr__(self) -> str:
""" Returns the representation string of the matrix:
``Matrix44((m00, m01, m02, m03), (...), (...), (...))``, one 4-tuple per row.
"""
def format_row(row):
return "(%s)" % ", ".join(str(value) for value in row)
return "Matrix44(%s)" % \
", ".join(format_row(row) for row in self.rows())
def get_2d_transformation(self) -> Tuple[float, ...]:
""" Returns a the 2D transformation as a row-major matrix in a linear
array (tuple).
A more correct transformation could be implemented like so:
https://stackoverflow.com/questions/10629737/convert-3d-4x4-rotation-matrix-into-2d
"""
m = self._matrix
return m[0], m[1], 0.0, m[4], m[5], 0.0, m[12], m[13], 1.0
def get_row(self, row: int) -> Tuple[float, ...]:
""" Get row as list of of four float values.
Args:
row: row index [0 .. 3]
"""
if 0 <= row < 4:
index = row * 4
return tuple(self._matrix[index:index + 4])
else:
raise IndexError(f'invalid row index: {row}')
def set_row(self, row: int, values: Sequence[float]) -> None:
"""
Sets the values in a row.
Args:
row: row index [0 .. 3]
values: iterable of four row values
"""
if 0 <= row < 4:
index = row * 4
self._matrix[index:index + len(values)] = floats(values)
else:
raise IndexError(f'invalid row index: {row}')
def get_col(self, col: int) -> Tuple[float, ...]:
"""
Returns a column as a tuple of four floats.
Args:
col: column index [0 .. 3]
"""
if 0 <= col < 4:
m = self._matrix
return m[col], m[col + 4], m[col + 8], m[col + 12]
else:
raise IndexError(f'invalid column index: {col}')
def set_col(self, col: int, values: Sequence[float]):
"""
Sets the values in a column.
Args:
col: column index [0 .. 3]
values: iterable of four column values
"""
if 0 <= col < 4:
m = self._matrix
a, b, c, d = values
m[col] = float(a)
m[col + 4] = float(b)
m[col + 8] = float(c)
m[col + 12] = float(d)
else:
raise IndexError(f'invalid column index: {col}')
def copy(self) -> 'Matrix44':
""" Returns a copy of same type. """
return self.__class__(self._matrix)
__copy__ = copy
@property
def origin(self) -> Vec3:
m = self._matrix
return Vec3(m[12], m[13], m[14])
@origin.setter
def origin(self, v: 'Vertex') -> None:
m = self._matrix
m[12], m[13], m[14] = Vec3(v)
@property
def ux(self) -> Vec3:
return Vec3(self._matrix[0:3])
@property
def uy(self) -> Vec3:
return Vec3(self._matrix[4:7])
@property
def uz(self) -> Vec3:
return Vec3(self._matrix[8:11])
@property
def is_cartesian(self) -> bool:
""" Returns ``True`` if target coordinate system is a right handed
orthogonal coordinate system.
"""
return self.uy.cross(self.uz).normalize().isclose(self.ux.normalize())
@property
def is_orthogonal(self) -> bool:
""" Returns ``True`` if target coordinate system has orthogonal axis.
Does not check for left- or right handed orientation, any orientation
of the axis valid.
"""
ux = self.ux.normalize()
uy = self.uy.normalize()
uz = self.uz.normalize()
return math.isclose(ux.dot(uy), 0.0, abs_tol=1e-9) and \
math.isclose(ux.dot(uz), 0.0, abs_tol=1e-9) and \
math.isclose(uy.dot(uz), 0.0, abs_tol=1e-9)
@classmethod
def scale(cls, sx: float, sy: float = None, sz: float = None) -> 'Matrix44':
""" Returns a scaling transformation matrix. If `sy` is ``None``,
`sy` = `sx`, and if `sz` is ``None`` `sz` = `sx`.
"""
if sy is None:
sy = sx
if sz is None:
sz = sx
m = cls([
float(sx), 0., 0., 0.,
0., float(sy), 0., 0.,
0., 0., float(sz), 0.,
0., 0., 0., 1.
])
return m
@classmethod
def translate(cls, dx: float, dy: float, dz: float) -> 'Matrix44':
""" Returns a translation matrix for translation vector (dx, dy, dz).
"""
return cls([
1., 0., 0., 0.,
0., 1., 0., 0.,
0., 0., 1., 0.,
float(dx), float(dy), float(dz), 1.
])
@classmethod
def x_rotate(cls, angle: float) -> 'Matrix44':
""" Returns a rotation matrix about the x-axis.
Args:
angle: rotation angle in radians
"""
cos_a = cos(angle)
sin_a = sin(angle)
return cls([
1., 0., 0., 0.,
0., cos_a, sin_a, 0.,
0., -sin_a, cos_a, 0.,
0., 0., 0., 1.
])
@classmethod
def y_rotate(cls, angle: float) -> 'Matrix44':
""" Returns a rotation matrix about the y-axis.
Args:
angle: rotation angle in radians
"""
cos_a = cos(angle)
sin_a = sin(angle)
return cls([
cos_a, 0., -sin_a, 0.,
0., 1., 0., 0.,
sin_a, 0., cos_a, 0.,
0., 0., 0., 1.
])
@classmethod
def z_rotate(cls, angle: float) -> 'Matrix44':
""" Returns a rotation matrix about the z-axis.
Args:
angle: rotation angle in radians
"""
cos_a = cos(angle)
sin_a = sin(angle)
return cls([
cos_a, sin_a, 0., 0.,
-sin_a, cos_a, 0., 0.,
0., 0., 1., 0.,
0., 0., 0., 1.
])
@classmethod
def axis_rotate(cls, axis: 'Vertex', angle: float) -> 'Matrix44':
""" Returns a rotation matrix about an arbitrary `axis`.
Args:
axis: rotation axis as ``(x, y, z)`` tuple or :class:`Vec3` object
angle: rotation angle in radians
"""
c = cos(angle)
s = sin(angle)
omc = 1. - c
x, y, z = Vec3(axis).normalize()
return cls([
x * x * omc + c, y * x * omc + z * s, x * z * omc - y * s, 0.,
x * y * omc - z * s, y * y * omc + c, y * z * omc + x * s, 0.,
x * z * omc + y * s, y * z * omc - x * s, z * z * omc + c, 0.,
0., 0., 0., 1.
])
@classmethod
def xyz_rotate(cls, angle_x: float, angle_y: float,
angle_z: float) -> 'Matrix44':
"""
Returns a rotation matrix for rotation about each axis.
Args:
angle_x: rotation angle about x-axis in radians
angle_y: rotation angle about y-axis in radians
angle_z: rotation angle about z-axis in radians
"""
cx = cos(angle_x)
sx = sin(angle_x)
cy = cos(angle_y)
sy = sin(angle_y)
cz = cos(angle_z)
sz = sin(angle_z)
sxsy = sx * sy
cxsy = cx * sy
return cls([
cy * cz, sxsy * cz + cx * sz, -cxsy * cz + sx * sz, 0.,
-cy * sz, -sxsy * sz + cx * cz, cxsy * sz + sx * cz, 0.,
sy, -sx * cy, cx * cy, 0.,
0., 0., 0., 1.])
@classmethod
def shear_xy(cls, angle_x: float = 0, angle_y: float = 0) -> 'Matrix44':
""" Returns a translation matrix for shear mapping (visually similar
to slanting) in the xy-plane.
Args:
angle_x: slanting angle in x direction in radians
angle_y: slanting angle in y direction in radians
"""
tx = math.tan(angle_x)
ty = math.tan(angle_y)
return cls([
1., ty, 0., 0.,
tx, 1., 0., 0.,
0., 0., 1., 0.,
0., 0., 0., 1.
])
@classmethod
def perspective_projection(cls, left: float, right: float, top: float,
bottom: float, near: float,
far: float) -> 'Matrix44':
""" Returns a matrix for a 2D projection.
Args:
left: Coordinate of left of screen
right: Coordinate of right of screen
top: Coordinate of the top of the screen
bottom: Coordinate of the bottom of the screen
near: Coordinate of the near clipping plane
far: Coordinate of the far clipping plane
"""
return cls([
(2. * near) / (right - left), 0., 0., 0.,
0., (2. * near) / (top - bottom), 0., 0.,
(right + left) / (right - left), (top + bottom) / (top - bottom),
-((far + near) / (far - near)), -1.,
0., 0., -((2. * far * near) / (far - near)), 0.
])
@classmethod
def perspective_projection_fov(cls, fov: float, aspect: float, near: float,
far: float) -> 'Matrix44':
""" Returns a matrix for a 2D projection.
Args:
fov: The field of view (in radians)
aspect: The aspect ratio of the screen (width / height)
near: Coordinate of the near clipping plane
far: Coordinate of the far clipping plane
"""
vrange = near * tan(fov / 2.)
left = -vrange * aspect
right = vrange * aspect
bottom = -vrange
top = vrange
return cls.perspective_projection(left, right, top, bottom, near, far)
@staticmethod
def chain(*matrices: 'Matrix44') -> 'Matrix44':
""" Compose a transformation matrix from one or more `matrices`. """
transformation = Matrix44()
for matrix in matrices:
transformation *= matrix
return transformation
@staticmethod
def ucs(ux=X_AXIS, uy=Y_AXIS, uz=Z_AXIS, origin=NULLVEC) -> 'Matrix44':
""" Returns a matrix for coordinate transformation from WCS to UCS.
For transformation from UCS to WCS, transpose the returned matrix.
Args:
ux: x-axis for UCS as unit vector
uy: y-axis for UCS as unit vector
uz: z-axis for UCS as unit vector
origin: UCS origin as location vector
"""
ux_x, ux_y, ux_z = ux
uy_x, uy_y, uy_z = uy
uz_x, uz_y, uz_z = uz
or_x, or_y, or_z = origin
return Matrix44((
ux_x, ux_y, ux_z, 0,
uy_x, uy_y, uy_z, 0,
uz_x, uz_y, uz_z, 0,
or_x, or_y, or_z, 1,
))
def __setitem__(self, index: Tuple[int, int], value: float):
""" Set (row, column) element. """
row, col = index
if 0 <= row < 4 and 0 <= col < 4:
self._matrix[row * 4 + col] = float(value)
else:
raise IndexError(f'index out of range: {index}')
def __getitem__(self, index: Tuple[int, int]):
""" Get (row, column) element. """
row, col = index
if 0 <= row < 4 and 0 <= col < 4:
return self._matrix[row * 4 + col]
else:
raise IndexError(f'index out of range: {index}')
def __iter__(self) -> Iterable[float]:
""" Iterates over all matrix values. """
return iter(self._matrix)
def __mul__(self, other: 'Matrix44') -> 'Matrix44':
""" Returns a new matrix as result of the matrix multiplication with
another matrix.
"""
res_matrix = self.copy()
res_matrix.__imul__(other)
return res_matrix
# __matmul__ = __mul__ does not work!
def __matmul__(self, other: 'Matrix44') -> 'Matrix44':
""" Returns a new matrix as result of the matrix multiplication with
another matrix.
"""
res_matrix = self.copy()
res_matrix.__imul__(other)
return res_matrix
def __imul__(self, other: 'Matrix44') -> 'Matrix44':
""" Inplace multiplication with another matrix. """
m1 = self._matrix
m2 = other._matrix
self._matrix = [
m1[0] * m2[0] + m1[1] * m2[4] + m1[2] * m2[8] + m1[3] * m2[12],
m1[0] * m2[1] + m1[1] * m2[5] + m1[2] * m2[9] + m1[3] * m2[13],
m1[0] * m2[2] + m1[1] * m2[6] + m1[2] * m2[10] + m1[3] * m2[14],
m1[0] * m2[3] + m1[1] * m2[7] + m1[2] * m2[11] + m1[3] * m2[15],
m1[4] * m2[0] + m1[5] * m2[4] + m1[6] * m2[8] + m1[7] * m2[12],
m1[4] * m2[1] + m1[5] * m2[5] + m1[6] * m2[9] + m1[7] * m2[13],
m1[4] * m2[2] + m1[5] * m2[6] + m1[6] * m2[10] + m1[7] * m2[14],
m1[4] * m2[3] + m1[5] * m2[7] + m1[6] * m2[11] + m1[7] * m2[15],
m1[8] * m2[0] + m1[9] * m2[4] + m1[10] * m2[8] + m1[11] * m2[12],
m1[8] * m2[1] + m1[9] * m2[5] + m1[10] * m2[9] + m1[11] * m2[13],
m1[8] * m2[2] + m1[9] * m2[6] + m1[10] * m2[10] + m1[11] * m2[14],
m1[8] * m2[3] + m1[9] * m2[7] + m1[10] * m2[11] + m1[11] * m2[15],
m1[12] * m2[0] + m1[13] * m2[4] + m1[14] * m2[8] + m1[15] * m2[12],
m1[12] * m2[1] + m1[13] * m2[5] + m1[14] * m2[9] + m1[15] * m2[13],
m1[12] * m2[2] + m1[13] * m2[6] + m1[14] * m2[10] + m1[15] * m2[14],
m1[12] * m2[3] + m1[13] * m2[7] + m1[14] * m2[11] + m1[15] * m2[15]
]
return self
def rows(self) -> Iterable[Tuple[float, ...]]:
""" Iterate over rows as 4-tuples. """
return (self.get_row(index) for index in (0, 1, 2, 3))
def columns(self) -> Iterable[Tuple[float, ...]]:
""" Iterate over columns as 4-tuples. """
return (self.get_col(index) for index in (0, 1, 2, 3))
def transform(self, vector: 'Vertex') -> Vec3:
""" Returns a transformed vertex. """
m = self._matrix
x, y, z = vector
return Vec3(x * m[0] + y * m[4] + z * m[8] + m[12],
x * m[1] + y * m[5] + z * m[9] + m[13],
x * m[2] + y * m[6] + z * m[10] + m[14])
def transform_direction(self, vector: 'Vertex', normalize=False) -> Vec3:
""" Returns a transformed direction vector without translation. """
m = self._matrix
x, y, z = vector
v = Vec3(x * m[0] + y * m[4] + z * m[8],
x * m[1] + y * m[5] + z * m[9],
x * m[2] + y * m[6] + z * m[10])
return v.normalize() if normalize else v
ocs_to_wcs = transform_direction
def transform_vertices(self, vectors: Iterable['Vertex']) -> Iterable[Vec3]:
""" Returns an iterable of transformed vertices. """
m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15 = self._matrix
for vector in vectors:
x, y, z = vector
yield Vec3(
x * m0 + y * m4 + z * m8 + m12,
x * m1 + y * m5 + z * m9 + m13,
x * m2 + y * m6 + z * m10 + m14
)
def transform_directions(self, vectors: Iterable['Vertex'],
normalize=False) -> Iterable[Vec3]:
""" Returns an iterable of transformed direction vectors without
translation.
"""
m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, *_ = self._matrix
for vector in vectors:
x, y, z = vector
v = Vec3(
x * m0 + y * m4 + z * m8,
x * m1 + y * m5 + z * m9,
x * m2 + y * m6 + z * m10
)
yield v.normalize() if normalize else v
def ucs_vertex_from_wcs(self, wcs: Vec3) -> Vec3:
""" Returns an UCS vector from WCS vertex.
Works only if matrix is used as cartesian UCS without scaling.
(internal API)
"""
return self.ucs_direction_from_wcs(wcs - self.origin)
def ucs_direction_from_wcs(self, wcs: Vec3) -> Vec3:
""" Returns UCS direction vector from WCS direction.
Works only if matrix is used as cartesian UCS without scaling.
(internal API)
"""
m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, *_ = self._matrix
x, y, z = wcs
return Vec3(
x * m0 + y * m1 + z * m2,
x * m4 + y * m5 + z * m6,
x * m8 + y * m9 + z * m10,
)
ocs_from_wcs = ucs_direction_from_wcs
def transpose(self) -> None:
""" Swaps the rows for columns inplace. """
m00, m01, m02, m03, \
m10, m11, m12, m13, \
m20, m21, m22, m23, \
m30, m31, m32, m33 = self._matrix
self._matrix = [
m00, m10, m20, m30,
m01, m11, m21, m31,
m02, m12, m22, m32,
m03, m13, m23, m33
]
def determinant(self) -> float:
""" Returns determinant. """
m00, m01, m02, m03, \
m10, m11, m12, m13, \
m20, m21, m22, m23, \
m30, m31, m32, m33 = self._matrix
return m00 * m11 * m22 * m33 - m00 * m11 * m23 * m32 + \
m00 * m12 * m23 * m31 - m00 * m12 * m21 * m33 + \
m00 * m13 * m21 * m32 - m00 * m13 * m22 * m31 - \
m01 * m12 * m23 * m30 + m01 * m12 * m20 * m33 - \
m01 * m13 * m20 * m32 + m01 * m13 * m22 * m30 - \
m01 * m10 * m22 * m33 + m01 * m10 * m23 * m32 + \
m02 * m13 * m20 * m31 - m02 * m13 * m21 * m30 + \
m02 * m10 * m21 * m33 - m02 * m10 * m23 * m31 + \
m02 * m11 * m23 * m30 - m02 * m11 * m20 * m33 - \
m03 * m10 * m21 * m32 + m03 * m10 * m22 * m31 - \
m03 * m11 * m22 * m30 + m03 * m11 * m20 * m32 - \
m03 * m12 * m20 * m31 + m03 * m12 * m21 * m30
def inverse(self) -> None:
""" Calculates the inverse of the matrix.
Raises:
ZeroDivisionError: if matrix has no inverse.
"""
det = self.determinant()
f = 1. / det # catch ZeroDivisionError by caller
m00, m01, m02, m03, \
m10, m11, m12, m13, \
m20, m21, m22, m23, \
m30, m31, m32, m33 = self._matrix
self._matrix = [
(
m12 * m23 * m31 - m13 * m22 * m31 + m13 * m21 * m32 -
m11 * m23 * m32 - m12 * m21 * m33 + m11 * m22 * m33) * f,
(
m03 * m22 * m31 - m02 * m23 * m31 - m03 * m21 * m32 +
m01 * m23 * m32 + m02 * m21 * m33 - m01 * m22 * m33) * f,
(
m02 * m13 * m31 - m03 * m12 * m31 + m03 * m11 * m32 -
m01 * m13 * m32 - m02 * m11 * m33 + m01 * m12 * m33) * f,
(
m03 * m12 * m21 - m02 * m13 * m21 - m03 * m11 * m22 +
m01 * m13 * m22 + m02 * m11 * m23 - m01 * m12 * m23) * f,
(
m13 * m22 * m30 - m12 * m23 * m30 - m13 * m20 * m32 +
m10 * m23 * m32 + m12 * m20 * m33 - m10 * m22 * m33) * f,
(
m02 * m23 * m30 - m03 * m22 * m30 + m03 * m20 * m32 -
m00 * m23 * m32 - m02 * m20 * m33 + m00 * m22 * m33) * f,
(
m03 * m12 * m30 - m02 * m13 * m30 - m03 * m10 * m32 +
m00 * m13 * m32 + m02 * m10 * m33 - m00 * m12 * m33) * f,
(
m02 * m13 * m20 - m03 * m12 * m20 + m03 * m10 * m22 -
m00 * m13 * m22 - m02 * m10 * m23 + m00 * m12 * m23) * f,
(
m11 * m23 * m30 - m13 * m21 * m30 + m13 * m20 * m31 -
m10 * m23 * m31 - m11 * m20 * m33 + m10 * m21 * m33) * f,
(
m03 * m21 * m30 - m01 * m23 * m30 - m03 * m20 * m31 +
m00 * m23 * m31 + m01 * m20 * m33 - m00 * m21 * m33) * f,
(
m01 * m13 * m30 - m03 * m11 * m30 + m03 * m10 * m31 -
m00 * m13 * m31 - m01 * m10 * m33 + m00 * m11 * m33) * f,
(
m03 * m11 * m20 - m01 * m13 * m20 - m03 * m10 * m21 +
m00 * m13 * m21 + m01 * m10 * m23 - m00 * m11 * m23) * f,
(
m12 * m21 * m30 - m11 * m22 * m30 - m12 * m20 * m31 +
m10 * m22 * m31 + m11 * m20 * m32 - m10 * m21 * m32) * f,
(
m01 * m22 * m30 - m02 * m21 * m30 + m02 * m20 * m31 -
m00 * m22 * m31 - m01 * m20 * m32 + m00 * m21 * m32) * f,
(
m02 * m11 * m30 - m01 * m12 * m30 - m02 * m10 * m31 +
m00 * m12 * m31 + m01 * m10 * m32 - m00 * m11 * m32) * f,
(
m01 * m12 * m20 - m02 * m11 * m20 + m02 * m10 * m21 -
m00 * m12 * m21 - m01 * m10 * m22 + m00 * m11 * m22) * f,
]
```
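A minimal sketch of the row-vector convention from the class docstring (an editor's addition, assuming `Matrix44` and `Vec3` are exported by `ezdxf.math`):
```python
import math
from ezdxf.math import Matrix44, Vec3

# Left-to-right composition: scale first, then rotate about z, then translate.
m = Matrix44.chain(
    Matrix44.scale(2, 2, 1),
    Matrix44.z_rotate(math.pi / 2),
    Matrix44.translate(10, 0, 0),
)
p = m.transform((1, 0, 0))                    # transform a single vertex
pts = list(m.transform_vertices([(0, 0, 0), (1, 1, 0)]))

inv = m.copy()
inv.inverse()                                 # inplace; ZeroDivisionError if singular
assert inv.transform(p).isclose(Vec3(1, 0, 0))  # round trip back to the input
```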
#### File: ezdxf/math/ucs.py
```python
from typing import TYPE_CHECKING, Tuple, Sequence, Iterable
from ezdxf.math import Vec3, X_AXIS, Y_AXIS, Z_AXIS, Matrix44
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex, BaseLayout
__all__ = ["OCS", "UCS", "PassTroughUCS"]
def render_axis(layout: 'BaseLayout',
start: 'Vertex',
points: Sequence['Vertex'],
colors: Tuple[int, int, int] = (1, 3, 5)) -> None:
for point, color in zip(points, colors):
layout.add_line(start, point, dxfattribs={'color': color})
class OCS:
"""
Establish an :ref:`OCS` for a given extrusion vector.
Args:
extrusion: extrusion vector.
"""
def __init__(self, extrusion: 'Vertex' = Z_AXIS):
Az = Vec3(extrusion).normalize()
self.transform = not Az.isclose(Z_AXIS)
if self.transform:
if (abs(Az.x) < 1 / 64.) and (abs(Az.y) < 1 / 64.):
Ax = Y_AXIS.cross(Az)
else:
Ax = Z_AXIS.cross(Az)
Ax = Ax.normalize()
Ay = Az.cross(Ax).normalize()
self.matrix = Matrix44.ucs(Ax, Ay, Az)
@property
def ux(self) -> Vec3:
""" x-axis unit vector """
return self.matrix.ux if self.transform else X_AXIS
@property
def uy(self) -> Vec3:
""" y-axis unit vector """
return self.matrix.uy if self.transform else Y_AXIS
@property
def uz(self) -> Vec3:
""" z-axis unit vector """
return self.matrix.uz if self.transform else Z_AXIS
def from_wcs(self, point: 'Vertex') -> 'Vertex':
""" Returns OCS vector for WCS `point`. """
if self.transform:
return self.matrix.ocs_from_wcs(point)
else:
return point
def points_from_wcs(self, points: Iterable['Vertex']) -> Iterable['Vertex']:
""" Returns iterable of OCS vectors from WCS `points`. """
if self.transform:
from_wcs = self.matrix.ocs_from_wcs
for point in points:
yield from_wcs(point)
else:
yield from points
def to_wcs(self, point: 'Vertex') -> 'Vertex':
""" Returns WCS vector for OCS `point`. """
if self.transform:
return self.matrix.ocs_to_wcs(point)
else:
return point
def points_to_wcs(self, points: Iterable['Vertex']) -> Iterable['Vertex']:
""" Returns iterable of WCS vectors for OCS `points`. """
if self.transform:
to_wcs = self.matrix.ocs_to_wcs
for point in points:
yield to_wcs(point)
else:
yield from points
def render_axis(self, layout: 'BaseLayout', length: float = 1, colors: Tuple[int, int, int] = (1, 3, 5)):
""" Render axis as 3D lines into a `layout`. """
render_axis(
layout,
start=(0, 0, 0),
points=(
self.to_wcs(X_AXIS * length),
self.to_wcs(Y_AXIS * length),
self.to_wcs(Z_AXIS * length),
),
colors=colors,
)
class UCS:
"""
Establish a user coordinate system (:ref:`UCS`). The UCS is defined by the origin and two unit vectors for the x-,
y- or z-axis, all axes in :ref:`WCS`. The missing axis is the cross product of the given axes.
If x- and y-axis are ``None``: ux = ``(1, 0, 0)``, uy = ``(0, 1, 0)``, uz = ``(0, 0, 1)``.
Unit vectors don't have to be normalized; normalization is done at initialization. This is also the reason why
scaling gets lost by copying or rotating.
Args:
origin: defines the UCS origin in world coordinates
ux: defines the UCS x-axis as vector in :ref:`WCS`
uy: defines the UCS y-axis as vector in :ref:`WCS`
uz: defines the UCS z-axis as vector in :ref:`WCS`
"""
def __init__(self, origin: 'Vertex' = (0, 0, 0), ux: 'Vertex' = None, uy: 'Vertex' = None, uz: 'Vertex' = None):
if ux is None and uy is None:
ux = X_AXIS
uy = Y_AXIS
uz = Z_AXIS
elif ux is None:
uy = Vec3(uy).normalize()
uz = Vec3(uz).normalize()
ux = Vec3(uy).cross(uz).normalize()
elif uy is None:
ux = Vec3(ux).normalize()
uz = Vec3(uz).normalize()
uy = Vec3(uz).cross(ux).normalize()
elif uz is None:
ux = Vec3(ux).normalize()
uy = Vec3(uy).normalize()
uz = Vec3(ux).cross(uy).normalize()
else: # all axis are given
ux = Vec3(ux).normalize()
uy = Vec3(uy).normalize()
uz = Vec3(uz).normalize()
self.matrix: Matrix44 = Matrix44.ucs(ux, uy, uz, origin)
@property
def ux(self) -> Vec3:
""" x-axis unit vector """
return self.matrix.ux
@property
def uy(self) -> Vec3:
""" y-axis unit vector """
return self.matrix.uy
@property
def uz(self) -> Vec3:
""" z-axis unit vector """
return self.matrix.uz
@property
def origin(self) -> Vec3:
""" Returns the origin """
return self.matrix.origin
@origin.setter
def origin(self, v: 'Vertex') -> None:
""" Set origin. """
self.matrix.origin = v
def copy(self) -> 'UCS':
""" Returns a copy of this UCS. """
return UCS(self.origin, self.ux, self.uy, self.uz)
def to_wcs(self, point: 'Vec3') -> 'Vec3':
""" Returns WCS point for UCS `point`. """
return self.matrix.transform(point)
def points_to_wcs(self, points: Iterable['Vec3']) -> Iterable['Vec3']:
""" Returns iterable of WCS vectors for UCS `points`. """
return self.matrix.transform_vertices(points)
def direction_to_wcs(self, vector: 'Vec3') -> 'Vec3':
""" Returns WCS direction for UCS `vector` without origin adjustment. """
return self.matrix.transform_direction(vector)
def from_wcs(self, point: 'Vec3') -> 'Vec3':
""" Returns UCS point for WCS `point`. """
return self.matrix.ucs_vertex_from_wcs(point)
def points_from_wcs(self, points: Iterable['Vec3']) -> Iterable['Vec3']:
""" Returns iterable of UCS vectors from WCS `points`. """
from_wcs = self.from_wcs
for point in points:
yield from_wcs(point)
def direction_from_wcs(self, vector: 'Vec3') -> 'Vec3':
""" Returns UCS vector for WCS `vector` without origin adjustment. """
return self.matrix.ucs_direction_from_wcs(vector)
def to_ocs(self, point: 'Vec3') -> 'Vec3':
"""
Returns OCS vector for UCS `point`.
The :class:`OCS` is defined by the z-axis of the :class:`UCS`.
"""
wpoint = self.to_wcs(point)
return OCS(self.uz).from_wcs(wpoint)
def points_to_ocs(self, points: Iterable['Vec3']) -> Iterable['Vec3']:
"""
Returns iterable of OCS vectors for UCS `points`.
The :class:`OCS` is defined by the z-axis of the :class:`UCS`.
Args:
points: iterable of UCS vertices
"""
wcs = self.to_wcs
ocs = OCS(self.uz)
for point in points:
yield ocs.from_wcs(wcs(point))
def to_ocs_angle_deg(self, angle: float) -> float:
"""
Transforms `angle` from current UCS to the parent coordinate system (most likely the WCS) including
the transformation to the OCS established by the extrusion vector :attr:`UCS.uz`.
Args:
angle: in UCS in degrees
"""
return self.ucs_direction_to_ocs_direction(Vec3.from_deg_angle(angle)).angle_deg
def to_ocs_angle_rad(self, angle: float) -> float:
"""
Transforms `angle` from current UCS to the parent coordinate system (most likely the WCS) including
the transformation to the OCS established by the extrusion vector :attr:`UCS.uz`.
Args:
angle: in UCS in radians
"""
return self.ucs_direction_to_ocs_direction(Vec3.from_angle(angle)).angle
def ucs_direction_to_ocs_direction(self, direction: Vec3) -> Vec3:
"""
Transforms UCS `direction` vector into OCS direction vector of the parent coordinate system (most likely
the WCS), target OCS is defined by the UCS z-axis.
"""
return OCS(self.uz).from_wcs(self.direction_to_wcs(direction))
def rotate(self, axis: 'Vertex', angle: float) -> 'UCS':
""" Returns a new rotated UCS, with the same origin as the source UCS.
The rotation vector is located in the origin and has :ref:`WCS`
coordinates e.g. (0, 0, 1) is the WCS z-axis as rotation vector.
Args:
axis: arbitrary rotation axis as vector in :ref:`WCS`
angle: rotation angle in radians
"""
t = Matrix44.axis_rotate(Vec3(axis), angle)
ux, uy, uz = t.transform_vertices([self.ux, self.uy, self.uz])
return UCS(origin=self.origin, ux=ux, uy=uy, uz=uz)
def rotate_local_x(self, angle: float) -> 'UCS':
""" Returns a new rotated UCS, rotation axis is the local x-axis.
Args:
angle: rotation angle in radians
"""
t = Matrix44.axis_rotate(self.ux, angle)
uy, uz = t.transform_vertices([self.uy, self.uz])
return UCS(origin=self.origin, ux=self.ux, uy=uy, uz=uz)
def rotate_local_y(self, angle: float) -> 'UCS':
""" Returns a new rotated UCS, rotation axis is the local y-axis.
Args:
angle: rotation angle in radians
"""
t = Matrix44.axis_rotate(self.uy, angle)
ux, uz = t.transform_vertices([self.ux, self.uz])
return UCS(origin=self.origin, ux=ux, uy=self.uy, uz=uz)
def rotate_local_z(self, angle: float) -> 'UCS':
""" Returns a new rotated UCS, rotation axis is the local z-axis.
Args:
angle: rotation angle in radians
"""
t = Matrix44.axis_rotate(self.uz, angle)
ux, uy = t.transform_vertices([self.ux, self.uy])
return UCS(origin=self.origin, ux=ux, uy=uy, uz=self.uz)
def shift(self, delta: 'Vertex') -> 'UCS':
""" Shifts current UCS by `delta` vector and returns `self`.
Args:
delta: shifting vector
"""
self.origin += Vec3(delta)
return self
def moveto(self, location: 'Vertex') -> 'UCS':
""" Place current UCS at new origin `location` and returns `self`.
Args:
location: new origin in WCS
"""
self.origin = Vec3(location)
return self
def transform(self, m: Matrix44) -> 'UCS':
""" General inplace transformation interface, returns `self` (floating interface).
Args:
m: 4x4 transformation matrix (:class:`ezdxf.math.Matrix44`)
.. versionadded:: 0.14
"""
self.matrix *= m
return self
@property
def is_cartesian(self) -> bool:
""" Returns ``True`` if cartesian coordinate system. """
return self.matrix.is_cartesian
@staticmethod
def from_x_axis_and_point_in_xy(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the x-axis vector and an arbitrary point in the xy-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: x-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the xy-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
x_axis = Vec3(axis)
z_axis = x_axis.cross(Vec3(point) - origin)
return UCS(origin=origin, ux=x_axis, uz=z_axis)
@staticmethod
def from_x_axis_and_point_in_xz(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the x-axis vector and an arbitrary point in the xz-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: x-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the xz-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
x_axis = Vec3(axis)
xz_vector = Vec3(point) - origin
y_axis = xz_vector.cross(x_axis)
return UCS(origin=origin, ux=x_axis, uy=y_axis)
@staticmethod
def from_y_axis_and_point_in_xy(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the y-axis vector and an arbitrary point in the xy-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: y-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the xy-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
y_axis = Vec3(axis)
xy_vector = Vec3(point) - origin
z_axis = xy_vector.cross(y_axis)
return UCS(origin=origin, uy=y_axis, uz=z_axis)
@staticmethod
def from_y_axis_and_point_in_yz(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the y-axis vector and an arbitrary point in the yz-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: y-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the yz-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
y_axis = Vec3(axis)
yz_vector = Vec3(point) - origin
x_axis = yz_vector.cross(y_axis)
return UCS(origin=origin, ux=x_axis, uy=y_axis)
@staticmethod
def from_z_axis_and_point_in_xz(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the z-axis vector and an arbitrary point in the xz-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: z-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the xz-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
z_axis = Vec3(axis)
y_axis = z_axis.cross(Vec3(point) - origin)
return UCS(origin=origin, uy=y_axis, uz=z_axis)
@staticmethod
def from_z_axis_and_point_in_yz(origin: 'Vertex', axis: 'Vertex', point: 'Vertex') -> 'UCS':
"""
Returns a new :class:`UCS` defined by the origin, the z-axis vector and an arbitrary point in the yz-plane.
Args:
origin: UCS origin as (x, y, z) tuple in :ref:`WCS`
axis: z-axis vector as (x, y, z) tuple in :ref:`WCS`
point: arbitrary point in the yz-plane, different from the origin, as (x, y, z) tuple in :ref:`WCS`
"""
z_axis = Vec3(axis)
yz_vector = Vec3(point) - origin
x_axis = yz_vector.cross(z_axis)
return UCS(origin=origin, ux=x_axis, uz=z_axis)
def render_axis(self, layout: 'BaseLayout', length: float = 1, colors: Tuple[int, int, int] = (1, 3, 5)):
""" Render axis as 3D lines into a `layout`. """
render_axis(
layout,
start=self.origin,
points=(
self.to_wcs(X_AXIS * length),
self.to_wcs(Y_AXIS * length),
self.to_wcs(Z_AXIS * length),
),
colors=colors,
)
class PassTroughUCS(UCS):
""" UCS is equal to the WCS and OCS (extrusion = 0, 0, 1) """
def __init__(self):
super().__init__()
def to_wcs(self, point: 'Vec3') -> Vec3:
return point
def points_to_wcs(self, points: Iterable['Vec3']) -> Iterable[Vec3]:
return points
def to_ocs(self, point: 'Vec3') -> 'Vec3':
return point
def points_to_ocs(self, points: Iterable['Vec3']) -> Iterable['Vec3']:
return points
def to_ocs_angle_deg(self, angle: float) -> float:
return angle
def to_ocs_angle_rad(self, angle: float) -> float:
return angle
def from_wcs(self, point: 'Vec3') -> Vec3:
return point
def points_from_wcs(self, points: Iterable['Vec3']) -> Iterable[Vec3]:
return points
```
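A minimal sketch of the UCS/OCS round trips defined above (an editor's addition, assuming `UCS`, `OCS` and `X_AXIS` are exported by `ezdxf.math`):
```python
import math
from ezdxf.math import UCS, OCS, X_AXIS

# UCS tilted 30 deg about the WCS x-axis, origin moved to (2, 3, 0):
ucs = UCS(origin=(2, 3, 0)).rotate(X_AXIS, math.radians(30))

wcs_point = ucs.to_wcs((1, 0, 0))    # UCS -> WCS
ucs_point = ucs.from_wcs(wcs_point)  # WCS -> UCS, back to ~(1, 0, 0)

# OCS established by a tilted extrusion vector, as used by planar DXF entities:
ocs = OCS(extrusion=(0, 0.5, 1))
ocs_point = ocs.from_wcs((1, 2, 3))
wcs_back = ocs.to_wcs(ocs_point)
```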
#### File: ezdxf/render/arrows.py
```python
from typing import TYPE_CHECKING, Iterable, Dict
from ezdxf.math import Vec2, Shape2d, NULLVEC
from .forms import open_arrow, arrow2
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex, GenericLayoutType, DXFGraphic, Drawing
DEFAULT_ARROW_ANGLE = 18.924644
DEFAULT_BETA = 45.
# The base arrow is oriented for the right hand side ->| of the dimension line, reverse is the left hand side |<-.
class BaseArrow:
def __init__(self, vertices: Iterable['Vertex']):
self.shape = Shape2d(vertices)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
pass
def place(self, insert: 'Vertex', angle: float):
self.shape.rotate(angle)
self.shape.translate(insert)
class NoneStroke(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
super().__init__([Vec2(insert)])
class ObliqueStroke(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
self.size = size
s2 = size / 2
# shape = [lower left, upper right]
super().__init__([Vec2((-s2, -s2)), Vec2((s2, s2))])
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_line(start=self.shape[0], end=self.shape[1], dxfattribs=dxfattribs)
class ArchTick(ObliqueStroke):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
dxfattribs = dict(dxfattribs or {})  # guard: this render() mutates dxfattribs
width = self.size * .15
if layout.dxfversion > 'AC1009':
dxfattribs['const_width'] = width
layout.add_lwpolyline(self.shape, format='xy', dxfattribs=dxfattribs)
else:
dxfattribs['default_start_width'] = width
dxfattribs['default_end_width'] = width
layout.add_polyline2d(self.shape, dxfattribs=dxfattribs)
class ClosedArrowBlank(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
super().__init__(open_arrow(size, angle=DEFAULT_ARROW_ANGLE))
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
if layout.dxfversion > 'AC1009':
polyline = layout.add_lwpolyline(
points=self.shape,
dxfattribs=dxfattribs)
else:
polyline = layout.add_polyline2d(
points=self.shape,
dxfattribs=dxfattribs)
polyline.close(True)
class ClosedArrow(ClosedArrowBlank):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
super().render(layout, dxfattribs)
end_point = self.shape[0].lerp(self.shape[2])
layout.add_line(start=self.shape[1], end=end_point, dxfattribs=dxfattribs)
class ClosedArrowFilled(ClosedArrow):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_solid(
points=self.shape,
dxfattribs=dxfattribs,
)
class _OpenArrow(BaseArrow):
def __init__(self, arrow_angle: float, insert: 'Vertex', size: float = 1.0, angle: float = 0):
points = list(open_arrow(size, angle=arrow_angle))
points.append((-1, 0))
super().__init__(points)
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
if layout.dxfversion > 'AC1009':
layout.add_lwpolyline(points=self.shape[:-1], dxfattribs=dxfattribs)
else:
layout.add_polyline2d(points=self.shape[:-1], dxfattribs=dxfattribs)
layout.add_line(start=self.shape[1], end=self.shape[-1], dxfattribs=dxfattribs)
class OpenArrow(_OpenArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
super().__init__(DEFAULT_ARROW_ANGLE, insert, size, angle)
class OpenArrow30(_OpenArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
super().__init__(30, insert, size, angle)
class OpenArrow90(_OpenArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
super().__init__(90, insert, size, angle)
class Circle(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
self.radius = size / 2
# shape = [center, point on circle, connection point]
super().__init__([
Vec2((0, 0)),
Vec2((-self.radius, 0)),
Vec2((-size, 0)),
])
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_circle(center=self.shape[0], radius=self.radius, dxfattribs=dxfattribs)
class Origin(Circle):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
super().render(layout, dxfattribs)
layout.add_line(start=self.shape[0], end=self.shape[2], dxfattribs=dxfattribs)
class CircleBlank(Circle):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
super().render(layout, dxfattribs)
layout.add_line(start=self.shape[1], end=self.shape[2], dxfattribs=dxfattribs)
class Origin2(Circle):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_circle(center=self.shape[0], radius=self.radius, dxfattribs=dxfattribs)
layout.add_circle(center=self.shape[0], radius=self.radius / 2, dxfattribs=dxfattribs)
layout.add_line(start=self.shape[1], end=self.shape[2], dxfattribs=dxfattribs)
class DotSmall(Circle):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
dxfattribs = dict(dxfattribs or {})  # guard: this render() mutates dxfattribs
center = self.shape[0]
d = Vec2((self.radius / 2, 0))
p1 = center - d
p2 = center + d
if layout.dxfversion > 'AC1009':
dxfattribs['const_width'] = self.radius
layout.add_lwpolyline([(p1, 1), (p2, 1)], format='vb', close=True,
dxfattribs=dxfattribs)
else:
dxfattribs['default_start_width'] = self.radius
dxfattribs['default_end_width'] = self.radius
polyline = layout.add_polyline2d(points=[p1, p2], close=True,
dxfattribs=dxfattribs)
polyline[0].dxf.bulge = 1
polyline[1].dxf.bulge = 1
class Dot(DotSmall):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_line(start=self.shape[1], end=self.shape[2], dxfattribs=dxfattribs)
super().render(layout, dxfattribs)
class Box(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
# shape = [lower_left, lower_right, upper_right, upper_left, connection point]
s2 = size / 2
super().__init__([
Vec2((-s2, -s2)),
Vec2((+s2, -s2)),
Vec2((+s2, +s2)),
Vec2((-s2, +s2)),
Vec2((-s2, 0)),
Vec2((-size, 0)),
])
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
if layout.dxfversion > 'AC1009':
polyline = layout.add_lwpolyline(points=self.shape[0:4], dxfattribs=dxfattribs)
else:
polyline = layout.add_polyline2d(points=self.shape[0:4], dxfattribs=dxfattribs)
polyline.close(True)
layout.add_line(start=self.shape[4], end=self.shape[5], dxfattribs=dxfattribs)
class BoxFilled(Box):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
def solid_order():
v = self.shape.vertices
return [v[0], v[1], v[3], v[2]]
layout.add_solid(points=solid_order(), dxfattribs=dxfattribs)
layout.add_line(start=self.shape[4], end=self.shape[5], dxfattribs=dxfattribs)
class Integral(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
self.radius = size * .3535534  # = size * sqrt(2) / 4
self.angle = angle
# shape = [center, left_center, right_center]
super().__init__([
Vec2((0, 0)),
Vec2((-self.radius, 0)),
Vec2((self.radius, 0)),
])
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
angle = self.angle
layout.add_arc(center=self.shape[1], radius=self.radius, start_angle=-90 + angle, end_angle=angle,
dxfattribs=dxfattribs)
layout.add_arc(center=self.shape[2], radius=self.radius, start_angle=90 + angle, end_angle=180 + angle,
dxfattribs=dxfattribs)
class DatumTriangle(BaseArrow):
REVERSE_ANGLE = 180
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
d = .577350269 * size # tan(30)
# shape = [upper_corner, lower_corner, connection_point]
super().__init__([
Vec2((0, d)),
Vec2((0, -d)),
Vec2((-size, 0)),
])
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
if layout.dxfversion > 'AC1009':
polyline = layout.add_lwpolyline(points=self.shape, dxfattribs=dxfattribs)
else:
polyline = layout.add_polyline2d(points=self.shape, dxfattribs=dxfattribs)
polyline.close(True)
class DatumTriangleFilled(DatumTriangle):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
layout.add_solid(points=self.shape, dxfattribs=dxfattribs)
class _EzArrow(BaseArrow):
def __init__(self, insert: 'Vertex', size: float = 1.0, angle: float = 0):
points = list(arrow2(size, angle=DEFAULT_ARROW_ANGLE))
points.append((-1, 0))
super().__init__(points)
self.place(insert, angle)
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
if layout.dxfversion > 'AC1009':
polyline = layout.add_lwpolyline(self.shape[:-1], dxfattribs=dxfattribs)
else:
polyline = layout.add_polyline2d(self.shape[:-1], dxfattribs=dxfattribs)
polyline.close(True)
class EzArrowBlank(_EzArrow):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
super().render(layout, dxfattribs)
layout.add_line(start=self.shape[-2], end=self.shape[-1], dxfattribs=dxfattribs)
class EzArrow(_EzArrow):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
super().render(layout, dxfattribs)
layout.add_line(start=self.shape[1], end=self.shape[-1], dxfattribs=dxfattribs)
class EzArrowFilled(_EzArrow):
def render(self, layout: 'GenericLayoutType', dxfattribs: dict = None):
points = self.shape.vertices
layout.add_solid([points[0], points[1], points[3], points[2]], dxfattribs=dxfattribs)
layout.add_line(start=self.shape[-2], end=self.shape[-1], dxfattribs=dxfattribs)
class _Arrows:
closed_filled = ""
dot = "DOT"
dot_small = "DOTSMALL"
dot_blank = "DOTBLANK"
origin_indicator = "ORIGIN"
origin_indicator_2 = "ORIGIN2"
open = "OPEN"
right_angle = "OPEN90"
open_30 = "OPEN30"
closed = "CLOSED"
dot_smallblank = "SMALL"
none = "NONE"
oblique = "OBLIQUE"
box_filled = "BOXFILLED"
box = "BOXBLANK"
closed_blank = "CLOSEDBLANK"
datum_triangle_filled = "DATUMFILLED"
datum_triangle = "DATUMBLANK"
integral = "INTEGRAL"
architectural_tick = "ARCHTICK"
# ezdxf special arrows
ez_arrow = "EZ_ARROW"
ez_arrow_blank = "EZ_ARROW_BLANK"
ez_arrow_filled = "EZ_ARROW_FILLED"
CLASSES = {
closed_filled: ClosedArrowFilled,
dot: Dot,
dot_small: DotSmall,
dot_blank: CircleBlank,
origin_indicator: Origin,
origin_indicator_2: Origin2,
open: OpenArrow,
right_angle: OpenArrow90,
open_30: OpenArrow30,
closed: ClosedArrow,
dot_smallblank: Circle,
none: NoneStroke,
oblique: ObliqueStroke,
box_filled: BoxFilled,
box: Box,
closed_blank: ClosedArrowBlank,
datum_triangle: DatumTriangle,
datum_triangle_filled: DatumTriangleFilled,
integral: Integral,
architectural_tick: ArchTick,
ez_arrow: EzArrow,
ez_arrow_blank: EzArrowBlank,
ez_arrow_filled: EzArrowFilled,
}
# arrows with origin at dimension line start/end
ORIGIN_ZERO = {
architectural_tick,
oblique,
dot_small,
dot_smallblank,
integral,
none,
}
__acad__ = {
closed_filled, dot, dot_small, dot_blank, origin_indicator, origin_indicator_2, open, right_angle, open_30,
closed, dot_smallblank, none, oblique, box_filled, box, closed_blank, datum_triangle, datum_triangle_filled,
integral, architectural_tick
}
__ezdxf__ = {
ez_arrow,
ez_arrow_blank,
ez_arrow_filled,
}
__all_arrows__ = __acad__ | __ezdxf__
EXTENSIONS_ALLOWED = {
architectural_tick,
oblique,
none,
dot_smallblank,
integral,
dot_small,
}
def is_acad_arrow(self, item: str) -> bool:
return item.upper() in self.__acad__
def is_ezdxf_arrow(self, item: str) -> bool:
return item.upper() in self.__ezdxf__
def has_extension_line(self, name):
return name in self.EXTENSIONS_ALLOWED
def __contains__(self, item: str) -> bool:
if item is None:
return False
return item.upper() in self.__all_arrows__
def create_block(self, blocks, name: str):
block_name = self.block_name(name)
if block_name not in blocks:
block = blocks.new(block_name)
arrow = self.arrow_shape(name, insert=(0, 0), size=1, rotation=0)
arrow.render(block, dxfattribs={'color': 0, 'linetype': 'BYBLOCK'})
return block_name
def block_name(self, name):
if not self.is_acad_arrow(name): # common BLOCK definition
return name.upper() # e.g. Dimension.dxf.bkl = 'EZ_ARROW' == Insert.dxf.name
        elif name == '':  # the special AutoCAD arrow 'CLOSED_FILLED' has an empty name
            # ezdxf uses BLOCK definitions for ALL arrows; the closed filled arrow
            # gets the reserved block name '_CLOSEDFILLED':
            return '_CLOSEDFILLED'  # Dimension.dxf.bkl = '' != Insert.dxf.name = '_CLOSEDFILLED'
else: # add preceding '_' to AutoCAD arrow symbol names
return '_' + name.upper() # Dimension.dxf.bkl = 'DOT' != Insert.dxf.name = '_DOT'
def arrow_name(self, block_name: str) -> str:
if block_name.startswith('_'):
name = block_name[1:].upper()
if name == 'CLOSEDFILLED':
return ''
elif self.is_acad_arrow(name):
return name
return block_name
def insert_arrow(self, layout: 'GenericLayoutType',
name: str,
insert: 'Vertex' = NULLVEC,
size: float = 1.0,
rotation: float = 0, *,
dxfattribs: Dict = None) -> Vec2:
""" Insert arrow as block reference into `layout`. """
block_name = self.create_block(layout.doc.blocks, name)
dxfattribs = dict(dxfattribs) if dxfattribs else {} # copy attribs
dxfattribs['rotation'] = rotation
dxfattribs['xscale'] = size
dxfattribs['yscale'] = size
layout.add_blockref(block_name, insert=insert, dxfattribs=dxfattribs)
return connection_point(name, insert=insert, scale=size, rotation=rotation)
def render_arrow(self, layout: 'GenericLayoutType',
name: str,
insert: 'Vertex' = NULLVEC,
size: float = 1.0,
rotation: float = 0, *,
dxfattribs: Dict = None) -> Vec2:
""" Render arrow as basic DXF entities into `layout`. """
dxfattribs = dxfattribs or {}
arrow = self.arrow_shape(name, insert, size, rotation)
arrow.render(layout, dxfattribs)
return connection_point(name, insert=insert, scale=size, rotation=rotation)
def virtual_entities(self,
name: str,
insert: 'Vertex' = NULLVEC,
size: float = 0.625,
rotation: float = 0, *,
dxfattribs: Dict = None) -> Iterable['DXFGraphic']:
""" Yield arrow components as virtual DXF entities. """
from ezdxf.layouts import VirtualLayout
if name in self:
layout = VirtualLayout()
dxfattribs = dxfattribs or {}
ARROWS.render_arrow(
layout, name,
insert=insert,
size=size,
rotation=rotation,
dxfattribs=dxfattribs,
)
yield from iter(layout)
def arrow_shape(self, name: str, insert: 'Vertex', size: float, rotation: float) -> BaseArrow:
# size depending shapes
name = name.upper()
if name == self.dot_small:
size *= .25
elif name == self.dot_smallblank:
size *= .5
cls = self.CLASSES[name]
return cls(insert, size, rotation)
def connection_point(arrow_name: str, insert: 'Vertex', scale: float = 1, rotation: float = 0) -> Vec2:
insert = Vec2(insert)
if arrow_name in _Arrows.ORIGIN_ZERO:
return insert
else:
return insert - Vec2.from_deg_angle(rotation, scale)
ARROWS = _Arrows()
```
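The `_Arrows` class above doubles as a registry (arrow name to shape class) and a factory with two render paths: block references versus plain DXF entities. A minimal usage sketch of the module-level singleton, relying only on the API shown above and the `ezdxf.ARROWS` alias used by the test suite further below; treat it as an illustration, not library documentation:
```python
# A minimal sketch, assuming the ezdxf.ARROWS alias for the singleton above.
import ezdxf

doc = ezdxf.new('R2010')
msp = doc.modelspace()
arrows = ezdxf.ARROWS

# insert_arrow() creates (or reuses) a block definition and places a block
# reference; it returns the connection point for the dimension line.
cp = arrows.insert_arrow(msp, arrows.closed_filled, insert=(0, 0), size=1.0)

# render_arrow() writes the same shape as plain DXF entities instead.
arrows.render_arrow(msp, arrows.dot, insert=(5, 0), size=1.0)
doc.saveas('arrows_demo.dxf')
```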
#### File: ezdxf/tools/complex_ltype.py
```python
from typing import TYPE_CHECKING, Iterable, Sequence, Union
from ezdxf.lldxf.const import DXFValueError, DXFTableEntryError
from ezdxf.lldxf.tags import DXFTag, Tags
if TYPE_CHECKING: # import forward references
from ezdxf.eztypes import Drawing
Token = Union[str, float, list]
def lin_compiler(definition: str) -> Sequence[DXFTag]:
"""
    Compiles line type definitions like 'A,.5,-.25,.5,-.25,0,-.25' or
    'A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25' into DXFTags().
Args:
definition: definition string
Returns:
list of DXFTag()
"""
# 'A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25'
# ['A', .5, -.2, ['TEXT', 'GAS', 'STANDARD', 's', .1, 'u', 0.0, 'x', -.1, 'y', -.05], -.25]
tags = []
for token in lin_parser(definition):
if token == 'A':
continue
elif isinstance(token, float):
tags.append(DXFTag(49, token)) # Dash, dot or space length (one entry per element)
        elif isinstance(token, list):  # complex TEXT or SHAPE part
            tags.append(compile_complex_definition(token))
return tags
class ComplexLineTypePart:
def __init__(self, type_: str, value, font: str = 'STANDARD'):
self.type = type_
self.value = value
self.font = font
self.tags = Tags()
def complex_ltype_tags(self, doc: 'Drawing') -> Sequence[DXFTag]:
def get_font_handle() -> str:
if self.type == 'SHAPE':
# Create new shx or returns existing entry:
font = doc.styles.get_shx(self.font)
else:
try:
# Case insensitive search for text style:
font = doc.styles.get(self.font)
except DXFTableEntryError:
font = doc.styles.new(self.font)
return font.dxf.handle
# Note: AutoCAD/BricsCAD do NOT report an error or even crash, if the
# text style handle is invalid!
if doc is not None:
handle = get_font_handle()
else:
handle = '0'
tags = []
if self.type == 'TEXT':
tags.append(DXFTag(74, 2))
tags.append(DXFTag(75, 0))
else: # SHAPE
tags.append(DXFTag(74, 4))
tags.append(DXFTag(75, self.value))
tags.append(DXFTag(340, handle))
tags.extend(self.tags)
if self.type == 'TEXT':
tags.append(DXFTag(9, self.value))
return tags
CMD_CODES = {
's': 46,
'r': 50, # r == u
'u': 50,
'x': 44,
'y': 45,
}
def compile_complex_definition(tokens: Sequence) -> ComplexLineTypePart:
part = ComplexLineTypePart(tokens[0], tokens[1], tokens[2])
commands = list(reversed(tokens[3:]))
params = {}
while len(commands):
cmd = commands.pop()
value = commands.pop()
code = CMD_CODES.get(cmd, 0)
params[code] = DXFTag(code, value)
for code in (46, 50, 44, 45):
tag = params.get(code, DXFTag(code, 0.))
part.tags.append(tag)
return part
def lin_parser(definition: str) -> Sequence[Token]:
bag = []
sublist = None
first = True
for token in lin_tokenizer(definition):
if token == 'A' and first:
bag.append(token)
first = False
continue
try:
value = float(token) # only outside of TEXT or SHAPE definition
bag.append(value)
continue
except ValueError:
pass
if token.startswith('['):
if sublist is not None:
raise DXFValueError('Complex line type error. {}'.format(definition))
sublist = []
if token.startswith('["'):
sublist.append('TEXT')
sublist.append(token[2:-1]) # text without surrounding '["' and '"'
else:
sublist.append('SHAPE')
try:
sublist.append(int(token[1:])) # shape index! required
except ValueError:
raise DXFValueError('Complex line type with shapes requires shape index not shape name!')
else:
_token = token.rstrip(']')
subtokens = _token.split('=')
if len(subtokens) == 2:
sublist.append(subtokens[0].lower())
sublist.append(float(subtokens[1]))
else:
sublist.append(_token)
if token.endswith(']'):
if sublist is None:
raise DXFValueError('Complex line type error. {}'.format(definition))
bag.append(sublist)
sublist = None
return bag
def lin_tokenizer(definition: str) -> Iterable[str]:
token = ''
escape = False
for char in definition:
if char == ',' and not escape:
yield token.strip()
token = ''
continue
token += char
if char == '"':
escape = not escape
if escape:
raise DXFValueError("Line type parsing error: '{}'".format(definition))
if token:
yield token.strip()
```
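A short sketch of what the parser and compiler above produce for the complex line type example from the docstring; the commented results follow directly from the code above (the exact repr of `ComplexLineTypePart` is paraphrased):
```python
# A sketch based on the functions above; output comments are paraphrased.
from ezdxf.tools.complex_ltype import lin_parser, lin_compiler

ltype = 'A,.5,-.2,["GAS",STANDARD,S=.1,U=0.0,X=-0.1,Y=-.05],-.25'

tokens = lin_parser(ltype)
# ['A', 0.5, -0.2,
#  ['TEXT', 'GAS', 'STANDARD', 's', 0.1, 'u', 0.0, 'x', -0.1, 'y', -0.05],
#  -0.25]

tags = lin_compiler(ltype)
# [DXFTag(49, 0.5), DXFTag(49, -0.2), <ComplexLineTypePart 'TEXT' 'GAS'>,
#  DXFTag(49, -0.25)]
```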
#### File: tests/test_00_dxf_low_level_structs/test_013_juliandate.py
```python
import pytest
from datetime import datetime
from ezdxf.tools.juliandate import juliandate, calendardate
class TestJulianDate:
def test_1582_10_15(self):
assert 2299161. == pytest.approx(juliandate(datetime(1582, 10, 15)))
def test_1990_01_01(self):
assert 2447893. == pytest.approx(juliandate(datetime(1990, 1, 1)))
def test_2000_01_01(self):
assert 2451545. == pytest.approx(juliandate(datetime(2000, 1, 1)))
def test_2011_03_21(self):
assert 2455642.75 == pytest.approx(juliandate(datetime(2011, 3, 21, 18, 0, 0)))
def test_1999_12_31(self):
assert 2451544.91568287 == pytest.approx(juliandate(datetime(1999, 12, 31, 21, 58, 35)))
class TestCalendarDate:
def test_1999_12_31(self):
check = datetime(1999, 12, 31, 21, 58, 35)
assert calendardate(2451544.91568288) == check
def test_2011_03_21(self):
check = datetime(2011, 3, 21, 18, 0, 0)
assert calendardate(2455642.75) == check
```
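The expected values above correspond to the Gregorian Julian day number with the fraction of the day measured from midnight. A standalone sketch using the standard Fliegel-Van Flandern arithmetic (an assumption about the convention the tests encode, not ezdxf's actual implementation) reproduces them:
```python
# A sketch of the convention the tests encode; not ezdxf's implementation.
from datetime import datetime

def jdn(year: int, month: int, day: int) -> int:
    # Fliegel-Van Flandern day number for the Gregorian calendar.
    a = (14 - month) // 12
    y = year + 4800 - a
    m = month + 12 * a - 3
    return day + (153 * m + 2) // 5 + 365 * y + y // 4 - y // 100 + y // 400 - 32045

def julian(dt: datetime) -> float:
    # day number plus the elapsed fraction of the day since midnight
    seconds = dt.hour * 3600 + dt.minute * 60 + dt.second
    return jdn(dt.year, dt.month, dt.day) + seconds / 86400.0

assert julian(datetime(2000, 1, 1)) == 2451545.0
assert julian(datetime(2011, 3, 21, 18, 0, 0)) == 2455642.75
```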
#### File: tests/test_00_dxf_low_level_structs/test_040_tags.py
```python
import pytest
from io import StringIO
from copy import deepcopy
from ezdxf.lldxf.tags import Tags, DXFTag
from ezdxf.lldxf.tagwriter import TagWriter
from ezdxf.lldxf.const import DXFValueError
TEST_TAGREADER = """ 0
SECTION
2
HEADER
9
$ACADVER
1
AC1018
9
$DWGCODEPAGE
3
ANSI_1252
0
ENDSEC
0
EOF
"""
TEST_TAGREADER_COMMENTS = """999
Comment0
0
SECTION
2
HEADER
9
$ACADVER
999
Comment1
1
AC1018
9
$DWGCODEPAGE
3
ANSI_1252
0
ENDSEC
0
EOF
"""
TESTHANDLE5 = """ 0
TEST
5
F5
"""
TESTHANDLE105 = """ 0
TEST
105
F105
"""
TESTFINDALL = """ 0
TEST0
0
TEST1
0
TEST2
"""
TAGS_WITH_VERTEX = """ 0
TEST
10
1.0
20
2.0
30
3.0
"""
class HandlesMock:
calls = 0
@property
def next(self):
self.calls += 1
return 'FF'
class TestTags:
@pytest.fixture
def tags(self):
return Tags.from_text(TEST_TAGREADER)
    def test_from_text(self, tags):
        assert 8 == len(tags)
def test_write(self, tags):
stream = StringIO()
tagwriter = TagWriter(stream)
tagwriter.write_tags(tags)
result = stream.getvalue()
stream.close()
assert TEST_TAGREADER == result
def test_update(self, tags):
tags.update(DXFTag(2, 'XHEADER'))
assert 'XHEADER' == tags[1].value
def test_update_error(self, tags):
with pytest.raises(DXFValueError):
tags.update(DXFTag(999, 'DOESNOTEXIST'))
def test_set_first(self, tags):
tags.set_first(DXFTag(999, 'NEWTAG'))
assert 'NEWTAG' == tags[-1].value
def test_find_first(self, tags):
value = tags.get_first_value(9)
assert '$ACADVER' == value
def test_find_first_default(self, tags):
value = tags.get_first_value(1234, default=999)
assert 999 == value
def test_find_first_error(self, tags):
with pytest.raises(DXFValueError):
tags.get_first_value(1234)
def test_get_handle_5(self):
tags = Tags.from_text(TESTHANDLE5)
assert 'F5' == tags.get_handle()
def test_get_handle_105(self):
tags = Tags.from_text(TESTHANDLE105)
assert 'F105' == tags.get_handle()
def test_get_handle_create_new(self, tags):
with pytest.raises(DXFValueError):
tags.get_handle()
def test_find_all(self):
tags = Tags.from_text(TESTFINDALL)
assert 3 == len(tags.find_all(0))
def test_tag_index(self):
tags = Tags.from_text(TESTFINDALL)
index = tags.tag_index(0)
assert 0 == index
index = tags.tag_index(0, index + 1)
assert 1 == index
    def test_tag_index_error(self):
tags = Tags.from_text(TESTFINDALL)
with pytest.raises(DXFValueError):
tags.tag_index(1)
def test_clone_is_equal(self, tags):
clone = tags.clone()
assert id(tags) != id(clone)
assert tags == clone
    def test_clone_is_independent(self, tags):
        clone = tags.clone()
        clone.pop()
        assert tags != clone
def test_deepcopy(self):
tags = Tags.from_text(TAGS_WITH_VERTEX)
assert len(tags) == 2
v = tags[1]
assert v.value == (1., 2., 3.)
tags2 = deepcopy(tags)
assert id(tags) != id(tags2)
assert tags == tags2, "same content"
# same ids, DXFTags are immutable
assert id(v) == id(tags[1])
def test_replace_handle_5(self):
tags = Tags.from_text(TESTHANDLE5)
tags.replace_handle('AA')
assert 'AA' == tags.get_handle()
def test_replace_handle_105(self):
tags = Tags.from_text(TESTHANDLE105)
tags.replace_handle('AA')
assert 'AA' == tags.get_handle()
def test_replace_no_handle_without_error(self, tags):
tags.replace_handle('AA')
with pytest.raises(DXFValueError):
tags.get_handle() # handle still doesn't exist
def test_remove_tags(self, tags):
tags.remove_tags(codes=(0,))
assert 5 == len(tags)
def test_strip_tags(self, tags):
tags.remove_tags(codes=(0,))
result = Tags.strip(tags, codes=(0,))
assert 5 == len(result)
assert isinstance(result, Tags)
def test_has_tag(self, tags):
assert tags.has_tag(2)
def test_has_not_tag(self, tags):
assert tags.has_tag(7) is False
def test_pop_tags(self):
tags = Tags([
DXFTag(1, 'name1'),
DXFTag(40, 1),
DXFTag(40, 2),
DXFTag(1, 'name2'),
DXFTag(40, 3),
DXFTag(1, 'name3'),
DXFTag(40, 4),
DXFTag(1, 'name4'),
])
result = list(tags.pop_tags(codes=(40, )))
assert len(result) == 4
assert result[0] == (40, 1)
assert result[-1] == (40, 4)
assert len(tags) == 4
assert tags[0] == (1, 'name1')
assert tags[-1] == (1, 'name4')
DUPLICATETAGS = """ 0
FIRST
0
LAST
1
TEST2
"""
COLLECT_1 = """ 0
ZERO
1
ONE
2
TWO
3
THREE
4
FOUR
0
ZERO
1
ONE
2
TWO
3
THREE
4
FOUR
"""
class TestTagsCollect:
@pytest.fixture
def tags(self):
return Tags.from_text(COLLECT_1)
def test_with_start_param(self, tags):
collected_tags = tags.collect_consecutive_tags([1, 2, 3], start=1)
assert 3 == len(collected_tags)
assert "THREE" == collected_tags[2].value
def test_with_end_param(self, tags):
collected_tags = tags.collect_consecutive_tags([0, 1, 2, 3], end=3)
assert 3 == len(collected_tags)
assert "TWO" == collected_tags[2].value
def test_with_start_and_end_param(self, tags):
collected_tags = tags.collect_consecutive_tags([1, 2, 3], start=6, end=9)
assert 3 == len(collected_tags)
assert "THREE" == collected_tags[2].value
def test_none_existing_codes(self, tags):
collected_tags = tags.collect_consecutive_tags([7, 8, 9])
assert 0 == len(collected_tags)
def test_all_codes(self, tags):
collected_tags = tags.collect_consecutive_tags([0, 1, 2, 3, 4])
assert 10 == len(collected_tags)
    def test_empty_tags(self):
tags = Tags()
collected_tags = tags.collect_consecutive_tags([0, 1, 2, 3, 4])
assert 0 == len(collected_tags)
```
#### File: tests/test_00_dxf_low_level_structs/test_043_filter_invalid_point_codes.py
```python
from ezdxf.lldxf.repair import filter_invalid_point_codes
def test_invalid_y_coord_after_xyz():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (30, 3), (20, 0)]
))
assert result == [(10, 1), (20, 2), (30, 3)]
def test_invalid_y_coord_after_xy():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (21, 2)]
))
assert result == [(10, 1), (20, 2)]
def test_z_axis_after_valid_point():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (30, 3), (30, 0)]
))
assert result == [(10, 1), (20, 2), (30, 3)]
def test_misplaced_z_axis():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (1, 'Text'), (30, 0), (1, 'xxx')]
))
assert result == [(10, 1), (20, 2), (1, 'Text'), (1, 'xxx')]
def test_correct_xy_axis():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (10, 1), (20, 0)]
))
assert result == [(10, 1), (20, 2), (10, 1), (20, 0)]
def test_invalid_single_x_axis():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (10, 1)]
))
assert result == [(10, 1), (20, 2)]
def test_preserve_leading_0_tag():
result = list(filter_invalid_point_codes(
[(0, 'SECTION'), (10, 1), (20, 2)]
))
assert result == [(0, 'SECTION'), (10, 1), (20, 2)]
def test_preserve_elevation_group_code_38():
result = list(filter_invalid_point_codes(
[(10, 1), (20, 2), (38, 0)]
))
assert result == [(10, 1), (20, 2), (38, 0)]
```
#### File: tests/test_00_dxf_low_level_structs/test_051_load_dxf_structure.py
```python
import pytest
from ezdxf.lldxf.tagger import internal_tag_compiler
from ezdxf.lldxf.loader import load_dxf_structure
from ezdxf.lldxf.const import DXFStructureError
def test_loader():
sections = load_dxf_structure(internal_tag_compiler(TEST_HEADER))
assert len(sections) == 3
header = sections['HEADER']
    assert len(header) == 1  # the loaded HEADER section always contains only one entity
header_entity = header[0]
assert header_entity[0] == (0, 'SECTION')
assert header_entity[1] == (2, 'HEADER')
assert header_entity[2] == (9, '$ACADVER')
assert header_entity[-1] == (3, 'ANSI_1252')
tables = sections['TABLES']
assert len(tables) == 1
tables_header = tables[0]
assert tables_header[0] == (0, 'SECTION')
assert tables_header[1] == (2, 'TABLES')
entities = sections['ENTITIES']
assert len(entities) == 1
entities_header = entities[0]
assert entities_header[0] == (0, 'SECTION')
assert entities_header[1] == (2, 'ENTITIES')
def test_error_section():
with pytest.raises(DXFStructureError):
load_dxf_structure(internal_tag_compiler(SECTION_INVALID_NAME_TAG))
with pytest.raises(DXFStructureError):
load_dxf_structure(internal_tag_compiler(SECTION_NO_NAME_TAG))
def validator(text):
tags = internal_tag_compiler(text)
return load_dxf_structure(tags)
def test_valid_structure():
sections = validator(" 0\nSECTION\n 2\nHEADER\n 0\nENDSEC\n 0\nSECTION\n 2\nCLASSES\n 0\nENDSEC\n 0\nEOF\n")
assert len(sections) == 2
assert len(sections['HEADER']) == 1 # ENDSEC is not present
assert len(sections['CLASSES']) == 1 # ENDSEC is not present
def test_eof_without_lineending():
sections = validator(" 0\nSECTION\n 2\nHEADER\n 0\nENDSEC\n 0\nSECTION\n 2\nCLASSES\n 0\nENDSEC\n 0\nEOF")
assert len(sections) == 2
assert len(sections['HEADER']) == 1 # ENDSEC is not present
assert len(sections['CLASSES']) == 1 # ENDSEC is not present
def test_missing_eof():
with pytest.raises(DXFStructureError):
validator("999\ncomment")
def test_missing_endsec():
with pytest.raises(DXFStructureError):
validator(" 0\nSECTION\n 2\nHEADER\n 0\nSECTION\n 2\nCLASSES\n 0\nENDSEC\n 0\nEOF\n")
with pytest.raises(DXFStructureError):
validator(" 0\nSECTION\n 2\nHEADER\n 0\nSECTION\n 2\nCLASSES\n 0\nEOF\n")
def test_missing_endsec_and_eof():
with pytest.raises(DXFStructureError):
validator(" 0\nSECTION\n 2\nHEADER\n 0\nENDSEC\n 0\nSECTION\n 2\nCLASSES\n")
def test_missing_section():
with pytest.raises(DXFStructureError):
validator(" 0\nENDSEC\n 0\nSECTION\n 2\nCLASSES\n 0\nENDSEC\n 0\nEOF\n")
TEST_HEADER = """ 0
SECTION
2
HEADER
9
$ACADVER
1
AC1018
9
$DWGCODEPAGE
3
ANSI_1252
0
ENDSEC
0
SECTION
2
TABLES
0
ENDSEC
0
SECTION
2
ENTITIES
0
ENDSEC
0
EOF
"""
SECTION_INVALID_NAME_TAG = """ 0
SECTION
3
HEADER
0
ENDSEC
"""
SECTION_NO_NAME_TAG = """ 0
SECTION
0
ENDSEC
"""
```
#### File: tests/test_00_dxf_low_level_structs/test_054_dxfattr.py
```python
import pytest
from ezdxf.lldxf.attributes import DXFAttr, RETURN_DEFAULT
def test_return_default():
attr = DXFAttr(
code=62,
default=12,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr.fixer(7) == 12
attr2 = DXFAttr(
code=63,
default=13,
validator=lambda x: False,
fixer=RETURN_DEFAULT,
)
assert attr2.fixer(7) == 13
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_01_dxf_entities/test_103_reactors.py
```python
import pytest
from ezdxf.entities.appdata import Reactors
from ezdxf.lldxf.const import REACTOR_HANDLE_CODE, ACAD_REACTORS, APP_DATA_MARKER
from ezdxf.lldxf.tags import Tags
class TagWriter:
""" Mockup """
def __init__(self):
self.tags = []
def write_tag2(self, code, value):
self.tags.append((code, value))
def test_reactors_new():
reactors = Reactors(['DDDD', 'AA', 'CCCC', 'BBB'])
assert len(reactors) == 4
handles = reactors.get()
# sorted handles
assert handles[0] == 'AA'
assert handles[3] == 'DDDD'
def test_reactors_add():
reactors = Reactors(['AA', 'BBB', 'CCCC'])
reactors.add('AA')
assert len(reactors) == 3, "do not add duplicates"
reactors.add('0')
assert len(reactors) == 4, "add unique handles"
def test_reactors_set():
reactors = Reactors()
assert len(reactors) == 0
reactors.set(['0', '1', '2'])
assert len(reactors) == 3
reactors.set(['0'])
assert len(reactors) == 1
# delete all
reactors.set([])
assert len(reactors) == 0
def test_reactors_discard():
reactors = Reactors(['AA', 'BBB', 'CCCC'])
reactors.discard('AA')
assert len(reactors) == 2
# ignore non existing handles
reactors.discard('abcd')
assert len(reactors) == 2
def test_export_dxf():
reactors = Reactors(['AA', 'BBB', 'CCCC'])
tagwriter = TagWriter()
reactors.export_dxf(tagwriter)
tags = tagwriter.tags
assert len(tags) == 5
# sorted handles!
assert tags[0] == (APP_DATA_MARKER, ACAD_REACTORS)
assert tags[1] == (REACTOR_HANDLE_CODE, 'AA')
assert tags[2] == (REACTOR_HANDLE_CODE, 'BBB')
assert tags[3] == (REACTOR_HANDLE_CODE, 'CCCC')
assert tags[4] == (APP_DATA_MARKER, '}')
def test_from_tags():
reactors = Reactors.from_tags(Tags.from_text(HANDLES))
handles = reactors.get()
assert len(handles) == 3
assert handles[0] == 'C000'
assert handles[1] == 'D000'
assert handles[2] == 'E000'
HANDLES = """102
{ACAD_REACTORS
330
D000
330
C000
330
E000
102
}
"""
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_01_dxf_entities/test_118_appid_table_entry.py
```python
import pytest
from ezdxf.entities.appid import AppID
@pytest.fixture
def appid():
return AppID.new('FFFF', dxfattribs={
'name': 'EZDXF',
})
def test_name(appid):
assert appid.dxf.name == 'EZDXF'
```
#### File: tests/test_01_dxf_entities/test_122_vport_table_entry.py
```python
import pytest
from ezdxf.entities.vport import VPort
@pytest.fixture
def vport():
return VPort.new('FFFF', dxfattribs={
'name': 'VP1',
})
def test_name(vport):
assert vport.dxf.name == 'VP1'
```
#### File: tests/test_01_dxf_entities/test_132_layer_filter.py
```python
from typing import cast
import pytest
import ezdxf
from ezdxf.entities.idbuffer import LayerFilter
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
LAYERFILTER = """0
LAYER_FILTER
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbFilter
100
AcDbLayerFilter
"""
@pytest.fixture
def entity():
return LayerFilter.from_text(LAYERFILTER)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'LAYER_FILTER' in ENTITY_CLASSES
def test_default_init():
entity = LayerFilter()
assert entity.dxftype() == 'LAYER_FILTER'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = LayerFilter.new(handle='ABBA', owner='0', dxfattribs={
})
assert len(entity.handles) == 0
def test_load_from_text(entity):
assert len(entity.handles) == 0
def test_write_dxf():
entity = LayerFilter.from_text(LAYERFILTER)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(LAYERFILTER)
assert result == expected
@pytest.fixture(scope='module')
def doc():
return ezdxf.new('R2007')
def test_generic_field_list(doc):
layers = doc.objects.new_entity('LAYER_FILTER', {})
assert layers.dxftype() == 'LAYER_FILTER'
assert len(layers.handles) == 0
def test_set_get_field_list(doc):
field_list = doc.objects.new_entity('LAYER_FILTER', {})
assert field_list.dxftype() == 'LAYER_FILTER'
field_list.handles = ['FF', 'EE', 'DD']
handles = field_list.handles
assert len(handles) == 3
assert handles == ['FF', 'EE', 'DD']
handles.append('FFFF')
assert handles[-1] == 'FFFF'
def test_dxf_tags(doc):
buffer = cast(LayerFilter, doc.objects.new_entity('LAYER_FILTER', {}))
buffer.handles = ['FF', 'EE', 'DD', 'CC']
tags = TagCollector.dxftags(buffer)[-4:]
assert len(tags) == 4
assert tags[0] == (330, 'FF')
assert tags[-1] == (330, 'CC')
def test_clone(doc):
layers = cast(LayerFilter, doc.objects.new_entity('LAYER_FILTER', {}))
layers.handles = ['FF', 'EE', 'DD', 'CC']
layers2 = cast(LayerFilter, layers.copy())
layers2.handles[-1] = 'ABCD'
assert layers.handles[:-1] == layers2.handles[:-1]
assert layers.handles[-1] != layers2.handles[-1]
```
#### File: tests/test_01_dxf_entities/test_137_sortentstable.py
```python
import pytest
import ezdxf
from ezdxf.entities.dxfobj import SortEntsTable
@pytest.fixture(scope='module')
def doc():
return ezdxf.new('R2000')
def get_entry(table, index):
return list(table)[index]
def test_sort_entities_table(doc):
sort_entities_table = doc.objects.new_entity('SORTENTSTABLE', {'block_record_handle': 'FFFF'})
assert sort_entities_table.dxftype() == 'SORTENTSTABLE'
assert sort_entities_table.dxf.block_record_handle == 'FFFF'
assert len(sort_entities_table) == 0
sort_entities_table.append('AAA', 'BBB')
assert get_entry(sort_entities_table, 0) == ('AAA', 'BBB')
def test_sort_entities_table_as_list(doc):
sort_entities_table = doc.objects.new_entity('SORTENTSTABLE', {})
sort_entities_table.set_handles([
('AAA', 'BBB'), ('CCC', 'DDD'), ('EEE', 'FFF'),
])
assert len(sort_entities_table) == 3
assert get_entry(sort_entities_table, 0) == ('AAA', 'BBB')
assert get_entry(sort_entities_table, -1) == ('EEE', 'FFF')
sort_entities_table.clear()
assert len(sort_entities_table) == 0
def test_sort_entities_table_to_dict(doc):
sort_entities_table = doc.objects.new_entity('SORTENTSTABLE', {})
sort_entities_table.set_handles([
('AAA', 'BBB'), ('CCC', 'DDD'), ('EEE', 'FFF'),
])
assert len(sort_entities_table) == 3
assert get_entry(sort_entities_table, 2) == ('EEE', 'FFF')
# simple way to dict()
d = dict(sort_entities_table)
assert d['AAA'] == 'BBB'
assert d['CCC'] == 'DDD'
assert d['EEE'] == 'FFF'
def test_remove_invalid_handles(doc):
sort_entities_table = doc.objects.new_entity('SORTENTSTABLE', {})
sort_entities_table.set_handles([
('AAA', 'BBB'), ('CCC', 'DDD'), ('EEE', 'FFF'),
])
assert len(sort_entities_table) == 3
sort_entities_table.remove_invalid_handles()
assert len(sort_entities_table) == 0
def test_remove_handle(doc):
sort_entities_table = doc.objects.new_entity('SORTENTSTABLE', {})
sort_entities_table.set_handles([
('AAA', 'BBB'), ('CCC', 'DDD'), ('EEE', 'FFF'),
])
assert len(sort_entities_table) == 3
sort_entities_table.remove_handle('AAA')
assert len(sort_entities_table) == 2
# no exception if handle not exists
sort_entities_table.remove_handle('FFFF')
assert len(sort_entities_table) == 2
SORT_ENTITIES_TABLE = """0
SORTENTSTABLE
5
0
102
{ACAD_REACTORS
330
0
102
}
330
0
100
AcDbSortentsTable
330
ABBA
331
1
5
A
331
2
5
B
"""
def test_load_table():
table = SortEntsTable.from_text(SORT_ENTITIES_TABLE)
assert table.dxf.block_record_handle == 'ABBA'
assert len(table) == 2
assert table.table['1'] == 'A'
assert table.table['2'] == 'B'
```
#### File: tests/test_02_dxf_graphics/test_217_dimlines_R2000.py
```python
import pytest
import ezdxf
from ezdxf.entities.dimension import Dimension
@pytest.fixture(scope='module')
def dxf2000():
return ezdxf.new('R2000', setup='all')
@pytest.fixture(scope='module')
def dxf2007():
return ezdxf.new('R2007', setup='all')
def test_dimstyle_standard_exist(dxf2000):
assert 'EZDXF' in dxf2000.dimstyles
def test_rotated_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.LINEAR
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.LINEAR
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.insert == (0, 0, 0)
assert dimline.dxf.defpoint2 == (0, 0, 0)
assert dimline.dxf.defpoint3 == (0, 0, 0)
assert dimline.dxf.angle == 0.
assert dimline.dxf.oblique_angle == 0.
def test_aligned_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.ALIGNED
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.ALIGNED
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.insert == (0, 0, 0)
assert dimline.dxf.defpoint2 == (0, 0, 0)
assert dimline.dxf.defpoint3 == (0, 0, 0)
assert dimline.dxf.angle == 0.
assert dimline.dxf.oblique_angle == 0.
def test_angular_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.ANGULAR
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.ANGULAR
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.defpoint2 == (0, 0, 0)
assert dimline.dxf.defpoint3 == (0, 0, 0)
assert dimline.dxf.defpoint4 == (0, 0, 0)
assert dimline.dxf.defpoint5 == (0, 0, 0)
def test_angular_3p_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.ANGULAR_3P
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.ANGULAR_3P
def test_radius_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.RADIUS
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.RADIUS
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.defpoint4 == (0, 0, 0)
assert dimline.is_supported_dxf_attrib('leader_length')
def test_diameter_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.DIAMETER
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.DIAMETER
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.defpoint4 == (0, 0, 0)
assert dimline.is_supported_dxf_attrib('leader_length')
def test_ordinate_dimline(dxf2000):
msp = dxf2000.modelspace()
dxfattribs = {
'dimtype': Dimension.ORDINATE
}
dimline = msp.new_entity('DIMENSION', dxfattribs)
assert dimline.dimtype == Dimension.ORDINATE
assert dimline.dxf.defpoint == (0, 0, 0)
assert dimline.dxf.defpoint2 == (0, 0, 0)
assert dimline.dxf.defpoint3 == (0, 0, 0)
def test_add_horizontal_dimline(dxf2000):
msp = dxf2000.modelspace()
override = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
)
dimline = override.dimension
assert dimline.dxf.dimstyle == 'EZDXF'
override.render()
block_name = dimline.dxf.geometry
assert block_name.startswith('*D')
block = dimline.get_geometry_block()
assert len(list(block.query('MTEXT'))) == 1
assert len(list(block.query('INSERT'))) == 2
assert len(list(block.query('LINE'))) == 3 # dimension line + 2 extension lines
assert len(list(block.query('POINT'))) == 3 # def points
def test_virtual_entities_and_explode(dxf2000):
msp = dxf2000.modelspace()
override = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
)
dimline = override.dimension
dimline.render()
parts = list(dimline.virtual_entities())
assert len(parts) == 9
geometry = dimline.dxf.geometry
parts = dimline.explode()
assert len(list(parts.query('MTEXT'))) == 1
assert len(list(parts.query('INSERT'))) == 2
assert len(list(parts.query('LINE'))) == 3 # dimension line + 2 extension lines
assert len(list(parts.query('POINT'))) == 3 # def points
assert dimline.is_alive is False
assert geometry in dxf2000.blocks, 'Do not destroy anonymous block, may be used by block references.'
def test_dimstyle_override(dxf2000):
msp = dxf2000.modelspace()
dimstyle = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
dxfattribs={
'dimstyle': 'EZDXF',
}
)
dimline = dimstyle.dimension
assert dimline.dxf.dimstyle == 'EZDXF'
    if 'TEST' not in dxf2000.styles:  # text style must exist
dxf2000.styles.new('TEST')
preset = {
'dimtxsty': 'TEST',
'dimexe': 0.777,
}
dimstyle.update(preset)
assert dimstyle['dimtxsty'] == 'TEST'
assert dimstyle['dimexe'] == 0.777
assert dimstyle['invalid'] is None
dimstyle.update({'invalid': 7})
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
# unknown attributes are ignored
dstyle_orig = dimstyle.get_dstyle_dict()
assert len(dstyle_orig) == 0
dimstyle.commit()
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
dstyle = dimstyle.get_dstyle_dict()
assert dstyle['dimexe'] == 0.777
# handle attributes not available, just stored transparent in XDATA
assert 'dimtxsty_handle' not in dstyle
assert dstyle['dimtxsty'] == 'TEST'
def test_linetype_override_R2000(dxf2000):
msp = dxf2000.modelspace()
preset = {
'dimltype': 'DOT',
'dimltex1': 'DOT2',
'dimltex2': 'DOTX2',
}
dimstyle = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
dimstyle='EZDXF',
override=preset,
)
assert dimstyle['dimltype'] == 'DOT'
assert dimstyle['dimltex1'] == 'DOT2'
assert dimstyle['dimltex2'] == 'DOTX2'
dimstyle.commit()
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
dstyle = dimstyle.get_dstyle_dict()
# handle attributes not available, just stored transparent in XDATA
assert 'dimltype_handle' not in dstyle
assert 'dimltex1_handle' not in dstyle
assert 'dimltex2_handle' not in dstyle
# line type not supported by DXF R2000
assert 'dimltype' not in dstyle
assert 'dimltex1' not in dstyle
assert 'dimltex2' not in dstyle
def test_linetype_override_R2007(dxf2007):
msp = dxf2007.modelspace()
preset = {
'dimltype': 'DOT',
'dimltex1': 'DOT2',
'dimltex2': 'DOTX2',
}
dimstyle = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
dimstyle='EZDXF',
override=preset,
)
assert dimstyle['dimltype'] == 'DOT'
assert dimstyle['dimltex1'] == 'DOT2'
assert dimstyle['dimltex2'] == 'DOTX2'
dimstyle.commit()
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
dstyle = dimstyle.get_dstyle_dict()
# handle attributes not available, just stored transparent in XDATA
assert 'dimltype_handle' not in dstyle
assert 'dimltex1_handle' not in dstyle
assert 'dimltex2_handle' not in dstyle
assert dstyle['dimltype'] == 'DOT'
assert dstyle['dimltex1'] == 'DOT2'
assert dstyle['dimltex2'] == 'DOTX2'
def test_dimstyle_override_arrows(dxf2000):
msp = dxf2000.modelspace()
arrows = ezdxf.ARROWS
blocks = dxf2000.blocks
arrows.create_block(blocks, arrows.dot_blank)
arrows.create_block(blocks, arrows.box)
arrows.create_block(blocks, arrows.closed)
arrows.create_block(blocks, arrows.closed_filled)
preset = {
'dimblk': arrows.dot_blank,
'dimblk1': arrows.box,
'dimblk2': arrows.closed,
'dimldrblk': arrows.closed_filled, # virtual attribute
}
dimstyle = msp.add_linear_dim(
base=(3, 2, 0),
p1=(0, 0, 0),
p2=(3, 0, 0),
dimstyle='EZDXF',
override=preset,
)
# still as block names stored
assert dimstyle['dimblk'] == arrows.dot_blank
assert dimstyle['dimblk1'] == arrows.box
assert dimstyle['dimblk2'] == arrows.closed
assert dimstyle['dimldrblk'] == arrows.closed_filled
dstyle_orig = dimstyle.get_dstyle_dict()
assert len(dstyle_orig) == 0
dimstyle.commit()
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
dstyle = dimstyle.get_dstyle_dict()
# handle attributes not available, just stored transparent in XDATA
assert 'dimblk_handle' not in dstyle
assert 'dimblk1_handle' not in dstyle
assert 'dimblk2_handle' not in dstyle
assert 'dimldrblk_handle' not in dstyle
assert dstyle['dimblk'] == arrows.dot_blank
assert dstyle['dimblk1'] == arrows.box
assert dstyle['dimblk2'] == arrows.closed
assert dstyle['dimldrblk'] == '' # special handle for closed filled
dimstyle.set_arrows(blk=arrows.closed, blk1=arrows.dot_blank, blk2=arrows.box, ldrblk=arrows.dot_small)
assert dimstyle['dimblk'] == arrows.closed
assert dimstyle['dimblk1'] == arrows.dot_blank
assert dimstyle['dimblk2'] == arrows.box
assert dimstyle['dimldrblk'] == arrows.dot_small
dimstyle.commit()
# ezdxf 0.10 and later uses internally only resource names not handles for dim style attributes
dstyle = dimstyle.get_dstyle_dict()
assert dstyle['dimblk'] == arrows.closed
assert dstyle['dimblk1'] == arrows.dot_blank
assert dstyle['dimblk2'] == arrows.box
# create acad arrows on demand
assert dstyle['dimldrblk'] == arrows.dot_small
```
#### File: tests/test_02_dxf_graphics/test_218_poly_line_mesh_face.py
```python
import pytest
from ezdxf.lldxf.const import VTX_3D_POLYLINE_VERTEX
from ezdxf import DXFIndexError
from ezdxf.layouts import VirtualLayout
@pytest.fixture
def msp():
return VirtualLayout()
def test_create_polyline2D(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1)])
assert (0., 0.) == polyline[0].dxf.location
assert (1., 1.) == polyline[1].dxf.location
assert 'AcDb2dPolyline' == polyline.get_mode()
def test_create_polyline3D(msp):
polyline = msp.add_polyline3d([(1, 2, 3), (4, 5, 6)])
assert (1., 2., 3.) == polyline[0].dxf.location
assert (4., 5., 6.) == polyline[1].dxf.location
assert VTX_3D_POLYLINE_VERTEX == polyline[0].dxf.flags
assert 'AcDb3dPolyline' == polyline.get_mode()
def test_polyline3d_vertex_layer(msp):
attribs = {'layer': 'polyline_layer'}
polyline = msp.add_polyline3d([(1, 2, 3), (4, 5, 6)], dxfattribs=attribs)
for vertex in polyline.vertices:
assert 'polyline_layer' == vertex.dxf.layer, "VERTEX entity not on the same layer as the POLYLINE entity."
def test_polyline3d_change_polyline_layer(msp):
attribs = {'layer': 'polyline_layer'}
polyline = msp.add_polyline3d([(1, 2, 3), (4, 5, 6)], dxfattribs=attribs)
polyline.dxf.layer = "changed_layer"
for vertex in polyline.vertices:
assert 'changed_layer' == vertex.dxf.layer, "VERTEX entity not on the same layer as the POLYLINE entity."
def test_polyline2d_set_vertex(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1), (2, 2), (3, 3)])
polyline[2].dxf.location = (7, 7)
assert (7., 7.) == polyline[2].dxf.location
def test_polyline2d_points(msp):
points = [(0, 0), (1, 1), (2, 2), (3, 3)]
polyline = msp.add_polyline2d(points)
assert points == list(polyline.points())
def test_polyline2d_point_slicing(msp):
points = [(0, 0), (1, 1), (2, 2), (3, 3)]
polyline = msp.add_polyline2d(points)
assert [(1, 1), (2, 2)] == list(polyline.points())[1:3]
def test_polyline2d_append_vertices(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1)])
polyline.append_vertices([(7, 7), (8, 8)])
assert (7., 7.) == polyline[2].dxf.location
assert 4 == len(polyline)
def test_polyline2d_insert_vertices(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1)])
polyline.insert_vertices(0, [(7, 7), (8, 8)])
assert (7, 7) == polyline[0].dxf.location
assert (1, 1) == polyline[3].dxf.location
assert 4 == len(polyline)
def test_polyline2d_delete_one_vertex(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1), (2, 2), (3, 3)])
del polyline.vertices[0]
assert (1, 1) == polyline[0].dxf.location
assert 3 == len(polyline)
def test_polyline2d_delete_two_vertices(msp):
polyline = msp.add_polyline2d([(0, 0), (1, 1), (2, 2), (3, 3)])
del polyline.vertices[0:2]
assert (2, 2) == polyline[0].dxf.location
assert 2 == len(polyline)
def test_polymesh_create_mesh(msp):
    # creation must not raise an exception
    mesh = msp.add_polymesh((4, 4))
    assert mesh is not None
def test_polymesh_set_vertex(msp):
mesh = msp.add_polymesh((4, 4))
mesh.set_mesh_vertex((1, 1), (1, 2, 3))
result = mesh.get_mesh_vertex((1, 1)).dxf.location
assert (1, 2, 3) == result
def test_polymesh_error_nindex(msp):
mesh = msp.add_polymesh((4, 4))
with pytest.raises(DXFIndexError):
mesh.get_mesh_vertex((0, 4))
def test_polymesh_error_mindex(msp):
mesh = msp.add_polymesh((4, 4))
with pytest.raises(DXFIndexError):
mesh.get_mesh_vertex((4, 0))
def test_polymesh_mesh_cache(msp):
pos = (2, 1)
mesh = msp.add_polymesh((4, 4))
cache = mesh.get_mesh_vertex_cache()
cache[pos] = (1, 2, 3)
vertex = mesh.get_mesh_vertex(pos)
assert vertex.dxf.location == cache[pos]
with pytest.raises(DXFIndexError):
cache[4, 0]
def test_polyface_create_face(msp):
face = msp.add_polyface()
assert 0 == len(face)
def test_polyface_add_face(msp):
face = msp.add_polyface()
face.append_face([(0, 0), (1, 1), (2, 2), (3, 3)])
assert [(0, 0), (1, 1), (2, 2), (3, 3), (0, 0, 0)] == list(face.points())
def test_polyface_face_indices(msp):
face = msp.add_polyface()
face.append_face([(0, 0), (1, 1), (2, 2), (3, 3)])
face_record = face[4]
assert 1 == face_record.dxf.vtx0
assert 2 == face_record.dxf.vtx1
assert 3 == face_record.dxf.vtx2
assert 4 == face_record.dxf.vtx3
def test_polyface_add_two_face_indices(msp):
face = msp.add_polyface()
face.append_face([(0, 0), (1, 1), (2, 2), (3, 3)])
# second face has same vertices as the first face
face.append_face([(0, 0), (1, 1), (2, 2)])
face_record = face[5] # second face
assert 1 == face_record.dxf.vtx0
assert 2 == face_record.dxf.vtx1
assert 3 == face_record.dxf.vtx2
assert 4 == face.dxf.m_count # vertices count
assert 2 == face.dxf.n_count # faces count
def test_polyface_faces(msp):
face = msp.add_polyface()
face.append_face([(0, 0), (1, 1), (2, 2), (3, 3)])
face.append_face([(0, 0), (1, 1), (2, 2)])
result = list(face.faces())
assert 2 == len(result)
points1 = [vertex.dxf.location for vertex in result[0]]
# the last vertex is the face_record and is always (0, 0, 0)
# the face_record contains indices to the face building vertices
assert [(0, 0, 0), (1, 1, 0), (2, 2, 0), (3, 3, 0), (0, 0, 0)] == points1
def test_polyface_optimized_cube(msp):
face = msp.add_polyface()
    # a cube consists of 6 faces and 24 vertices
# duplicated vertices should be removed
face.append_faces(cube_faces())
assert 8 == face.dxf.m_count # vertices count
assert 6 == face.dxf.n_count # faces count
def cube_faces():
# cube corner points
p1 = (0, 0, 0)
p2 = (0, 0, 1)
p3 = (0, 1, 0)
p4 = (0, 1, 1)
p5 = (1, 0, 0)
p6 = (1, 0, 1)
p7 = (1, 1, 0)
p8 = (1, 1, 1)
    # define the 6 cube faces
    # looking into the -x direction
    # every appended face adds 4 vertices: 6 x 4 = 24 vertices
return [
[p1, p5, p7, p3],
[p1, p5, p6, p2],
[p5, p7, p8, p6],
[p7, p8, p4, p3],
[p1, p3, p4, p2],
[p2, p6, p8, p4],
]
```
#### File: tests/test_02_dxf_graphics/test_221_ellipse.py
```python
import pytest
import math
from ezdxf.math import Vec3
from ezdxf.entities.ellipse import Ellipse, MIN_RATIO, MAX_RATIO
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
ELLIPSE = """0
ELLIPSE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbEllipse
10
0.0
20
0.0
30
0.0
11
1.0
21
0.0
31
0.0
40
1.0
41
0.0
42
6.283185307179586
"""
@pytest.fixture
def entity():
return Ellipse.from_text(ELLIPSE)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'ELLIPSE' in ENTITY_CLASSES
def test_default_init():
entity = Ellipse()
assert entity.dxftype() == 'ELLIPSE'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = Ellipse.new(handle='ABBA', owner='0', dxfattribs={
'color': 7,
'ratio': 0.5,
'center': (1, 2, 3),
'major_axis': (4, 5, 6),
'start_param': 10,
'end_param': 20,
})
assert entity.dxf.layer == '0'
assert entity.dxf.color == 7
assert entity.dxf.center == (1, 2, 3)
assert entity.dxf.major_axis == (4, 5, 6)
assert entity.dxf.ratio == 0.5
assert entity.dxf.start_param == 10
assert entity.dxf.end_param == 20
def test_extrusion_can_not_be_a_null_vector():
e = Ellipse.new(dxfattribs={'extrusion': (0, 0, 0)})
assert e.dxf.extrusion == (0, 0, 1), 'expected default extrusion'
def test_major_axis_can_not_be_a_null_vector():
pytest.raises(ValueError, Ellipse.new, dxfattribs={'major_axis': (0, 0, 0)})
@pytest.mark.parametrize('ratio', [-2, -1, 0, 1, 2])
def test_ratio_is_always_valid(ratio):
e = Ellipse.new(dxfattribs={'ratio': ratio})
assert MIN_RATIO <= abs(e.dxf.ratio) <= MAX_RATIO
@pytest.mark.parametrize('ratio', [-1, -0.5, -1e-9])
def test_ratio_can_be_negative(ratio):
e = Ellipse.new(dxfattribs={'ratio': ratio})
assert e.dxf.ratio < 0
def test_load_from_text(entity):
assert entity.dxf.layer == '0'
assert entity.dxf.color == 256, 'default color is 256 (by layer)'
assert entity.dxf.center == (0, 0, 0)
assert entity.dxf.major_axis == (1, 0, 0)
assert entity.dxf.ratio == 1
assert entity.dxf.start_param == 0
assert entity.dxf.end_param == math.pi * 2
def test_get_start_and_end_vertex():
ellipse = Ellipse.new(handle='ABBA', owner='0', dxfattribs={
'center': (1, 2, 3),
'major_axis': (4, 3, 0),
'ratio': .7,
'start_param': math.pi / 2,
'end_param': math.pi,
'extrusion': (0, 0, -1),
})
start, end = list(ellipse.vertices([
ellipse.dxf.start_param,
ellipse.dxf.end_param,
]))
# test values from BricsCAD
assert start.isclose(Vec3(3.1, -0.8, 3), abs_tol=1e-6)
assert end.isclose(Vec3(-3, -1, 3), abs_tol=1e-6)
# for convenience, but Ellipse.vertices is much more efficient:
assert ellipse.start_point.isclose(Vec3(3.1, -0.8, 3), abs_tol=1e-6)
assert ellipse.end_point.isclose(Vec3(-3, -1, 3), abs_tol=1e-6)
def test_write_dxf():
entity = Ellipse.from_text(ELLIPSE)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(ELLIPSE)
assert result == expected
def test_from_arc():
from ezdxf.entities.arc import Arc
arc = Arc.new(dxfattribs={'center': (2, 2, 2), 'radius': 3})
ellipse = Ellipse.from_arc(arc)
assert ellipse.dxf.center == (2, 2, 2)
assert ellipse.dxf.major_axis == (3, 0, 0)
assert ellipse.dxf.ratio == 1
assert ellipse.dxf.start_param == 0
assert math.isclose(ellipse.dxf.end_param, math.tau)
# tests for swap_axis() are done in test_648_construction_ellipse.py
# tests for params() are done in test_648_construction_ellipse.py
```
#### File: tests/test_02_dxf_graphics/test_232_acis_2.py
```python
import pytest
import math
from ezdxf.entities.acis import Body, Solid3d, Region, Surface, ExtrudedSurface, LoftedSurface, RevolvedSurface, \
SweptSurface
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
BODY = """0
BODY
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
"""
class MockDoc:
def __init__(self):
self.dxfversion = 'AC1024'
@pytest.fixture
def entity():
return Body.from_text(BODY, doc=MockDoc())
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'BODY' in ENTITY_CLASSES
assert '3DSOLID' in ENTITY_CLASSES
assert 'REGION' in ENTITY_CLASSES
assert 'SURFACE' in ENTITY_CLASSES
assert 'EXTRUDEDSURFACE' in ENTITY_CLASSES
assert 'LOFTEDSURFACE' in ENTITY_CLASSES
assert 'REVOLVEDSURFACE' in ENTITY_CLASSES
assert 'SWEPTSURFACE' in ENTITY_CLASSES
def test_default_init():
entity = Body()
assert entity.dxftype() == 'BODY'
entity = Solid3d()
assert entity.dxftype() == '3DSOLID'
entity = Region()
assert entity.dxftype() == 'REGION'
entity = Surface()
assert entity.dxftype() == 'SURFACE'
entity = ExtrudedSurface()
assert entity.dxftype() == 'EXTRUDEDSURFACE'
entity = LoftedSurface()
assert entity.dxftype() == 'LOFTEDSURFACE'
entity = RevolvedSurface()
assert entity.dxftype() == 'REVOLVEDSURFACE'
entity = SweptSurface()
assert entity.dxftype() == 'SWEPTSURFACE'
def test_default_new():
entity = Body.new(handle='ABBA', owner='0', dxfattribs={
'color': 7,
})
assert entity.dxf.layer == '0'
assert entity.dxf.color == 7
def test_load_from_text(entity):
assert entity.dxf.layer == '0'
assert entity.dxf.color == 256, 'default color is 256 (by layer)'
assert entity.dxf.version == 1
def test_body_write_dxf():
entity = Body.from_text(BODY, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(BODY)
assert result == expected
def test_region_write_dxf():
entity = Region.from_text(REGION, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(REGION)
assert result == expected
REGION = """0
REGION
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
"""
def test_3dsolid_write_dxf():
entity = Solid3d.from_text(SOLID3D, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(SOLID3D)
assert result == expected
SOLID3D = """0
3DSOLID
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDb3dSolid
350
0
"""
def test_surface_write_dxf():
entity = Surface.from_text(SURFACE, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(SURFACE)
assert result == expected
SURFACE = """0
SURFACE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDbSurface
71
0
72
0
"""
def test_extruded_surface_write_dxf():
entity = ExtrudedSurface.from_text(EXTRUDEDSURFACE, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(EXTRUDEDSURFACE)
assert result == expected
EXTRUDEDSURFACE = """0
EXTRUDEDSURFACE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDbSurface
71
0
72
0
100
AcDbExtrudedSurface
90
18
10
0.0
20
0.0
30
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
42
0.0
43
0.0
44
0.0
45
0.0
48
1.0
49
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
290
0
70
0
71
2
292
1
293
0
294
0
295
1
296
0
11
0.0
21
0.0
31
0.0
"""
def test_lofted_surface_write_dxf():
entity = LoftedSurface.from_text(LOFTEDSURFACE, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(LOFTEDSURFACE)
assert result == expected
LOFTEDSURFACE = """0
LOFTEDSURFACE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDbSurface
71
0
72
0
100
AcDbLoftedSurface
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
70
0
41
0.0
42
0.0
43
0.0
44
0.0
290
0
291
1
292
1
293
1
294
0
295
0
296
0
297
1
"""
def test_revolved_surface_write_dxf():
entity = RevolvedSurface.from_text(REVOLVEDSURFACE, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(REVOLVEDSURFACE)
assert result == expected
REVOLVEDSURFACE = """0
REVOLVEDSURFACE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDbSurface
71
0
72
0
100
AcDbRevolvedSurface
90
36
10
0.0
20
0.0
30
0.0
11
0.0
21
0.0
31
1.0
40
0.0
41
0.0
42
1.0
42
0.0
42
0.0
42
0.0
42
0.0
42
1.0
42
0.0
42
0.0
42
0.0
42
0.0
42
1.0
42
0.0
42
0.0
42
0.0
42
0.0
42
1.0
43
0.0
44
0.0
45
0.0
46
0.0
290
0
291
0
"""
def test_swept_surface_write_dxf():
entity = SweptSurface.from_text(SWEPTSURFACE, doc=MockDoc())
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(SWEPTSURFACE)
assert result == expected
SWEPTSURFACE = """0
SWEPTSURFACE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDbSurface
71
0
72
0
100
AcDbSweptSurface
90
36
91
36
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
40
0.0
40
0.0
40
0.0
40
0.0
40
1.0
41
1.0
41
0.0
41
0.0
41
0.0
41
0.0
41
1.0
41
0.0
41
0.0
41
0.0
41
0.0
41
1.0
41
0.0
41
0.0
41
0.0
41
0.0
41
1.0
42
0.0
43
0.0
44
0.0
45
0.0
48
1.0
49
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
46
0.0
46
0.0
46
0.0
46
0.0
46
1.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
47
0.0
47
0.0
47
0.0
47
0.0
47
1.0
290
0
70
1
71
2
292
0
293
0
294
1
295
1
296
1
11
0.0
21
0.0
31
0.0
"""
```
#### File: tests/test_02_dxf_graphics/test_238_tolerance.py
```python
import pytest
import ezdxf
from ezdxf.entities.tolerance import Tolerance
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
TOLERANCE = """0
TOLERANCE
5
0
330
0
100
AcDbEntity
8
0
100
AcDbFcf
3
Standard
10
0.0
20
0.0
30
0.0
1
11
1.0
21
0.0
31
0.0
"""
@pytest.fixture
def entity():
return Tolerance.from_text(TOLERANCE)
def test_registered():
from ezdxf.entities.factory import ENTITY_CLASSES
assert 'TOLERANCE' in ENTITY_CLASSES
def test_default_init():
entity = Tolerance()
assert entity.dxftype() == 'TOLERANCE'
assert entity.dxf.handle is None
assert entity.dxf.owner is None
def test_default_new():
entity = Tolerance.new(handle='ABBA', owner='0', dxfattribs={
'color': 7,
'dimstyle': 'EZDXF',
'insert': (1, 2, 3),
'extrusion': (4, 5, 6),
'x_axis_vector': (7, 8, 9),
'content': 'abcdef',
})
assert entity.dxf.layer == '0'
assert entity.dxf.color == 7
assert entity.dxf.dimstyle == 'EZDXF'
assert entity.dxf.insert == (1, 2, 3)
assert entity.dxf.extrusion == (4, 5, 6)
assert entity.dxf.x_axis_vector == (7, 8, 9)
assert entity.dxf.content == 'abcdef'
def test_load_from_text(entity):
assert entity.dxf.layer == '0'
assert entity.dxf.color == 256, 'default color is 256 (by layer)'
assert entity.dxf.dimstyle == 'Standard'
assert entity.dxf.insert == (0, 0, 0)
assert entity.dxf.extrusion == (0, 0, 1) # default value
assert entity.dxf.x_axis_vector == (1, 0, 0)
assert entity.dxf.content == ''
def test_write_dxf():
entity = Tolerance.from_text(TOLERANCE)
result = TagCollector.dxftags(entity)
expected = basic_tags_from_text(TOLERANCE)
assert result == expected
def test_add_tolerance():
doc = ezdxf.new()
msp = doc.modelspace()
    tolerance = msp.new_entity('TOLERANCE', {})
    assert tolerance.dxftype() == 'TOLERANCE'
    assert tolerance.dxf.dimstyle == 'Standard'
    assert tolerance.dxf.insert == (0, 0, 0)
    assert tolerance.dxf.content == ''
    assert tolerance.dxf.extrusion == (0, 0, 1)
    assert tolerance.dxf.x_axis_vector == (1, 0, 0)
```
#### File: tests/test_02_dxf_graphics/test_241_hyperlink.py
```python
import pytest
from ezdxf.entities import DXFGraphic
def test_set_hyperlink():
entity = DXFGraphic()
assert entity.has_hyperlink() is False
entity.set_hyperlink('link')
assert entity.has_hyperlink() is True
hyperlink, description, location = entity.get_hyperlink()
assert hyperlink == 'link'
assert description == ''
assert location == ''
def test_set_description():
entity = DXFGraphic()
entity.set_hyperlink('link', 'description')
hyperlink, description, location = entity.get_hyperlink()
assert hyperlink == 'link'
assert description == 'description'
assert location == ''
def test_set_location():
entity = DXFGraphic()
entity.set_hyperlink('link', 'description', 'location')
hyperlink, description, location = entity.get_hyperlink()
assert hyperlink == 'link'
assert description == 'description'
assert location == 'location'
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_03_dxf_layouts/test_309_query_parser.py
```python
import pytest
from ezdxf.queryparser import EntityQueryParser, ParseException, InfixBoolQuery
class TestEntityQueryParserWithoutAttributes:
def test_without_wildcards(self):
result = EntityQueryParser.parseString("LINE", parseAll=True)
name = result.EntityQuery[0]
assert "LINE" == name
def test_two_entity_names(self):
result = EntityQueryParser.parseString("LINE CIRCLE", parseAll=True)
assert "LINE" == result.EntityQuery[0]
assert "CIRCLE" == result.EntityQuery[1]
def test_star_wildcard(self):
result = EntityQueryParser.parseString("*", parseAll=True)
name = result.EntityQuery[0]
assert "*" == name
def test_wrong_star_wildcard(self):
with pytest.raises(ParseException):
EntityQueryParser.parseString("LIN*", parseAll=True)
def test_star_wildcard_2(self):
result = EntityQueryParser.parseString("* !LINE", parseAll=True)
assert result.EntityQuery[0] == '*'
assert result.EntityQuery[1] == '!LINE'
def test_star_wildcard_3(self):
with pytest.raises(ParseException):
EntityQueryParser.parseString("!LINE *", parseAll=True)
def test_star_wildcard_4(self):
with pytest.raises(ParseException):
EntityQueryParser.parseString("* LINE", parseAll=True)
class TestEntityQueryParserWithAttributes:
def test_empty_attribute_list_not_allowed(self):
with pytest.raises(ParseException):
EntityQueryParser.parseString("LINE[]", parseAll=True)
def test_one_attribute(self):
result = EntityQueryParser.parseString('LINE[layer=="0"]', parseAll=True)
assert "LINE" == result.EntityQuery[0]
assert ('layer', '==', '0') == tuple(result.AttribQuery)
def test_double_quoted_attributes(self):
result = EntityQueryParser.parseString('LINE[layer=="0"]', parseAll=True)
assert "LINE" == result.EntityQuery[0]
assert ('layer', '==', '0') == tuple(result.AttribQuery)
def test_single_quoted_attributes(self):
result = EntityQueryParser.parseString("LINE[layer=='0']", parseAll=True)
assert "LINE" == result.EntityQuery[0]
assert ('layer', '==', '0') == tuple(result.AttribQuery)
def test_attribute_name_with_underscore(self):
result = EntityQueryParser.parseString('HATCH[solid_fill==0]', parseAll=True)
assert "HATCH" == result.EntityQuery[0]
assert ('solid_fill', '==', 0) == tuple(result.AttribQuery)
def test_star_with_one_attribute(self):
result = EntityQueryParser.parseString('*[layer=="0"]', parseAll=True)
assert "*" == result.EntityQuery[0]
assert 3 == len(result.AttribQuery)
assert ('layer', '==', '0') == tuple(result.AttribQuery)
def test_relation_lt(self):
result = EntityQueryParser.parseString('*[layer<"0"]', parseAll=True)
assert ('layer', '<', '0') == tuple(result.AttribQuery)
def test_relation_le(self):
result = EntityQueryParser.parseString('*[layer<="0"]', parseAll=True)
assert ('layer', '<=', '0') == tuple(result.AttribQuery)
def test_relation_eq(self):
result = EntityQueryParser.parseString('*[layer=="0"]', parseAll=True)
assert ('layer', '==', '0') == tuple(result.AttribQuery)
def test_relation_ne(self):
result = EntityQueryParser.parseString('*[layer!="0"]', parseAll=True)
assert ('layer', '!=', '0') == tuple(result.AttribQuery)
def test_relation_ge(self):
result = EntityQueryParser.parseString('*[layer>="0"]', parseAll=True)
assert ('layer', '>=', '0') == tuple(result.AttribQuery)
def test_relation_gt(self):
result = EntityQueryParser.parseString('*[layer>"0"]', parseAll=True)
assert ('layer', '>', '0') == tuple(result.AttribQuery)
def test_regex_match(self):
result = EntityQueryParser.parseString('*[layer?"0"]', parseAll=True)
assert ('layer', '?', '0') == tuple(result.AttribQuery)
def test_not_regex_match(self):
result = EntityQueryParser.parseString('*[layer!?"0"]', parseAll=True)
assert ('layer', '!?', '0') == tuple(result.AttribQuery)
def test_appended_ignore_case_option(self):
result = EntityQueryParser.parseString('*[layer=="IgnoreCase"]i', parseAll=True)
assert "i" == result.AttribQueryOptions
class TestInfixBoolQuery:
def test_not_operation(self):
result = InfixBoolQuery.parseString('!a!=1', parseAll=True)
op, relation = result.AttribQuery
assert '!' == op
assert ('a', '!=', 1) == tuple(relation)
def test_and_operation(self):
result = InfixBoolQuery.parseString('a != 1 & b != 2', parseAll=True)
rel1, op, rel2 = result.AttribQuery
assert ('a', '!=', 1) == tuple(rel1)
assert '&' == op
assert ('b', '!=', 2) == tuple(rel2)
def test_or_operation(self):
result = InfixBoolQuery.parseString('a != 1 | b != 2', parseAll=True)
rel1, op, rel2 = result.AttribQuery
assert ('a', '!=', 1) == tuple(rel1)
assert '|' == op
assert ('b', '!=', 2) == tuple(rel2)
def test_not_operation_with_brackets(self):
result = InfixBoolQuery.parseString('!(a!=1)', parseAll=True)
op, relation = result.AttribQuery
assert '!' == op
assert ('a', '!=', 1) == tuple(relation)
def test_operation_with_brackets(self):
result = InfixBoolQuery.parseString('(a != 1) & (b != 2)', parseAll=True)
rel1, op, rel2 = result.AttribQuery
assert ('a', '!=', 1) == tuple(rel1)
assert '&' == op
assert ('b', '!=', 2) == tuple(rel2)
def test_operation_with_nested_brackets(self):
result = InfixBoolQuery.parseString('((a != 1) & (b != 2))', parseAll=True)
rel1, op, rel2 = result.AttribQuery
assert ('a', '!=', 1) == tuple(rel1)
assert '&' == op
assert ('b', '!=', 2) == tuple(rel2)
```
#### File: tests/test_04_dxf_high_level_structs/test_411_acds_data.py
```python
import pytest
from ezdxf.sections.acdsdata import AcDsDataSection
from ezdxf import DXFKeyError
from ezdxf.lldxf.tags import internal_tag_compiler, group_tags
from ezdxf.lldxf.tagwriter import TagCollector, basic_tags_from_text
@pytest.fixture
def section():
entities = group_tags(internal_tag_compiler(ACDSSECTION))
return AcDsDataSection(None, entities)
def test_loader(section):
assert 'ACDSDATA' == section.name.upper()
assert len(section.entities) > 0
def test_acds_record(section):
records = [entity for entity in section.entities if entity.dxftype() == 'ACDSRECORD']
assert len(records) > 0
record = records[0]
assert record.has_section('ASM_Data') is True
assert record.has_section('AcDbDs::ID') is True
assert record.has_section('mozman') is False
with pytest.raises(DXFKeyError):
_ = record.get_section('mozman')
asm_data = record.get_section('ASM_Data')
binary_data = (tag for tag in asm_data if tag.code == 310)
length = sum(len(tag.value) for tag in binary_data)
assert asm_data[2].value == length
def test_write_dxf(section):
result = TagCollector.dxftags(section)
expected = basic_tags_from_text(ACDSSECTION)
assert result[:-1] == expected
ACDSSECTION = """0
SECTION
2
ACDSDATA
70
2
71
6
0
ACDSSCHEMA
90
0
1
AcDb3DSolid_ASM_Data
2
AcDbDs::ID
280
10
91
8
2
ASM_Data
280
15
91
0
101
ACDSRECORD
95
0
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
0
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
1
1
AcDb_Thumbnail_Schema
2
AcDbDs::ID
280
10
91
8
2
Thumbnail_Data
280
15
91
0
101
ACDSRECORD
95
1
90
2
2
AcDbDs::TreatedAsObjectData
280
1
291
1
101
ACDSRECORD
95
1
90
3
2
AcDbDs::Legacy
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
4
2
AcDs:Indexable
280
1
291
1
101
ACDSRECORD
1
AcDbDs::ID
90
5
2
AcDbDs::HandleAttribute
280
7
282
1
0
ACDSSCHEMA
90
2
1
AcDbDs::TreatedAsObjectDataSchema
2
AcDbDs::TreatedAsObjectData
280
1
91
0
0
ACDSSCHEMA
90
3
1
AcDbDs::LegacySchema
2
AcDbDs::Legacy
280
1
91
0
0
ACDSSCHEMA
90
4
1
AcDbDs::IndexedPropertySchema
2
AcDs:Indexable
280
1
91
0
0
ACDSSCHEMA
90
5
1
AcDbDs::HandleAttributeSchema
2
AcDbDs::HandleAttribute
280
7
91
1
284
1
0
ACDSRECORD
90
0
2
AcDbDs::ID
280
10
320
339
2
ASM_Data
280
15
94
1088
310
414349532042696E61727946696C652855000000000000020000000C00000007104175746F6465736B204175746F434144071841534D203231392E302E302E3536303020556E6B6E6F776E071853756E204D61792020342031353A34373A3233203230313406000000000000F03F068DEDB5A0F7C6B03E06BBBDD7D9DF7CDB
310
3D0D0961736D6865616465720CFFFFFFFF04FFFFFFFF070C3231392E302E302E35363030110D04626F64790C0200000004FFFFFFFF0CFFFFFFFF0C030000000CFFFFFFFF0CFFFFFFFF110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C010000000C040000000C05
310
000000110D046C756D700C0600000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000C01000000110D0E6579655F726566696E656D656E740CFFFFFFFF04FFFFFFFF070567726964200401000000070374726904010000000704737572660400000000070361646A040000000007046772616404000000000709706F7374
310
636865636B0400000000070463616C6304010000000704636F6E760400000000070473746F6C06000000E001FD414007046E746F6C060000000000003E4007046473696C0600000000000000000708666C61746E6573730600000000000000000707706978617265610600000000000000000704686D617806000000000000
310
0000070667726964617206000000000000000007056D6772696404B80B0000070575677269640400000000070576677269640400000000070A656E645F6669656C6473110D0F7665727465785F74656D706C6174650CFFFFFFFF04FFFFFFFF0403000000040000000004010000000408000000110E067265665F76740E0365
310
79650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0CFFFFFFFF0C030000000C040000000C05000000110D057368656C6C0C0800000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C090000000CFFFFFFFF0C03000000110E067265665F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFF
310
FFFFFF0CFFFFFFFF0C070000000C040000000C05000000110D04666163650C0A00000004FFFFFFFF0CFFFFFFFF0CFFFFFFFF0CFFFFFFFF0C070000000CFFFFFFFF0C0B0000000B0B110E05666D6573680E036579650D066174747269620CFFFFFFFF04FFFFFFFF0C0C0000000CFFFFFFFF0C09000000110E05746F7275730D
310
07737572666163650CFFFFFFFF04FFFFFFFF0CFFFFFFFF131D7B018BA58BA7C0600EB0424970BC4000000000000000001400000000000000000000000000000000000000000000F03F065087D2E2C5418940066050CEE5F3CA644014000000000000F03F000000000000000000000000000000000B0B0B0B0B110E06726566
310
5F76740E036579650D066174747269620CFFFFFFFF04FFFFFFFF0CFFFFFFFF0C0A0000000C090000000C040000000C05000000110E03456E640E026F660E0341534D0D0464617461
"""
```
#### File: tests/test_04_dxf_high_level_structs/test_421_new_drawings.py
```python
import pytest
import ezdxf
new = ezdxf.new
def test_new_AC1009():
doc = new('R12')
assert 'AC1009' == doc.dxfversion
def test_new_AC1015():
doc = new('R2000')
assert 'AC1015' == doc.dxfversion
def test_new_AC1018():
doc = new('R2004')
assert 'AC1018' == doc.dxfversion
def test_new_AC1021():
doc = new('R2007')
assert 'AC1021' == doc.dxfversion
def test_new_AC1024():
doc = new('R2010')
assert 'AC1024' == doc.dxfversion
def test_new_AC1027():
doc = new('R2013')
assert 'AC1027' == doc.dxfversion
def test_new_AC1032():
doc = new('R2018')
assert 'AC1032' == doc.dxfversion
def test_invalid_dxf_version():
with pytest.raises(ezdxf.const.DXFVersionError):
new('R13')
with pytest.raises(ezdxf.const.DXFVersionError):
new('R14')
with pytest.raises(ezdxf.const.DXFVersionError):
new('XYZ')
with pytest.raises(ezdxf.const.DXFVersionError):
new('AC1012')
with pytest.raises(ezdxf.const.DXFVersionError):
new('AC1013')
with pytest.raises(ezdxf.const.DXFVersionError):
new('AC1014')
```
#### File: tests/test_04_dxf_high_level_structs/test_422_drawing_object.py
```python
import pytest
from ezdxf.lldxf.tagger import internal_tag_compiler
from ezdxf.document import Drawing
from ezdxf import DXFValueError, decode_base64
def test_dxfversion_1():
doc = Drawing.from_tags(internal_tag_compiler(TEST_HEADER))
assert 'AC1009' == doc.dxfversion
@pytest.fixture(scope='module')
def dwg_r12():
return Drawing.new('AC1009')
def test_dxfversion_2(dwg_r12):
assert 'AC1009' == dwg_r12.dxfversion
def test_acad_release(dwg_r12):
assert 'R12' == dwg_r12.acad_release
def test_get_layer(dwg_r12):
layer = dwg_r12.layers.get('0')
assert '0' == layer.dxf.name
def test_error_getting_not_existing_layer(dwg_r12):
with pytest.raises(DXFValueError):
layer = dwg_r12.layers.get('TEST_NOT_EXISTING_LAYER')
def test_create_layer(dwg_r12):
layer = dwg_r12.layers.new('TEST_NEW_LAYER')
assert 'TEST_NEW_LAYER' == layer.dxf.name
def test_error_adding_existing_layer(dwg_r12):
with pytest.raises(DXFValueError):
layer = dwg_r12.layers.new('0')
def test_has_layer(dwg_r12):
assert '0' in dwg_r12.layers
def test_has_not_layer(dwg_r12):
assert 'TEST_LAYER_NOT_EXISTS' not in dwg_r12.layers
def test_removing_layer(dwg_r12):
dwg_r12.layers.new('TEST_NEW_LAYER_2')
assert 'TEST_NEW_LAYER_2' in dwg_r12.layers
dwg_r12.layers.remove('TEST_NEW_LAYER_2')
assert 'TEST_NEW_LAYER_2' not in dwg_r12.layers
def test_error_removing_not_existing_layer(dwg_r12):
with pytest.raises(DXFValueError):
dwg_r12.layers.remove('TEST_LAYER_NOT_EXISTS')
@pytest.fixture(scope='module')
def dwg_r2000():
return Drawing.new('AC1015')
def test_r2000_dxfversion(dwg_r2000):
assert 'AC1015' == dwg_r2000.dxfversion
def test_r2000_acad_release(dwg_r2000):
assert 'R2000' == dwg_r2000.acad_release
@pytest.fixture
def min_r12():
return Drawing.from_tags(internal_tag_compiler(MINIMALISTIC_DXF12))
def test_min_r12_header_section(min_r12):
assert hasattr(min_r12, 'header')
assert min_r12.header['$ACADVER'] == 'AC1009'
assert min_r12.header['$DWGCODEPAGE'] == 'ANSI_1252'
def test_min_r12_layers_table(min_r12):
assert hasattr(min_r12, 'layers')
assert len(min_r12.layers) == 2
assert '0' in min_r12.layers
assert 'Defpoints' in min_r12.layers
def test_min_r12_styles_table(min_r12):
assert hasattr(min_r12, 'styles')
assert len(min_r12.styles) == 1
assert 'Standard' in min_r12.styles
def test_min_r12_linetypes_table(min_r12):
assert hasattr(min_r12, 'linetypes')
assert len(min_r12.linetypes) == 3
assert 'continuous' in min_r12.linetypes
assert 'ByLayer' in min_r12.linetypes
assert 'ByBlock' in min_r12.linetypes
def test_min_r12_blocks_section(min_r12):
assert hasattr(min_r12, 'blocks')
assert len(min_r12.blocks) == 2
assert '*Model_Space' in min_r12.blocks
assert '*Paper_Space' in min_r12.blocks
def test_min_r12_entity_section(min_r12):
assert hasattr(min_r12, 'entities')
assert len(min_r12.entities) == 0
def test_chain_layout_and_block(dwg_r12, dwg_r2000):
for dwg in (dwg_r12, dwg_r2000):
msp = dwg.modelspace()
line_msp = msp.add_line((0, 0), (1, 1))
blk = dwg.blocks.new('TEST_CHAIN')
line_blk = blk.add_line((0, 0), (1, 1))
handles = list(e.dxf.handle for e in dwg.chain_layouts_and_blocks())
# check for unique handles
assert len(handles) == len(set(handles))
check = {line_msp.dxf.handle, line_blk.dxf.handle}
assert check.intersection(handles) == check
def test_base64_encoding_r12(dwg_r12):
data = dwg_r12.encode_base64()
doc = decode_base64(data)
assert doc.acad_release == 'R12'
def test_base64_encoding_r2000(dwg_r2000):
data = dwg_r2000.encode_base64()
doc = decode_base64(data)
assert doc.acad_release == 'R2000'
def test_set_drawing_units(dwg_r12):
dwg_r12.units = 6
assert dwg_r12.header['$INSUNITS'] == 6
dwg_r12.units = 5
assert dwg_r12.header['$INSUNITS'] == 5
MINIMALISTIC_DXF12 = """ 0
SECTION
2
ENTITIES
0
ENDSEC
0
EOF
"""
TEST_HEADER = """ 0
SECTION
2
HEADER
9
$ACADVER
1
AC1009
9
$DWGCODEPAGE
3
ANSI_1252
9
$HANDSEED
5
FF
0
ENDSEC
0
SECTION
2
ENTITIES
0
ENDSEC
0
EOF
"""
TESTCOPY = """ 0
SECTION
2
HEADER
9
$ACADVER
1
AC1018
9
$DWGCODEPAGE
3
ANSI_1252
9
$TDUPDATE
40
0.
9
$HANDSEED
5
FF
0
ENDSEC
0
SECTION
2
OBJECTS
0
ENDSEC
0
SECTION
2
FANTASYSECTION
1
everything should be copied
0
ENDSEC
0
SECTION
2
ALPHASECTION
1
everything should be copied
0
ENDSEC
0
SECTION
2
OMEGASECTION
1
everything should be copied
0
ENDSEC
0
EOF
"""
```
#### File: tests/test_04_dxf_high_level_structs/test_425_limits_and_extents.py
```python
from typing import cast
import pytest
import ezdxf
@pytest.fixture(scope='module', params=['R12', 'R2000'])
def doc(request):
return ezdxf.new(request.param)
def test_new_doc_extents(doc):
extmin = doc.header["$EXTMIN"]
extmax = doc.header["$EXTMAX"]
assert extmin == (1e20, 1e20, 1e20)
assert extmax == (-1e20, -1e20, -1e20)
def test_new_doc_limits(doc):
limmin = doc.header["$LIMMIN"]
limmax = doc.header["$LIMMAX"]
assert limmin == (0, 0)
assert limmax == (420, 297)
def test_default_modelspace_extents(doc):
msp = doc.modelspace()
extmin = msp.dxf.extmin
extmax = msp.dxf.extmax
assert extmin == (1e20, 1e20, 1e20)
assert extmax == (-1e20, -1e20, -1e20)
def test_default_modelspace_limits(doc):
msp = doc.modelspace()
limmin = msp.dxf.limmin
limmax = msp.dxf.limmax
assert limmin == (0, 0)
assert limmax == (420, 297)
def test_default_layout1_extents(doc):
layout1 = doc.layout('Layout1')
extmin = layout1.dxf.extmin
extmax = layout1.dxf.extmax
assert extmin == (1e20, 1e20, 1e20)
assert extmax == (-1e20, -1e20, -1e20)
def test_default_layout1_limits(doc):
layout1 = doc.layout('Layout1')
limmin = layout1.dxf.limmin
limmax = layout1.dxf.limmax
assert limmin == (0, 0)
assert limmax == (420, 297)
layout1.reset_paper_limits()
assert limmin == (0, 0)
assert limmax == (420, 297)
def test_reset_modelspace_extents(doc):
extmin = (-100, -100, -100)
extmax = (100, 100, 100)
msp = doc.modelspace()
msp.reset_extents(extmin, extmax)
assert msp.dxf.extmin == extmin
assert msp.dxf.extmax == extmax
doc.update_extents() # is automatically called by Drawing.write()
assert doc.header["$EXTMIN"] == extmin
assert doc.header["$EXTMAX"] == extmax
# reset to default values:
msp.reset_extents()
assert msp.dxf.extmin == (1e20, 1e20, 1e20)
assert msp.dxf.extmax == (-1e20, -1e20, -1e20)
def test_reset_modelspace_limits(doc):
limmin = (-10, -10)
limmax = (300, 50)
msp = doc.modelspace()
msp.reset_limits(limmin, limmax)
assert msp.dxf.limmin == limmin
assert msp.dxf.limmax == limmax
doc.update_limits() # is automatically called by Drawing.write()
assert doc.header["$LIMMIN"] == limmin
assert doc.header["$LIMMAX"] == limmax
# reset to default values:
msp.reset_limits()
width = msp.dxf.paper_width
height = msp.dxf.paper_height
assert msp.dxf.limmin == (0, 0)
assert msp.dxf.limmax == (width, height)
def test_default_active_msp_vport_config(doc):
# A viewport configuration is always a list of one or more VPORT entities:
vport_config = doc.viewports.get('*ACTIVE')
assert len(vport_config) == 1
vport = vport_config[0]
assert vport.dxf.center == (344.2, 148.5)
assert vport.dxf.height == 297
def test_default_active_layout1_viewport(doc):
layout1 = doc.layout("Layout1")
assert len(layout1.viewports()) == 0, "no default viewport expected"
def test_reset_layout1_active_viewport(doc):
doc = ezdxf.new()
layout1 = cast('Paperspace', doc.layout("Layout1"))
viewport = layout1.reset_main_viewport()
assert viewport.dxf.center == (202.5, 128.5)
paper_width = layout1.dxf.paper_width
paper_height = layout1.dxf.paper_height
assert viewport.dxf.width == paper_width * 1.1 # AutoCAD default factor
assert viewport.dxf.height == paper_height * 1.1 # AutoCAD default factor
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_05_tools/test_507_dxf_pretty_printer.py
```python
import ezdxf
from ezdxf.pp.pprint import readfile, dxfpp
from ezdxf.pp.rawpp import rawpp
def test_dxf_drawing_to_html(tmpdir):
name = tmpdir.join('test.dxf')
doc = ezdxf.new()
doc.saveas(name)
tagger = readfile(name)
# checks only if pretty printer is still working
result = dxfpp(tagger, 'test.dxf')
assert len(result) > 0
# checks only if pretty printer is still working
result = rawpp(readfile(name), filename='test.dxf')
assert len(result) > 0
```
#### File: tests/test_05_tools/test_511_bit_stream.py
```python
import pytest
from ezdxf.tools.binarydata import BitStream, EndOfBufferError
def test_read_bit():
data = b'\xaa'
bs = BitStream(data)
assert bs.read_bit() == 1
assert bs.read_bit() == 0
assert bs.read_bit() == 1
assert bs.read_bit() == 0
assert bs.read_bit() == 1
assert bs.read_bit() == 0
assert bs.read_bit() == 1
assert bs.read_bit() == 0
with pytest.raises(EndOfBufferError):
_ = bs.read_bit()
def test_read_bits():
data = b'\x0f\x0f'
bs = BitStream(data)
assert bs.read_bits(4) == 0
assert bs.read_bits(2) == 3
assert bs.read_bits(3) == 6
assert bs.read_bits(4) == 1
assert bs.read_bits(3) == 7
def test_read_unsigned_byte():
data = b'\x0f\x0f'
bs = BitStream(data)
assert bs.read_bits(4) == 0
assert bs.read_unsigned_byte() == 0xf0
def test_read_signed_byte():
data = b'\x0f\xf0'
bs = BitStream(data)
assert bs.read_bits(4) == 0
assert bs.read_signed_byte() == -1
def test_read_unsigned_short():
# little endian!
data = b'\x0c\xda\xb0'
bs = BitStream(data)
assert bs.read_bits(4) == 0
assert bs.read_unsigned_short() == 0xabcd
assert bs.read_bits(4) == 0
def test_read_aligned_unsigned_short():
# little endian!
data = b'\x00\xcd\xab'
bs = BitStream(data)
assert bs.read_unsigned_byte() == 0
assert bs.read_unsigned_short() == 0xabcd
def test_read_unsigned_long():
# little endian!
data = b'\x00\x0e\xfc\xda\xb0'
bs = BitStream(data)
assert bs.read_bits(4) == 0
assert bs.read_unsigned_long() == 0xabcdef00
assert bs.read_bits(4) == 0
def test_read_aligned_unsigned_long():
# little endian!
data = b'\x00\xef\xcd\xab'
bs = BitStream(data)
assert bs.read_unsigned_long() == 0xabcdef00
def test_read_bitshort():
bs = BitStream(b'\xe0')
assert bs.read_bit_short() == 256 # 11
assert bs.read_bit_short() == 0 # 10
bs = BitStream(b'\x00\xff\xff')
bs.read_bits(6)
assert bs.read_bit_short() == -1
assert BitStream(b'\x7f\x00').read_bit_short() == 252
def test_read_signed_modular_chars():
bs = BitStream(bytes([
0b11101001, 0b10010111, 0b11100110, 0b00110101,
0b10000010, 0b00100100,
0b10000101, 0b01001011,
]))
mc = bs.read_signed_modular_chars()
assert mc == 112823273
mc = bs.read_signed_modular_chars()
assert mc == 4610
mc = bs.read_signed_modular_chars()
assert mc == -1413
def test_read_unsigned_modular_chars():
bs = BitStream(bytes([
0b11101001, 0b10010111, 0b11100110, 0b00110101,
0b10000010, 0b00100100,
0b10000101, 0b01001011,
]))
mc = bs.read_unsigned_modular_chars()
assert mc == 112823273
mc = bs.read_unsigned_modular_chars()
assert mc == 4610
mc = bs.read_unsigned_modular_chars()
assert mc == 9605
def test_read_modular_shorts():
bs = BitStream(bytes([
0b00110001, 0b11110100, 0b10001101, 0b00000000,
]))
ms = bs.read_modular_shorts()
assert ms == 4650033
def test_read_object_type():
assert BitStream(bytes([0b00000000, 0b01000000])).read_object_type() == 1
assert BitStream(bytes([0b01000000, 0b01000000])).read_object_type() == 1 + 0x1f0
assert BitStream(bytes([0b10000000, 0b01000000, 0b01000000])).read_object_type() == 257
assert BitStream(bytes([0b11000000, 0b01000000, 0b01000000])).read_object_type() == 257
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_05_tools/test_512_pattern.py
```python
import pytest
from ezdxf.tools import pattern
def test_load_iso_pattern():
p = pattern.load()
assert p['ANSI31'][0] == [45.0, (0.0, 0.0), (-2.2450640303, 2.2450640303),
[]]
def test_load_scaled_iso_pattern():
p = pattern.load(factor=2)
assert p['ANSI31'][0] == [45.0, (0.0, 0.0), (-4.4901280606, 4.4901280606),
[]]
def test_load_imperial_pattern():
p = pattern.load(measurement=0)
assert p['ANSI31'][0] == [45.0, (0.0, 0.0), (-0.0883883476, 0.0883883476),
[]]
def test_scale_pattern():
p = pattern.load()
ansi31 = p['ANSI31']
s = pattern.scale_pattern(ansi31, 2, angle=90)
angle, base, offset, lines = s[0]
assert angle == 135
assert base == (0, 0)
assert offset == (-4.4901280606, -4.4901280606)
def test_scale_all_pattern():
r = pattern.scale_all(pattern.ISO_PATTERN)
assert len(r) == len(pattern.ISO_PATTERN)
TEST_PATTERN = """; Hatch Patterns adapted to ISO scaling
;; Note: Dummy pattern description used for 'Solid fill'.
*SOLID, Solid fill
45, 0,0, 0,.125
*ANSI31, ANSI Iron, Brick, Stone masonry
45, 0,0, 0,3.175
*ANSI32, ANSI Steel
45, 0,0, 0,9.525
45, 4.490128053,0, 0,9.525
*ANSI33, ANSI Bronze, Brass, Copper
45, 0,0, 0,6.35
45, 4.490128053,0, 0,6.35, 3.175,-1.5875
*ANSI34, ANSI Plastic, Rubber
45, 0,0, 0,19.05
45, 4.490128053,0, 0,19.05
45, 8.9802561314,0, 0,19.05
45, 13.4703841844,0, 0,19.05
*ANSI35, ANSI Fire brick, Refractory material
45, 0,0, 0,6.35
45, 4.490128053,0, 0,6.35, 7.9375,-1.5875,0,-1.5875
*ANSI36, ANSI Marble, Slate, Glass
45, 0,0, 5.55625,3.175, 7.9375,-1.5875,0,-1.5875
*ANSI37, ANSI Lead, Zinc, Magnesium, Sound/Heat/Elec Insulation
45, 0,0, 0,3.175
135, 0,0, 0,3.175
*ANSI38, ANSI Aluminum
45, 0,0, 0,3.175
135, 0,0, 6.35,3.175, 7.9375,-4.7625
"""
def test_parse_pattern_file():
result = pattern.parse(TEST_PATTERN)
assert len(result) == 9
assert result['SOLID'] == [
[45.0, (0.0, 0.0), (-0.0883883476, 0.0883883476), []]]
assert result['ANSI33'] == [
[45.0, (0.0, 0.0), (-4.4901280605, 4.4901280605), []],
[45.0, (4.490128053, 0.0), (-4.4901280605, 4.4901280605),
[3.175, -1.5875]]
]
def analyse(name):
return pattern.PatternAnalyser(pattern.ISO_PATTERN[name])
class TestPatternAnalyser:
@pytest.mark.parametrize('name', [
'ISO02W100', 'DASH', 'CLAY', 'FLEXIBLE'
])
def test_has_horizontal_lines(self, name):
result = analyse(name)
assert result.has_angle(0) is True
@pytest.mark.parametrize('name', [
'GOST_WOOD', 'V_ZINC',
])
def test_has_vertical_lines(self, name):
result = analyse(name)
assert result.has_angle(90) is True
@pytest.mark.parametrize('name', [
'ANSI31', 'BRICK_INSULATING', 'BUTTERFLY', 'CROSSES'
])
def test_has_45_deg_lines(self, name):
result = analyse(name)
assert result.has_angle(45) is True
@pytest.mark.parametrize('name', [
'BUTTERFLY', 'CROSSES'
])
def test_has_135_deg_lines(self, name):
result = analyse(name)
assert result.has_angle(135) is True
@pytest.mark.parametrize('name', [
'DIAMONDS', 'ESCHER'
])
def test_has_60_deg_lines(self, name):
result = analyse(name)
assert result.has_angle(60) is True
@pytest.mark.parametrize('name', [
'ANSI31', 'BRICK_EXISTING'
])
def test_all_45_deg_lines(self, name):
result = analyse(name)
assert result.all_angles(45) is True
@pytest.mark.parametrize('name', [
'ANSI31', 'BRICK_EXISTING'
])
def test_all_solid_lines(self, name):
result = analyse(name)
assert result.all_solid_lines() is True
def test_analyse_ansi31(self):
result = analyse('ANSI31')
assert result.has_angle(45) is True
assert result.all_angles(45) is True
assert result.all_solid_lines() is True
assert result.has_dashed_line() is False
def test_analyse_checker(self):
result = analyse('CHECKER')
assert result.has_angle(0) is True
assert result.has_angle(90) is True
assert result.all_dashed_lines() is True
def test_rotated_checker(self):
pat = pattern.ISO_PATTERN['CHECKER']
result = pattern.PatternAnalyser(pattern.scale_pattern(pat, 2, 45))
assert result.has_angle(45) is True
assert result.has_angle(135) is True
def test_analyse_crosses(self):
result = analyse('CROSSES')
assert result.has_angle(45) is True
assert result.has_angle(135) is True
assert result.all_dashed_lines() is True
@pytest.mark.parametrize('angle,expected', [
(0, 0),
(45, 45),
(90, 90),
(135, 135),
(22, 15), # round to nearest 15° main angle
(23, 30), # round to nearest 15° main angle
(45, 45), # round to nearest 15° main angle
(180, 0), # only 1st and 2nd quadrant
(270, 90), # only 1st and 2nd quadrant
])
def test_round_angle_15_deg(angle, expected):
assert pattern.round_angle_15_deg(angle) == expected
if __name__ == '__main__':
pytest.main([__file__])
```
#### File: tests/test_06_math/test_642_construction_line.py
```python
from ezdxf.math import ConstructionLine, Vec2
class TestConstructionLine:
def test_is_vertical(self):
assert ConstructionLine((0, 0), (10, 0)).is_vertical is False
assert ConstructionLine((5, -5), (5, 5)).is_vertical is True
def test_left_of_line(self):
line = ConstructionLine((0, 0), (0.1, 1))
assert line.is_point_left_of_line(Vec2(-1, 0)) is True
assert line.is_point_left_of_line(Vec2(1, 0)) is False
assert line.is_point_left_of_line(Vec2(-1, -1)) is True
line = ConstructionLine((0, 0), (0, -1))
assert line.is_point_left_of_line(Vec2(1, 0)) is True
line = ConstructionLine((0, 0), (-1, .1))
assert line.is_point_left_of_line(Vec2(-1, 0)) is True
line = ConstructionLine((0, 0), (10, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 1)) is True
assert line.is_point_left_of_line(Vec2(10, -1)) is False
line = ConstructionLine((10, 0), (0, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 0)) is False
assert line.is_point_left_of_line(Vec2(10, 1)) is False
assert line.is_point_left_of_line(Vec2(10, -1)) is True
line = ConstructionLine((0, 0), (0, 10))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(0, 10)) is False
assert line.is_point_left_of_line(Vec2(1, 10)) is False
assert line.is_point_left_of_line(Vec2(-1, 10)) is True
line = ConstructionLine((0, 10), (0, 0))
assert line.is_point_left_of_line(Vec2(0, 0)) is False
assert line.is_point_left_of_line(Vec2(0, 10)) is False
assert line.is_point_left_of_line(Vec2(1, 10)) is True
assert line.is_point_left_of_line(Vec2(-1, 10)) is False
def test_intersect_horizontal_line(self):
line = ConstructionLine((0, 0), (10, 0))
assert line.intersect(ConstructionLine((0, 0), (10, 0))) is None
assert line.intersect(ConstructionLine((0, 1), (10, 1))) is None
assert line.intersect(ConstructionLine((0, -1), (10, 1))) == (5, 0)
assert line.intersect(ConstructionLine((5, 5), (5, -5))) == (5, 0)
assert line.intersect(ConstructionLine((5, 5), (5, 1))) is None
assert line.intersect(ConstructionLine((0, 0), (5, 5))) == (0, 0)
def test_intersect_vertical_line(self):
line = ConstructionLine((0, 0), (0, 10))
assert line.intersect(ConstructionLine((0, 0), (0, 10))) is None
assert line.intersect(ConstructionLine((1, 0), (1, 10))) is None
assert line.intersect(ConstructionLine((-1, 0), (1, 10))) == (0, 5)
assert line.intersect(ConstructionLine((-1, 0), (1, 0))) == (0, 0)
assert line.intersect(ConstructionLine((-1, 10), (1, 10))) == (0, 10)
assert line.intersect(ConstructionLine((-1, 11), (1, 11))) is None
def test_bounding_box(self):
line = ConstructionLine((0, 0), (7, 10))
bbox = line.bounding_box
assert bbox.extmin == (0, 0)
assert bbox.extmax == (7, 10)
def test_translate(self):
line = ConstructionLine((0, 0), (0, 10))
line.translate(3, 7)
assert line.start == (3, 7)
assert line.end == (3, 17)
bbox = line.bounding_box
assert bbox.extmin == (3, 7)
assert bbox.extmax == (3, 17)
```
#### File: tests/test_06_math/test_646_offset_vertices_2d.py
```python
from ezdxf.math import Vec2, offset_vertices_2d
PRECISION = 1e-3
def test_0_offset():
vertices = [(1, 2, 3), (5, 2, 3)]
result = list(offset_vertices_2d(vertices, 0))
assert result[0] == (1, 2)
assert result[1] == (5, 2)
def test_2_horiz_vertices_left_offset():
vertices = [(1, 2), (5, 2)]
result = list(offset_vertices_2d(vertices, 1))
assert result[0] == (1, 3)
assert result[1] == (5, 3)
def test_2_horiz_vertices_right_offset():
vertices = [(1, 2), (5, 2)]
result = list(offset_vertices_2d(vertices, -1))
assert result[0] == (1, 1)
assert result[1] == (5, 1)
def test_2_vert_vertices_left_offset():
vertices = [(1, 2), (1, 5)]
result = list(offset_vertices_2d(vertices, 1))
assert result[0] == (0, 2)
assert result[1] == (0, 5)
def test_2_vert_vertices_right_offset():
vertices = [(1, 2), (1, 5)]
result = list(offset_vertices_2d(vertices, -1))
assert result[0] == (2, 2)
assert result[1] == (2, 5)
def test_3_horiz_collinear_vertices():
vertices = [(1, 2), (5, 2), (9, 2)]
result = list(offset_vertices_2d(vertices, 1))
assert result[0] == (1, 3)
assert result[1] == (5, 3)
assert result[2] == (9, 3)
def test_3_vert_collinear_vertices():
vertices = [(1, 2), (1, 5), (1, 9)]
result = list(offset_vertices_2d(vertices, 1))
assert result[0] == (0, 2)
assert result[1] == (0, 5)
assert result[2] == (0, 9)
def test_3_vertices():
vertices = [(0, 0), (300, 150), (450, 50)]
result = list(offset_vertices_2d(vertices, 10))
assert result[0].isclose(Vec2((-4.4721, 8.9443)), abs_tol=PRECISION)
assert result[1].isclose(Vec2((300.7184, 161.5396)), abs_tol=PRECISION)
assert result[2].isclose(Vec2((455.547, 58.3205)), abs_tol=PRECISION)
def test_closed_square_inside():
vertices = [(0, 0), (5, 0), (5, 5), (0, 5)]
result = list(offset_vertices_2d(vertices, 1, closed=True))
assert result[0] == (1, 1)
assert result[1] == (4, 1)
assert result[2] == (4, 4)
assert result[3] == (1, 4)
def test_closed_triangle_inside():
vertices = [(0, 0), (5, 0), (2.5, 5)]
result = list(offset_vertices_2d(vertices, 1, closed=True))
assert result[0].isclose(Vec2((1.618, 1)), abs_tol=PRECISION)
assert result[1].isclose(Vec2((3.382, 1)), abs_tol=PRECISION)
assert result[2].isclose(Vec2((2.5, 2.7639)), abs_tol=PRECISION)
def test_closed_shape_with_collinear_last_segment():
vertices = [(0, 0), (5, 0), (5, 5), (-5, 5), (-5, 0)]
result = list(offset_vertices_2d(vertices, 1, closed=True))
assert len(result) == len(vertices)
assert result[0] == (0, 1)
assert result[1] == (4, 1)
assert result[2] == (4, 4)
assert result[3] == (-4, 4)
assert result[4] == (-4, 1)
def test_3_horiz_collinear_vertices_closed():
vertices = [(1, 2), (5, 2), (9, 2)]
result = list(offset_vertices_2d(vertices, 1, closed=True))
assert len(result) == len(vertices) + 2  # closing the shape adds 2 extra vertices
# note: the first result vertex belongs to the closing segment and might be
# expected as the last one instead
assert result[0] == (1, 1)
assert result[1] == (1, 3)
assert result[2] == (5, 3)
# closing segment: (9, 2) -> (1, 2)
assert result[3] == (9, 3)
assert result[4] == (9, 1)
```
#### File: tests/test_07_render/test_703_render_mesh.py
```python
import pytest
from math import radians
from ezdxf.math import Vec3, BoundingBox
from ezdxf.render.forms import cube
from ezdxf.render.mesh import MeshVertexMerger, MeshBuilder, MeshTransformer, MeshAverageVertexMerger
from ezdxf.addons import SierpinskyPyramid
from ezdxf.layouts import VirtualLayout
def test_vertex_merger_indices():
merger = MeshVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_vertex_merger_vertices():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_vertex_merger_index_of():
merger = MeshVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_average_vertex_merger_indices():
merger = MeshAverageVertexMerger()
indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert indices == indices2
def test_average_vertex_merger_vertices():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.vertices == [(1, 2, 3), (4, 5, 6)]
def test_average_vertex_merger_index_of():
merger = MeshAverageVertexMerger()
merger.add_vertices([(1, 2, 3), (4, 5, 6)])
assert merger.index((1, 2, 3)) == 0
assert merger.index((4, 5, 6)) == 1
with pytest.raises(IndexError):
merger.index((7, 8, 9))
def test_mesh_builder(msp):
pyramid = SierpinskyPyramid(level=4, sides=3)
pyramid.render(msp, merge=False)
meshes = msp.query('MESH')
assert len(meshes) == 256
def test_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
def test_average_vertex_merger():
pyramid = SierpinskyPyramid(level=4, sides=3)
faces = pyramid.faces()
mesh = MeshAverageVertexMerger()
for vertices in pyramid:
mesh.add_mesh(vertices=vertices, faces=faces)
assert len(mesh.vertices) == 514
assert len(mesh.faces) == 1024
REGULAR_FACE = Vec3.list([(0, 0, 0), (1, 0, 1), (1, 1, 1), (0, 1, 0)])
IRREGULAR_FACE = Vec3.list([(0, 0, 0), (1, 0, 1), (1, 1, 0), (0, 1, 0)])
def test_has_none_planar_faces():
mesh = MeshBuilder()
mesh.add_face(REGULAR_FACE)
assert mesh.has_none_planar_faces() is False
mesh.add_face(IRREGULAR_FACE)
assert mesh.has_none_planar_faces() is True
def test_scale_mesh():
mesh = cube(center=False)
mesh.scale(2, 3, 4)
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, 0, 0))
assert bbox.extmax.isclose((2, 3, 4))
def test_rotate_x():
mesh = cube(center=False)
mesh.rotate_x(radians(90))
bbox = BoundingBox(mesh.vertices)
assert bbox.extmin.isclose((0, -1, 0))
assert bbox.extmax.isclose((1, 0, 1))
@pytest.fixture
def msp():
return VirtualLayout()
@pytest.fixture(scope='module')
def cube_polyface():
layout = VirtualLayout()
p = layout.add_polyface()
p.append_faces(cube().faces_as_vertices())
return p
def test_from_empty_polyface(msp):
empty_polyface = msp.add_polyface()
b = MeshBuilder.from_polyface(empty_polyface)
assert len(b.vertices) == 0
assert len(b.faces) == 0
def test_from_cube_polyface(cube_polyface):
b = MeshBuilder.from_polyface(cube_polyface)
assert len(b.vertices) == 24 # unoptimized mesh builder
assert len(b.faces) == 6
def test_render_polyface(cube_polyface, msp):
t = MeshTransformer.from_polyface(cube_polyface)
assert len(t.vertices) == 24 # unoptimized mesh builder
assert len(t.faces) == 6
t.render_polyface(msp)
new_polyface = msp[-1]
assert new_polyface.dxftype() == 'POLYLINE'
assert new_polyface.is_poly_face_mesh is True
assert len(new_polyface.vertices) == 8 + 6
assert new_polyface.vertices[0] is not cube_polyface.vertices[0]
def test_from_polymesh(msp):
polymesh = msp.add_polymesh(size=(4, 4))
b = MeshBuilder.from_polyface(polymesh)
n = polymesh.dxf.n_count
m = polymesh.dxf.m_count
nfaces = (n - 1) * (m - 1)
assert len(b.vertices) == nfaces * 4 # unoptimized mesh builder
assert len(b.faces) == nfaces
def test_from_polyface_type_error(msp):
polyline = msp.add_polyline3d([(0, 0, 0), (1, 0, 0)])
with pytest.raises(TypeError):
MeshBuilder.from_polyface(polyline)
line = msp.add_line(start=(0, 0, 0), end=(1, 0, 0))
with pytest.raises(TypeError):
MeshBuilder.from_polyface(line)
@pytest.fixture
def polyface_181_1(msp):
e = msp.new_entity(
'POLYLINE',
dxfattribs={
'flags': 48,
'm_count': 2,
'n_count': 6,
},
)
e.append_vertex((25041.94191089287, 29272.95055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25020.29127589287, 29285.45055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25020.29127589287, 29310.45055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25041.94191089287, 29322.95055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25063.59254589287, 29310.45055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25063.59254589287, 29285.45055566061, 0.0), dxfattribs={'flags': 64})
e.append_vertex((25041.94191089287, 29272.95055566061, 50.0), dxfattribs={'flags': 64})
e.append_vertex((25020.29127589287, 29285.45055566061, 50.0), dxfattribs={'flags': 64})
e.append_vertex((25020.29127589287, 29310.45055566061, 50.0), dxfattribs={'flags': 64})
e.append_vertex((25041.94191089287, 29322.95055566061, 50.0), dxfattribs={'flags': 64})
e.append_vertex((25063.59254589287, 29310.45055566061, 50.0), dxfattribs={'flags': 64})
e.append_vertex((25063.59254589287, 29285.45055566061, 50.0), dxfattribs={'flags': 64})
return e
def test_from_polyface_182_1(polyface_181_1):
mesh = MeshVertexMerger.from_polyface(polyface_181_1)
assert len(mesh.vertices) == 12
@pytest.fixture
def polyface_181_2(msp):
e = msp.new_entity(
'POLYLINE',
dxfattribs={
'flags': 16,
'm_count': 6,
'n_count': 3,
},
)
e.append_vertex((16606.65151901649, 81.88147523282441, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 81.88147523282441, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 81.88147523282441, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 1281.8814752328244, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 1281.8814752328244, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 1281.8814752328244, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 1281.8814752328244, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 1281.8814752328244, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 1281.8814752328244, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 81.88147523282441, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 81.88147523282441, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 81.88147523282441, 1199.9999999999998), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 81.88147523282441, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 81.88147523282441, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 81.88147523282441, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 1281.8814752328244, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16626.65151901649, 1281.8814752328244, 2099.9999999999995), dxfattribs={'flags': 64})
e.append_vertex((16606.65151901649, 1281.8814752328244, 2099.9999999999995), dxfattribs={'flags': 64})
return e
def test_from_polyface_182_2(polyface_181_2):
mesh = MeshVertexMerger.from_polyface(polyface_181_2)
assert len(mesh.vertices) == 8
```
#### File: tests/test_07_render/test_704_render_linear_dimension.py
```python
import ezdxf
import pytest
from ezdxf.render.dimension import LinearDimension, DimStyleOverride
@pytest.fixture(scope='module')
def dwg():
return ezdxf.new('R2007', setup=True)
def test_linear_dimension_with_one_tolerance(dwg):
msp = dwg.modelspace()
dimline = msp.add_linear_dim(base=(0, 10), p1=(0, 0), p2=(100, 0))
override = {
'dimlfac': 1,
'dimtol': 1,
'dimtfac': .5,
'dimtolj': 0,
'dimtp': 0.01,
'dimtm': 0.01,
}
style = DimStyleOverride(dimline.dimension, override)
renderer = LinearDimension(dimline.dimension, override=style)
assert renderer.text == '100'
assert renderer.text_decimal_separator == '.'
assert renderer.tol_decimal_places == 4 # default value
assert renderer.tol_text == '±0.0100'
assert renderer.tol_valign == 0
assert renderer.compile_mtext() == r"\A0;100{\H0.50x;±0.0100}"
def test_linear_dimension_with_two_tolerances(dwg):
msp = dwg.modelspace()
dimline = msp.add_linear_dim(base=(0, 10), p1=(0, 0), p2=(101, 0))
override = {
'dimlfac': 1,
'dimtol': 1,
'dimtfac': .5,
'dimtolj': 1,
'dimtp': 0.02,
'dimtm': 0.03,
}
style = DimStyleOverride(dimline.dimension, override)
renderer = LinearDimension(dimline.dimension, override=style)
assert renderer.text == '101'
assert renderer.text_decimal_separator == '.'
assert renderer.tol_decimal_places == 4 # default value
assert renderer.tol_text_upper == '+0.0200'
assert renderer.tol_text_lower == '-0.0300'
assert renderer.tol_valign == 1
assert renderer.compile_mtext() == r"\A1;101{\H0.50x;\S+0.0200^ -0.0300;}"
def test_linear_dimension_with_limits(dwg):
msp = dwg.modelspace()
dimline = msp.add_linear_dim(base=(0, 10), p1=(0, 0), p2=(101, 0))
override = {
'dimlfac': 1,
'dimlim': 1,
'dimtfac': .5,
'dimtp': 0.02,
'dimtm': 0.03,
}
style = DimStyleOverride(dimline.dimension, override)
renderer = LinearDimension(dimline.dimension, override=style)
assert renderer.text == '101'
assert renderer.text_decimal_separator == '.'
assert renderer.tol_decimal_places == 4 # default value
assert renderer.tol_text_upper == '101.0200'
assert renderer.tol_text_lower == '100.9700'
assert renderer.compile_mtext() == r"{\H0.50x;\S101.0200^ 100.9700;}"
```
#### File: tests/test_07_render/test_711_points.py
```python
import pytest
from ezdxf.render import point
from ezdxf.entities import Point
from ezdxf.math.shape import Shape2d
def pnt(location=(0, 0), angle: float = 0):
return Point.new(dxfattribs={
'angle': angle,
'location': location,
})
def test_dimensionless_point():
loc = (2, 3)
p = pnt(location=loc)
result = point.virtual_entities(p, pdmode=0)
line = result[0]
assert line.dxftype() == 'LINE'
assert line.dxf.start.isclose(loc)
assert line.dxf.end.isclose(loc)
def test_none_point():
p = pnt()
result = point.virtual_entities(p, pdmode=1)
assert len(result) == 0
def test_cross_point():
p = pnt()
result = point.virtual_entities(p, pdmode=2)
line1, line2 = result
assert line1.dxf.start == (-1, 0)
assert line1.dxf.end == (+1, 0)
assert line2.dxf.start == (0, -1)
assert line2.dxf.end == (0, +1)
def test_x_cross_point():
p = pnt()
result = point.virtual_entities(p, pdmode=3)
line1, line2 = result
assert line1.dxf.start == (-1, -1)
assert line1.dxf.end == (+1, +1)
assert line2.dxf.start == (-1, +1)
assert line2.dxf.end == (+1, -1)
def test_tick_point():
p = pnt()
result = point.virtual_entities(p, pdmode=4)
line1 = result[0]
assert line1.dxf.start == (0, 0)
assert line1.dxf.end == (0, 0.5)
def test_square_point():
p = pnt()
result = point.virtual_entities(p, pdmode=65)
line1, line2, line3, line4 = result
lower_left = (-0.5, -0.5)
assert line1.dxf.start == lower_left
lower_right = (0.5, -0.5)
assert line1.dxf.end == lower_right
assert line2.dxf.start == lower_right
upper_right = (0.5, 0.5)
assert line2.dxf.end == upper_right
assert line3.dxf.start == upper_right
upper_left = (-0.5, 0.5)
assert line3.dxf.end == upper_left
assert line4.dxf.start == upper_left
assert line4.dxf.end == lower_left
def test_circle_point():
p = pnt()
result = point.virtual_entities(p, pdmode=33)
circle = result[0]
assert circle.dxf.center == (0, 0)
assert circle.dxf.radius == 0.5
def test_rotated_cross_point():
expected = Shape2d([(-1, 0), (1, 0), (0, -1), (0, 1)])
expected.rotate(-30) # count-clockwise rotation
s1, e1, s2, e2 = expected.vertices
p = pnt(angle=30) # clockwise angle!!
result = point.virtual_entities(p, pdmode=2)
line1, line2 = result
assert line1.dxf.start.isclose(s1)
assert line1.dxf.end.isclose(e1)
assert line2.dxf.start.isclose(s2)
assert line2.dxf.end.isclose(e2)
if __name__ == '__main__':
pytest.main([__file__])
``` |
{
"source": "jpsantos-motofil/jog_control",
"score": 2
} |
#### File: jog_controller/script/joy_to_jog.py
```python
import rospy
from jog_msgs.msg import JogFrame
from jog_msgs.msg import JogJoint
from sensor_msgs.msg import Joy
class joy_to_jog_frame:
def __init__(self):
self.enable_button = rospy.get_param('~enable_button', 4)
self.angular_button = rospy.get_param('~angular_button', 5)
self.frame_mode_button = rospy.get_param('~frame_mode_button', 8)
self.joint_mode_button = rospy.get_param('~joint_mode_button', 9)
self.axis_linear = rospy.get_param(
'~axis_linear', {'x': 0, 'y': 1, 'z': 4})
self.axis_angular = rospy.get_param(
'~axis_angular', {'x': 0, 'y': 1, 'z': 4})
self.axis_joints = rospy.get_param(
'~axis_joints', {'J1': 1, 'J2': 2, 'J3': 3, 'J4': 4, 'J5': 5, 'J6': 6})
self.scale_linear = rospy.get_param(
'~scale_linear', {'x': 0.05, 'y': 0.05, 'z': 0.05})
self.scale_angular = rospy.get_param(
'~scale_angular', {'x': 0.05, 'y': 0.05, 'z': 0.05})
self.scale_joints = rospy.get_param('~scale_joints', {
'J1': 0.05, 'J2': 0.05, 'J3': 0.05, 'J4': 0.05, 'J5': 0.05, 'J6': 0.05})
self.pub_frm = rospy.Publisher('jog_frame', JogFrame, queue_size=1)
self.pub_jnt = rospy.Publisher('jog_joint', JogJoint, queue_size=1)
self.mode = True # True = frame, False = joint
# Convert to JogFrame and republish
def callback(self, joy):
if joy.buttons[self.frame_mode_button]:
self.mode = True
rospy.loginfo('Mode: Frame')
if joy.buttons[self.joint_mode_button]:
self.mode = False
rospy.loginfo('Mode: Joint')
if not joy.buttons[self.enable_button]:
return
if self.mode: #Frame_jog
msg_frm = JogFrame()
msg_frm.header.stamp = rospy.Time.now()
msg_frm.header.frame_id = rospy.get_param('~frame_id', 'base_link')
msg_frm.group_name = rospy.get_param('~group_name', 'manipulator')
msg_frm.link_name = rospy.get_param('~link_name', 'tool0')
if joy.buttons[self.angular_button]:
msg_frm.angular_delta.x = self.scale_angular['x']*joy.axes[self.axis_angular['x']]
msg_frm.angular_delta.y = self.scale_angular['y']*joy.axes[self.axis_angular['y']]
msg_frm.angular_delta.z = self.scale_angular['z']*joy.axes[self.axis_angular['z']]
else:
# Scale the analog stick axes into Cartesian linear deltas
msg_frm.linear_delta.x = self.scale_linear['x']*joy.axes[self.axis_linear['x']]
msg_frm.linear_delta.y = self.scale_linear['y']*joy.axes[self.axis_linear['y']]
msg_frm.linear_delta.z = self.scale_linear['z']*joy.axes[self.axis_linear['z']]
msg_frm.avoid_collisions = True
self.pub_frm.publish(msg_frm)
else: #Joint_jog
msg_jnt = JogJoint()
msg_jnt.header.stamp = rospy.Time.now()
msg_jnt.header.frame_id = rospy.get_param('~frame_id', 'base_link')
msg_jnt.joint_names = rospy.get_param("/jog_joint_node/joint_names", ['joint1', 'joint2', 'joint3', 'joint4', 'joint5', 'joint6'])
# Scale the analog stick axes into per-joint deltas
msg_jnt.deltas = [0]*6
msg_jnt.deltas[0] = self.scale_joints['J1']*(joy.axes[self.axis_joints['J1']])
msg_jnt.deltas[1] = self.scale_joints['J2']*(joy.axes[self.axis_joints['J2']])
msg_jnt.deltas[2] = self.scale_joints['J3']*(joy.axes[self.axis_joints['J3']])
msg_jnt.deltas[3] = self.scale_joints['J4']*(joy.axes[self.axis_joints['J4']])
msg_jnt.deltas[4] = self.scale_joints['J5']*(joy.axes[self.axis_joints['J5']])
msg_jnt.deltas[5] = self.scale_joints['J6']*(joy.axes[self.axis_joints['J6']])
self.pub_jnt.publish(msg_jnt)
def republish(self):
rospy.Subscriber(rospy.get_param('~sub_topic', 'joy'), Joy, self.callback)
rospy.spin()
if __name__ == '__main__':
rospy.init_node('joy_to_jog_frame', anonymous=True)
republisher = joy_to_jog_frame()
republisher.republish()
``` |
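The callback above maps analog joystick axes to jog deltas by simple per-axis scaling. The following ROS-free sketch mirrors that math with made-up axis values and the script's default scale parameters, just to show how a stick deflection becomes a linear delta; in the real node the values come from the `Joy` message and the parameter server.
```python
# Minimal, hypothetical sketch of the axis-to-delta scaling in
# joy_to_jog_frame.callback; the axis values below are illustrative only.
axis_linear = {'x': 0, 'y': 1, 'z': 4}            # default ~axis_linear mapping
scale_linear = {'x': 0.05, 'y': 0.05, 'z': 0.05}  # default ~scale_linear values

# A fake Joy axes array: full deflection on x, half on y, none on z.
axes = [1.0, 0.5, 0.0, 0.0, 0.0, 0.0]

linear_delta = {k: scale_linear[k] * axes[axis_linear[k]] for k in ('x', 'y', 'z')}
print(linear_delta)  # {'x': 0.05, 'y': 0.025, 'z': 0.0}
```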
{
"source": "jpscaletti/hecto",
"score": 4
} |
#### File: hecto/utils/prompt.py
```python
__all__ = (
"prompt",
"prompt_bool",
)
no_value = object()
def required(value):
if not value:
raise ValueError()
return value
def prompt(question, default=no_value, default_show=None, validator=required, **kwargs):
"""
Prompt for a value from the command line. A default value can be provided,
which will be used if no text is entered by the user. The value can be
validated, and possibly changed by supplying a validator function. Any
extra keyword arguments to this function will be passed along to the
validator. If the validator raises a ValueError, the error message will be
printed and the user asked to supply another value.
"""
if default_show:
question += f" [{default_show}] "
elif default and default is not no_value:
question += f" [{default}] "
else:
question += " "
while True:
resp = input(question)
if not resp:
if default is None:
return None
if default is not no_value:
resp = default
try:
return validator(resp, **kwargs)
except ValueError as e:
if str(e):
print(str(e))
def prompt_bool(question, default=False, yes="y", no="n"):
please_answer = f' Please answer "{yes}" or "{no}"'
def validator(value):
if value:
value = str(value).lower()[0]
if value == yes:
return True
elif value == no:
return False
else:
raise ValueError(please_answer)
if default is None:
default = no_value
default_show = yes + "/" + no
elif default:
default = yes
default_show = yes.upper() + "/" + no
else:
default = no
default_show = yes + "/" + no.upper()
return prompt(
question, default=default, default_show=default_show, validator=validator
)
```
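Because `prompt` re-asks whenever the validator raises `ValueError`, a custom validator is the natural way to constrain input. Here is a hypothetical usage sketch; `parse_age` is an illustration, not part of hecto, and the import path is assumed from the file header above.
```python
# Hypothetical usage of prompt/prompt_bool (import path assumed).
from hecto.utils.prompt import prompt, prompt_bool

def parse_age(value):
    age = int(value)  # raises ValueError on non-numeric input, so prompt re-asks
    if not 0 < age < 130:
        raise ValueError("Please enter a plausible age")
    return age

name = prompt("What's your name?", default="World")
age = prompt("How old are you?", validator=parse_age)
if prompt_bool("Subscribe to the newsletter?", default=True):
    print(f"Welcome, {name} ({age})!")
```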
#### File: hecto/tests/conftest.py
```python
from hashlib import sha1
from pathlib import Path
from tempfile import mkdtemp
from unittest import mock
import errno
import filecmp
import os
import shutil
import hecto
import pytest
import six
@pytest.fixture(scope="session")
def PROJECT_TEMPLATE():
return Path(__file__).parent / "demo"
@pytest.fixture(scope="session")
def DATA():
return {
"py3": True,
"make_secret": lambda: sha1(os.urandom(48)).hexdigest(),
"myvar": "awesome",
"what": "world",
"project_name": "Hecto",
"version": "1.0.0",
"description": "A library for rendering projects templates",
}
@pytest.fixture(scope="session")
def render(PROJECT_TEMPLATE, DATA):
def render(dst, **kwargs):
kwargs.setdefault("quiet", True)
hecto.copy(PROJECT_TEMPLATE, dst, data=DATA, **kwargs)
return render
@pytest.fixture(scope="session")
def assert_file(PROJECT_TEMPLATE):
def assert_file(dst, *path):
p1 = os.path.join(str(dst), *path)
p2 = os.path.join(str(PROJECT_TEMPLATE), *path)
assert filecmp.cmp(p1, p2)
return assert_file
@pytest.fixture(scope="session")
def make_folder():
def make_folder(folder):
if not folder.exists():
try:
os.makedirs(str(folder))
except OSError as e: # pragma: no cover
if e.errno != errno.EEXIST:
raise
return make_folder
@pytest.fixture()
def dst(request):
"""Return a real temporary folder path which is unique to each test
function invocation. This folder is deleted after the test has finished.
"""
dst = mkdtemp()
dst = Path(dst).resolve()
request.addfinalizer(lambda: shutil.rmtree(str(dst), ignore_errors=True))
return dst
class AppendableStringIO(six.StringIO):
def append(self, text):
pos = self.tell()
self.seek(0, os.SEEK_END)
self.write(text)
self.seek(pos)
@pytest.fixture()
def stdin():
buffer = AppendableStringIO()
with mock.patch("sys.stdin", buffer):
yield buffer
``` |
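The `stdin` fixture works because `AppendableStringIO` preserves the read position while new text is written at the end, so a test can queue answers before the code under test calls `input()`. A self-contained sketch of that mechanic (using `io.StringIO` in place of six's):
```python
import os
from io import StringIO

class AppendableStringIO(StringIO):  # same idea as the fixture's helper above
    def append(self, text):
        pos = self.tell()
        self.seek(0, os.SEEK_END)  # write the new text at the end...
        self.write(text)
        self.seek(pos)             # ...then restore the read position

buf = AppendableStringIO()
buf.append("y\n")  # queue a first answer
buf.append("n\n")  # and a second one
assert buf.readline() == "y\n"  # reads consume the queued answers in order
assert buf.readline() == "n\n"
```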
{
"source": "jpscaletti/solution",
"score": 4
} |
#### File: solution/fields/collection.py
```python
from __future__ import absolute_import
import inspect
import re
from .field import Field
from .text import Text
class Collection(Text):
"""A field that takes an open number of values of the same kind.
For example, a list of comma separated tags or email addresses.
:param sep:
String to separate each value.
When joining the values to render, it is used as-is. When splitting
the user input, however, is tranformed first to a regexp
when the spaces around the separator are ignored.
:param filters:
List of callables (can be validators). If a value do not pass one
of these (the callable return `False`), it is filtered out from the
final result.
:param validate:
An list of validators. This will evaluate the current `value` when
the method `validate` is called.
:param default:
Default value.
:param prepare:
An optional function that takes the current value as a string
and preprocess it before rendering.
:param clean:
An optional function that takes the value already converted to
python and return a 'cleaned' version of it. If the value can't be
cleaned `None` must be returned instead.
:param hide_value:
Do not render the current value a a string. Useful with passwords
fields.
:param locale:
Default locale for this field. Overwrite the form locale.
:param tz:
Default timezone for this field. Overwrite the form timezone.
"""
_type = 'text'
def __init__(self, sep=', ', filters=None, **kwargs):
kwargs.setdefault('default', [])
self.sep = sep
self.rxsep = r'\s*%s\s*' % re.escape(self.sep.replace(' ', ''))
filters = filters or []
self.filters = [f() if inspect.isclass(f) else f for f in filters]
super(Collection, self).__init__(**kwargs)
def as_dict(self):
dd = Field.as_dict(self)
dd['value'] = self._split_values(self.str_value) or []
return dd
def _clean_data(self, str_value, file_data, obj_value):
if isinstance(str_value, (list, tuple)):
if len(str_value):
str_value = str_value[0]
else:
str_value = None
if str_value:
str_value = self.sep.join(self._split_values(str_value))
if not isinstance(obj_value, (list, tuple)):
if obj_value:
obj_value = [obj_value]
else:
obj_value = None
return (str_value, None, obj_value)
def str_to_py(self, **kwargs):
if self.str_value is None:
return None
py_values = self._split_values(self.str_value)
if not self.filters:
return py_values
final_values = []
for val in py_values:
for f in self.filters:
if not f(val):
break
else:
# only executed if the loop `for f in self.filters` has
# exited normally, so the value has passed all filters.
final_values.append(val)
return final_values
def py_to_str(self, **kwargs):
if not self.obj_value:
return self.default or u''
return self.sep.join(self.obj_value)
def _split_values(self, str_value):
if not str_value:
return []
values = re.split(self.rxsep, str_value.strip())
return list(filter(lambda x: x != u'', values))
```
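To see the separator handling concretely, the snippet below reproduces how `Collection` builds `rxsep` and splits user input: whitespace around the separator is ignored, and empty fragments (e.g. from a trailing separator) are dropped.
```python
import re

sep = ', '
# Same construction as Collection.rxsep: strip spaces, escape, allow whitespace.
rxsep = r'\s*%s\s*' % re.escape(sep.replace(' ', ''))

values = re.split(rxsep, 'python ,  web , forms,'.strip())
print([v for v in values if v != ''])  # ['python', 'web', 'forms']
```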
#### File: solution/fields/color.py
```python
from __future__ import absolute_import
import re
from .. import validators as v
from .text import Text
class Color(Text):
"""A color field.
:param validate:
A list of validators. These will evaluate the current `value` when
the method `validate` is called.
:param default:
Default value.
:param prepare:
An optional function that takes the current value as a string
and preprocesses it before rendering.
:param clean:
An optional function that takes the value already converted to
python and returns a 'cleaned' version of it. If the value can't be
cleaned, `None` must be returned instead.
:param hide_value:
Do not render the current value as a string. Useful for password
fields.
"""
_type = 'color'
default_validator = v.IsColor
rx_colors = re.compile(
r'#?(?P<hex>[0-9a-f]{3,8})|'
r'rgba?\((?P<r>[0-9]+)\s*,\s*(?P<g>[0-9]+)\s*,\s*(?P<b>[0-9]+)'
r'(?:\s*,\s*(?P<a>\.?[0-9]+))?\)',
re.IGNORECASE)
def str_to_py(self, **kwargs):
if not self.str_value:
return None
str_value = self.str_value.strip().replace(' ', '').lower()
m = self.rx_colors.match(str_value)
if not m:
return None
md = m.groupdict()
if md['hex']:
return normalize_hex(md['hex'])
return normalize_rgb(md['r'], md['g'], md['b'], md.get('a'))
def normalize_hex(hex_color):
"""Transform a xxx hex color to xxxxxx.
"""
hex_color = hex_color.replace('#', '').lower()
length = len(hex_color)
if length in (6, 8):
return '#' + hex_color
if length not in (3, 4):
return None
strhex = u'#%s%s%s' % (
hex_color[0] * 2,
hex_color[1] * 2,
hex_color[2] * 2)
if length == 4:
strhex += hex_color[3] * 2
return strhex
def normalize_rgb(r, g, b, a):
"""Transform a rgb[a] color to #hex[a].
"""
r = int(r, 10)
g = int(g, 10)
b = int(b, 10)
if a:
a = float(a) * 256
if r > 255 or g > 255 or b > 255 or (a and a > 255):
return None
color = '#%02x%02x%02x' % (r, g, b)
if a:
color += '%02x' % int(a)
return color
```
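Both normalizers converge on the same `#rrggbb[aa]` form. A quick sketch of their behaviour (the import path is assumed from the file header above):
```python
from solution.fields.color import normalize_hex, normalize_rgb

print(normalize_hex('f80'))                     # '#ff8800' (3-digit hex expanded)
print(normalize_hex('ff8800'))                  # '#ff8800'
print(normalize_rgb('255', '136', '0', None))   # '#ff8800'
print(normalize_rgb('255', '136', '0', '.5'))   # '#ff880080' (alpha appended)
```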
#### File: fields/file/file.py
```python
from werkzeug.datastructures import FileStorage
from solution.fields import Field
from solution.utils import Markup, get_html_attrs
from .helpers import FileSystemUploader
class File(Field):
""" An upload file field.
**Does not actually upload the file. Use its ``clean`` method for that.**
:param validate:
A list of validators. These will evaluate the current `value` when
the method `validate` is called.
:param default:
Default value.
:param prepare:
An optional function that takes the current value as a string
and preprocesses it before rendering.
:param clean:
An optional function that takes the value already converted to
python and returns a 'cleaned' version of it. If the value can't be
cleaned, `None` must be returned instead.
"""
_type = 'file'
hide_value = True
def __init__(self, base_path='.', **kwargs):
# Backwards compatibility
kwargs.setdefault('clean', kwargs.get('upload'))
self.base_path = base_path
if base_path is None:
self.storage = None
else:
self.storage = FileSystemUploader(
base_path=base_path,
upload_to=kwargs.pop('upload_to', ''),
secret=kwargs.pop('secret', False),
prefix=kwargs.pop('prefix', ''),
allowed=kwargs.pop('allowed', None),
denied=kwargs.pop('denied', None),
max_size=kwargs.pop('max_size', None),
)
super(File, self).__init__(**kwargs)
def clean(self, value):
"""Takes a Werkzeug FileStorage, returns the relative path.
"""
if isinstance(value, FileStorage):
return self.storage.save(value)
return value
def str_to_py(self, **kwargs):
return self.str_value or self.file_data or self.obj_value
def __call__(self, **kwargs):
return self.as_input(**kwargs)
def as_input(self, **kwargs):
attrs = self.extra.copy()
attrs.update(kwargs)
attrs.setdefault('type', self._type)
attrs['name'] = self.name
if attrs['type'] != self._type:
attrs['value'] = self.to_string(**attrs)
if not self.optional and not self.obj_value:
attrs.setdefault('required', True)
html = u'<input %s>' % get_html_attrs(attrs)
return Markup(html)
```
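A hedged sketch of running an upload through `clean`, assuming `File` is re-exported from `solution.fields` and that `FileSystemUploader.save` writes the file and returns its relative path (as the docstring above implies); the paths and filename here are illustrative.
```python
from io import BytesIO
from werkzeug.datastructures import FileStorage
from solution.fields import File

field = File(base_path='/tmp', upload_to='attachments')
storage = FileStorage(stream=BytesIO(b'hello'), filename='note.txt')
# Saves the file under /tmp/attachments and returns its relative path
relative_path = field.clean(storage)
```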
#### File: fields/file/image.py
```python
from math import floor, ceil
from os.path import join
from .file import File
from .helpers import IMAGES
class Image(File):
""" Similar to a File Field but takes an tuple as size parameter and makes
sure the image is of that size.
"""
def __init__(self, base_path='.', size=None, **kwargs):
self.size = size
if size:
self.width = size[0]
self.height = size[1]
kwargs.setdefault('allowed', IMAGES)
super(Image, self).__init__(base_path, **kwargs)
def clean(self, value):
"""Passes the value to FileField and resizes the image at the path the parent
returns if needed.
"""
path = super(Image, self).clean(value)
if path and self.size:
self.resize_image(join(self.base_path, path))
return path
def resize_image(self, image_path):
import wand.image
with wand.image.Image(filename=image_path) as img:
result = Image.calculate_dimensions(
img.size, self.size
)
if result:
x, y, width, height = result
img.crop(x, y, width=width, height=height)
img.save(filename=image_path)
@staticmethod
def calculate_dimensions(image_size, desired_size):
"""Return the Tuple with the arguments to pass to Image.crop.
If the image is smaller than the desired_size, don't do
anything. Otherwise, first calculate the (truncated) center and then
take half the width and height (truncated again) for x and y.
x0, y0: the center coordinates
"""
current_x, current_y = image_size
target_x, target_y = desired_size
if current_x < target_x and current_y < target_y:
return None
if current_x > target_x:
new_x0 = floor(current_x / 2)
new_x = new_x0 - ceil(target_x / 2)
new_width = target_x
else:
new_x = 0
new_width = current_x
if current_y > target_y:
new_y0 = floor(current_y / 2)
new_y = new_y0 - ceil(target_y / 2)
new_height = target_y
else:
new_y = 0
new_height = current_y
return (int(new_x), int(new_y), new_width, new_height)
```
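The center-crop arithmetic above can be checked directly, since `calculate_dimensions` is a pure static method; these calls mirror the test cases further down.
```python
import solution as f

# 600x800 cropped to 300x480: x = 600/2 - 300/2 = 150, y = 800/2 - 480/2 = 160
assert f.Image.calculate_dimensions((600, 800), (300, 480)) == (150, 160, 300, 480)
# Images already smaller than the target are left untouched
assert f.Image.calculate_dimensions((300, 480), (600, 800)) is None
```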
#### File: solution/solution/form.py
```python
from copy import copy
import inspect
try:
import simplejson as json
except ImportError:
import json
from ._compat import itervalues
from .fields import Field
from .formset import FormSet
from .utils import FakeMultiDict, get_obj_value, set_obj_value, json_serial
class Form(object):
"""Declarative Form base class. Provides core behaviour like field
construction, validation, and data and error proxying.
:param data:
Used to pass data coming from the enduser, usually `request.form`,
`request.POST` or equivalent.
:param obj:
If `data` is empty or not provided, this object is checked for
attributes matching field names.
:param files:
Used to pass files coming from the enduser, usually `request.files`,
or equivalent.
:param locale:
Default locale for this form. Can be overwritten in each field.
:param tz:
Default timezone for this form. Can be overwritten in each field.
:param prefix:
If provided, all fields will have their name prefixed with the
value. Used to repeat the form on the same page.
:param backref:
.
"""
_model = None
_fields = None
_forms = None
_sets = None
_errors = None
_named_errors = None
_input_data = None
cleaned_data = None
changed_fields = None
def __init__(self, data=None, obj=None, files=None, locale='en', tz='utc',
prefix=u'', backref=None, parent=None):
backref = backref or parent
if self._model is not None:
assert inspect.isclass(self._model)
data = data or {}
if not hasattr(data, 'getlist'):
data = FakeMultiDict(data)
files = files or {}
if not hasattr(files, 'getlist'):
files = FakeMultiDict(files)
obj = obj or {}
if isinstance(obj, dict):
obj = FakeMultiDict(obj)
self._locale = locale
self._tz = tz
prefix = prefix or u''
if prefix and not prefix.endswith(('_', '-', '.', '+', '|')):
prefix += u'-'
self._prefix = prefix
self._backref = backref
self.cleaned_data = {}
self.changed_fields = []
self.validated = False
self._obj = obj
self._errors = {}
self._named_errors = {}
self._init_fields()
# Even when there is no data we need this initialisation
self._init_data(data, obj, files)
def _init_fields(self):
"""Creates the `_fields`, `_forms` asn `_sets` dicts.
Any properties which begin with an underscore or are not `Field`,
`Form` or `FormSet` **instances** are ignored by this method.
"""
fields = {}
forms = {}
sets = {}
for name in dir(self):
if name.startswith('_'):
continue
field = getattr(self, name)
is_field = isinstance(field, Field)
is_form = isinstance(field, Form) or (
inspect.isclass(field) and issubclass(field, Form))
is_set = isinstance(field, FormSet)
if is_field:
field = copy(field)
field.name = self._prefix + name
field.form = self
if field.prepare is None:
field.prepare = getattr(self, 'prepare_' + name, None)
if field.clean is None:
field.clean = getattr(self, 'clean_' + name, None)
fields[name] = field
setattr(self, name, field)
elif is_form:
forms[name] = field
elif is_set:
field._name = self._prefix + name # REALLY IMPORTANT
sets[name] = field
self._fields = fields
self._forms = forms
self._sets = sets
def as_dict(self):
dd = {
field.name: field.as_dict()
for field in self._fields.values()
}
dd.update({
name: form.as_dict()
for name, form in self._forms.items()
})
dd.update({
name: formset.as_dict()
for name, formset in self._sets.items()
})
dd.update({
'_{}_form'.format(name): formset.form.as_dict()
for name, formset in self._sets.items()
})
return dd
def as_json(self):
"""Useful for inserting the form data as a JavaScript object."""
return json.dumps(self.as_dict(), default=json_serial)
def prepare(self, data):
"""You can overwrite this method to store the logic of pre-processing
the input data.
"""
return data
def clean(self, cleaned_data):
"""You can overwrite this method to store the logic of post-processing
the cleaned data after validation.
You can delete fields but any field that isn't part of the form is
filtered out.
"""
return cleaned_data
def _init_data(self, data, obj, files):
"""Load the data into the form.
"""
data = self.prepare(data)
# Initialize sub-forms
for name, subform in self._forms.items():
obj_value = get_obj_value(obj, name)
if inspect.isclass(subform):
fclass = subform
else:
fclass = subform.__class__
subform_prefix = '{prefix}{name}.'.format(
prefix=self._prefix,
name=name.lower()
)
subform = fclass(
data,
obj_value,
files=files,
locale=self._locale,
tz=self._tz,
prefix=subform_prefix,
backref=getattr(subform, '_backref', None)
)
self._forms[name] = subform
setattr(self, name, subform)
self._input_data = self._input_data or subform._input_data
# Initialize form-sets
for name, formset in self._sets.items():
sclass = formset.__class__
objs = formset._objs or get_obj_value(obj, name)
formset_name = '{prefix}{name}'.format(
prefix=self._prefix,
name=name.lower()
)
formset = sclass(
form_class=formset._form_class,
name=formset_name,
data=data,
objs=objs,
files=files,
locale=self._locale,
tz=self._tz,
create_new=formset._create_new,
backref=formset._backref
)
self._sets[name] = formset
setattr(self, name, formset)
for _form in formset._forms:
self._input_data = self._input_data or _form._input_data
# Initialize fields
for name, field in self._fields.items():
subdata = data.getlist(self._prefix + name)
subfiles = files.getlist(self._prefix + name)
self._input_data = self._input_data or subdata or subfiles
obj_value = get_obj_value(obj, name)
was_deleted = self._prefix + name + '__deleted' in data
if was_deleted:
subdata = obj_value = subfiles = None
field.load_data(subdata, obj_value, file_data=subfiles,
locale=self._locale, tz=self._tz)
# delete field data
if was_deleted:
field._deleted = True
def reset(self):
for subform in self._forms.values():
subform.reset()
for formset in self._sets.values():
formset.reset()
for field in self._fields.values():
field.reset()
def __iter__(self):
"""Iterate form fields in arbitrary order.
"""
return itervalues(self._fields)
def __getitem__(self, name):
return self._fields[name]
def __contains__(self, name):
return (name in self._fields)
@property
def has_input_data(self):
return bool(self._input_data)
@property
def has_changed(self):
return len(self.changed_fields) > 0
def is_valid(self):
"""Return whether the current values of the form fields are all valid.
"""
self.cleaned_data = {}
self.changed_fields = []
self.validated = False
self._errors = {}
self._named_errors = {}
cleaned_data = {}
changed_fields = []
errors = {}
named_errors = {}
# Validate sub forms
for name, subform in self._forms.items():
if not subform.is_valid():
errors[name] = subform._errors
named_errors.update(subform._named_errors)
continue
if subform.has_changed:
changed_fields.append(name)
# Validate sub sets
for name, formset in self._sets.items():
if not formset.is_valid():
errors[name] = formset._errors
named_errors.update(formset._named_errors)
continue
if formset.has_changed:
changed_fields.append(name)
# Validate each field
for name, field in self._fields.items():
field.error = None
py_value = field.validate(self)
if field.error:
errors[name] = field.error
named_errors[field.name] = field.error
continue
cleaned_data[name] = py_value
if hasattr(field, '_deleted'):
cleaned_data[name] = None
field.has_changed = True
if field.has_changed:
changed_fields.append(name)
# Validate relation between fields
for name, field in self._fields.items():
field.validate(self, cleaned_data)
if field.error:
errors[name] = field.error
named_errors[field.name] = field.error
continue
if errors:
self._errors = errors
self._named_errors = named_errors
return False
self.changed_fields = changed_fields
self.cleaned_data = self.clean(cleaned_data)
self.validated = True
return True
def save(self, backref_obj=None):
"""Save the cleaned data to the initial object or creating a new one
(if a `model_class` was provided).
"""
if not self.validated:
assert self.is_valid()
if self._model and not self._obj:
obj = self._save_new_object(backref_obj)
else:
obj = self.save_to(self._obj)
for key, subform in self._forms.items():
data = subform.save(obj)
if self._model and not data:
continue
set_obj_value(obj, key, data)
for key, formset in self._sets.items():
data = formset.save(obj)
if self._model and not data:
continue
set_obj_value(obj, key, data)
return obj
def _save_new_object(self, backref_obj=None):
db = self._model.db
data = dict([
(key, val) for key, val in self.cleaned_data.items()
if (
(not isinstance(getattr(self, key), FormSet)) and
(not isinstance(getattr(self, key), Form))
)
])
if self._backref and backref_obj:
data[self._backref] = backref_obj
obj = self._model(**data)
db.add(obj)
return obj
def save_to(self, obj):
"""Save the cleaned data to an object.
"""
if isinstance(obj, dict):
obj = dict(obj)
for key in self.changed_fields:
if key in self.cleaned_data:
val = self.cleaned_data.get(key)
set_obj_value(obj, key, val)
return obj
def __repr__(self):
return '<%s>' % self.__class__.__name__
```
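A minimal sketch of declaring and validating a form, assuming `solution` exports `Form`, `Text`, and `Required` at the package level as the tests further down suggest; the field names are illustrative.
```python
import solution as f

class ContactForm(f.Form):
    subject = f.Text(validate=[f.Required])
    message = f.Text(validate=[f.Required])

form = ContactForm({'subject': u'Hi', 'message': u'Lorem ipsum'})
assert form.is_valid()
assert form.cleaned_data['subject'] == u'Hi'
```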
#### File: solution/validators/patterns.py
```python
from email.utils import parseaddr
import re
from .._compat import string_types, urlsplit, urlunsplit, to_unicode
from .validator import Validator
class Match(Validator):
"""Validates the field against a regular expression.
:param regex:
The regular expression string to use. Can also be a compiled regular
expression pattern.
:param flags:
The regexp flags to use. By default re.IGNORECASE.
Ignored if `regex` is not a string.
:param message:
Error message to raise in case of a validation error.
"""
message = u'This value doesn\'t seem to be valid.'
def __init__(self, regex, message=None, flags=re.IGNORECASE):
if isinstance(regex, string_types):
regex = re.compile(regex, flags)
self.regex = regex
if message is not None:
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
return self.regex.match(py_value or u'')
class ValidColor(Match):
"""Validates that the field is a string representing a rgb or rgba color
in the format `#rrggbb[aa]`.
:param message:
Error message to raise in case of a validation error.
"""
message = u'Enter a valid color.'
regex = re.compile(r'#[0-9a-f]{6,8}', re.IGNORECASE)
def __init__(self, message=None):
if message is not None:
self.message = message
IsColor = ValidColor
class ValidEmail(Validator):
"""Validates an email address.
Note that the purpose of this validator is to alert the user of a typing
mistake, so it uses a very permissive regexp. Even if the format is valid,
it cannot guarantee that the email is real.
:param message:
Error message to raise in case of a validation error.
"""
message = u'Enter a valid e-mail address.'
email_rx = re.compile(
r'^[A-Z0-9][A-Z0-9._%+-]*@[A-Z0-9][A-Z0-9\-\.]{0,61}\.[A-Z0-9]+$',
re.IGNORECASE)
def __init__(self, message=None):
if message is not None:
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
if '@' not in py_value:
return False
py_value = parseaddr(py_value)[-1]
if '.@' in py_value:
return False
try:
py_value = self._encode_idna(py_value)
except (UnicodeDecodeError, UnicodeError):
return False
return bool(self.email_rx.match(py_value))
def _encode_idna(self, py_value):
parts = py_value.split(u'@')
domain = parts[-1]
domain = domain.encode(u'idna')
parts[-1] = to_unicode(domain)
return u'@'.join(parts)
class ValidURL(Validator):
"""Simple regexp based URL validation. Much like the IsEmail validator, you
probably want to validate the URL later by other means if the URL must
resolve.
:param message:
Error message to raise in case of a validation error.
:param require_tld:
If true, then the domain-name portion of the URL must contain a .tld
suffix. Set this to false if you want to allow domains like
`localhost`.
"""
message = u'Enter a valid URL.'
url_rx = r'^([a-z]{3,7}:(//)?)?([^/:]+%s|([0-9]{1,3}\.){3}[0-9]{1,3})(:[0-9]+)?(\/.*)?$'
def __init__(self, message=None, require_tld=True):
tld_part = r'\.[a-z]{2,10}' if require_tld else u''
self.regex = re.compile(self.url_rx % tld_part, re.IGNORECASE)
if message is not None:
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
if self.regex.match(py_value):
return True
# Common case failed. Try for possible IDN domain-part
try:
py_value = self._encode_idna(py_value)
return bool(self.regex.match(py_value))
except (UnicodeDecodeError, UnicodeError):
return False
def _encode_idna(self, py_value):
scheme, netloc, path, query, fragment = urlsplit(py_value)
netloc = netloc.encode('idna') # IDN -> ACE
return urlunsplit((scheme, netloc, path, query, fragment))
```
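The validators above are plain callables, so they can be exercised directly; a sketch assuming the module path shown in the file header.
```python
from solution.validators.patterns import IsColor, Match, ValidURL

assert IsColor()(u'#2868c7')
assert not IsColor()(u'red')                    # only #hex values pass
assert ValidURL()(u'http://example.com/docs')   # require_tld=True by default
assert Match(r'[a-z]+\d+')(u'abc123')
```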
#### File: solution/validators/simple.py
```python
from .validator import Validator
from .._compat import string_types
class Required(Validator):
"""Validates that the field contains data.
:param message:
Error message to raise in case of a validation error.
"""
message = u'This field is required.'
def __call__(self, py_value=None, form=None):
if isinstance(py_value, string_types):
return bool(py_value.strip())
return py_value not in ('', None)
class IsNumber(Validator):
"""Validates that the field is a number (integer or floating point).
:param message:
Error message to raise in case of a validation error.
"""
message = u'Enter a number.'
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
try:
float(py_value)
except Exception:
return False
return True
```
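A sketch of the two validators above; `Required` treats whitespace-only strings as empty, and `IsNumber` accepts anything `float()` can parse.
```python
from solution.validators.simple import Required, IsNumber

assert Required()(u'hello')
assert not Required()(u'   ')    # whitespace-only counts as missing
assert IsNumber()(u'3.14')
assert not IsNumber()(u'abc')
```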
#### File: solution/validators/validator.py
```python
class Validator(object):
"""Base field Validator.
:param message:
Error message to raise in case of a validation error.
"""
message = u'Invalid value.'
def __init__(self, message=None):
if message is not None:
self.message = message
```
#### File: solution/validators/values.py
```python
from .._compat import to_unicode, string_types
from .validator import Validator
class LongerThan(Validator):
"""Validates the length of a value is longer or equal than minimum.
:param length:
The minimum required length of the value.
:param message:
Error message to raise in case of a validation error
"""
message = u'Field must be at least %s characters long.'
def __init__(self, length, message=None):
assert isinstance(length, int)
self.length = length
if message is None:
message = self.message % (length,)
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
py_value = to_unicode(py_value)
return len(py_value) >= self.length
class ShorterThan(Validator):
"""Validates the length of a value is shorter or equal than maximum.
:param length:
The maximum allowed length of the value.
:param message:
Error message to raise in case of a validation error
"""
message = u'Field cannot be longer than %s characters.'
def __init__(self, length, message=None):
assert isinstance(length, int)
self.length = length
if message is None:
message = self.message % (length,)
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
py_value = to_unicode(py_value or u'')
return len(py_value) <= self.length
class LessThan(Validator):
"""Validates that a value is less or equal than another.
This will work with integers, floats, decimals and strings.
:param value:
The maximum value acceptable.
:param message:
Error message to raise in case of a validation error
"""
message = u'Number must be less than %s.'
def __init__(self, value, message=None):
self.value = value
if message is None:
message = self.message % (value,)
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
if isinstance(py_value, string_types):
py_value = try_to_number(py_value)
if py_value is None:
return False
value = py_value or 0
return value <= self.value
class MoreThan(Validator):
"""Validates that a value is greater or equal than another.
This will work with any integers, floats, decimals and strings.
:param value:
The minimum value acceptable.
:param message:
Error message to raise in case of a validation error
"""
message = u'Number must be greater than %s.'
def __init__(self, value, message=None):
self.value = value
if message is None:
message = self.message % (value,)
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
if isinstance(py_value, string_types):
py_value = try_to_number(py_value)
if py_value is None:
return False
value = py_value or 0
return value >= self.value
class InRange(Validator):
"""Validates that a value is of a minimum and/or maximum value.
This will work with integers, floats, decimals and strings.
:param minval:
The minimum value acceptable.
:param maxval:
The maximum value acceptable.
:param message:
Error message to raise in case of a validation error
"""
message = u'Number must be between %s and %s.'
def __init__(self, minval, maxval, message=None):
self.minval = minval
self.maxval = maxval
if message is None:
message = self.message % (minval, maxval)
self.message = message
def __call__(self, py_value=None, form=None):
if py_value is None or py_value == u'':
return True
if isinstance(py_value, string_types):
py_value = try_to_number(py_value)
if py_value is None:
return False
value = py_value or 0
if value < self.minval:
return False
if value > self.maxval:
return False
return True
def try_to_number(value):
try:
return float(value)
except (ValueError, TypeError):
return value
```
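A sketch of the range validators above; note that string inputs are first coerced with `try_to_number`, so numeric strings compare as numbers.
```python
from solution.validators.values import InRange, LongerThan, ShorterThan

assert InRange(1, 10)(5)
assert not InRange(1, 10)(u'42')   # '42' is coerced to 42.0, which is > 10
assert LongerThan(3)(u'hello')
assert ShorterThan(3)(u'hi')
```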
#### File: solution/tests/test_fields_date.py
```python
from datetime import datetime
import pytz
import solution as f
to_unicode = f._compat.to_unicode
def _clean(form, value, **kwargs):
return value
def test_render_date():
field = f.Date(tz='America/Lima') # utc-5
field.name = u'abc'
field.load_data(obj_value=datetime(1979, 5, 30, 4, 0, 0))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="date" value="1979-05-29">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">1979-05-29</textarea>')
assert (field(foo='bar', type='text') ==
u'<input foo="bar" name="abc" type="text" value="1979-05-29">')
def test_render_date_extra():
field = f.Date(tz='America/Lima', data_modal=True, aria_label='test',
foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(obj_value=datetime(1979, 5, 30, 4, 0, 0))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="date" value="1979-05-29" data-modal>')
assert (field.as_textarea(foo='bar') ==
u'<textarea aria-label="test" foo="bar" name="abc" data-modal>1979-05-29</textarea>')
assert (field(foo='bar', type='text') ==
u'<input aria-label="test" foo="bar" name="abc" type="text" value="1979-05-29" data-modal>')
def test_render_required():
field = f.Date(validate=[f.Required])
field.name = u'abc'
assert field() == u'<input name="abc" type="date" value="" required>'
assert field.as_textarea() == u'<textarea name="abc" required></textarea>'
def test_render_default():
field = f.Date(default=datetime(2013, 7, 28, 4, 0, 0)) # default tz is utc
field.name = u'abc'
assert field() == u'<input name="abc" type="date" value="2013-07-28">'
field = f.Date(tz='America/Lima', default=datetime(2013, 7, 28, 4, 0, 0))
field.name = u'abc'
assert field() == u'<input name="abc" type="date" value="2013-07-27">'
# def test_validate_date():
# field = f.Date(tz='America/Lima')
# assert field.validate() is None
# field = f.Date(tz='America/Lima')
# field.load_data(u'1979-05-13')
# assert field.validate() == datetime(1979, 5, 13, 5, 0, 0, tzinfo=pytz.utc)
# field = f.Date(tz='America/Lima')
# field.load_data([u'1979-05-13'])
# assert field.validate() == datetime(1979, 5, 13, 5, 0, 0, tzinfo=pytz.utc)
# field = f.Date(tz='America/Lima')
# field.load_data(u'invalid')
# assert field.validate() is None
def test_validate_date_with_default():
now = datetime.utcnow()
field = f.Date(default=now)
assert field.validate() == now
def test_form_tz():
class MyForm(f.Form):
mydate = f.Date()
dt = datetime(1979, 5, 30, 4, 0, 0)
form = MyForm({}, {'mydate': dt}, tz='America/Lima')
assert form.mydate.as_input() == u'<input name="mydate" type="date" value="1979-05-29">'
```
#### File: solution/tests/test_fields_image.py
```python
import solution as f
"""Test calculate_dimensions for n cases:
1) the height and width of the current image is smaller than the desired
dimensions.
2) the height of the current image is smaller than the desired height but the
width larger.
3) The width of the current image is smaller than the desired width but the
height is larger.
4) The width and height of the current image is larger than the desired
dimensions. """
def test_calculate_dimensions_case_1():
current_size = (300, 480)
desired_size = (600, 800)
assert None is f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_2():
current_size = (600, 480)
desired_size = (300, 800)
assert (150, 0, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_3():
current_size = (300, 800)
desired_size = (600, 480)
assert (0, 160, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
def test_calculate_dimensions_case_4():
current_size = (600, 800)
desired_size = (300, 480)
assert (150, 160, 300, 480) == f.Image.calculate_dimensions(current_size, desired_size)
```
#### File: solution/tests/test_fields_simple_date.py
```python
from datetime import date
import solution as f
to_unicode = f._compat.to_unicode
def _clean(form, value, **kwargs):
return value
def test_render_date():
field = f.SimpleDate()
field.name = u'abc'
field.load_data(obj_value=date(1979, 5, 13))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="date" value="1979-05-13">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">1979-05-13</textarea>')
assert (field(foo='bar', type='text') ==
u'<input foo="bar" name="abc" type="text" value="1979-05-13">')
def test_render_date_extra():
field = f.SimpleDate(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(obj_value=date(1979, 5, 13))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="date" value="1979-05-13" data-modal>')
assert (field.as_textarea(foo='bar') ==
u'<textarea aria-label="test" foo="bar" name="abc" data-modal>1979-05-13</textarea>')
assert (field(foo='bar', type='text') ==
u'<input aria-label="test" foo="bar" name="abc" type="text" value="1979-05-13" data-modal>')
def test_render_required():
field = f.SimpleDate(validate=[f.Required])
field.name = u'abc'
assert field() == u'<input name="abc" type="date" value="" required>'
assert field.as_textarea() == u'<textarea name="abc" required></textarea>'
def test_render_default():
field = f.SimpleDate(default=date(2013, 7, 28))
field.name = u'abc'
assert field() == u'<input name="abc" type="date" value="2013-07-28">'
def test_validate_date():
field = f.SimpleDate()
assert field.validate() is None
field = f.SimpleDate()
field.load_data(u'1979-05-13')
assert field.validate() == date(1979, 5, 13)
field = f.SimpleDate()
field.load_data([u'1979-05-13'])
assert field.validate() == date(1979, 5, 13)
field = f.SimpleDate()
field.load_data(u'invalid')
assert field.validate() is None
def test_validate_date_with_default():
today = date.today()
field = f.SimpleDate(default=today)
assert field.validate() == today
```
#### File: solution/tests/test_fields_splitted_datetime.py
```python
from datetime import datetime
import pytz
import solution as f
to_unicode = f._compat.to_unicode
def _clean(form, value, **kwargs):
return value
def test_render_field():
field = f.SplittedDateTime(tz='America/Lima') # utc-5
field.name = u'abc'
field.load_data(obj_value=datetime(1979, 5, 30, 4, 20, 0))
assert field() == field.as_inputs()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="date" value="1979-05-29">'
u'<input foo="bar" name="abc" type="time" value="11:20 PM">')
assert (field(foo='bar', type='text') ==
u'<input foo="bar" name="abc" type="text" value="1979-05-29">'
u'<input foo="bar" name="abc" type="text" value="11:20 PM">')
assert (field.as_input_date(foo='bar') ==
u'<input foo="bar" name="abc" type="date" value="1979-05-29">')
assert (field.as_input_time(foo='bar') ==
u'<input foo="bar" name="abc" type="time" value="11:20 PM">')
def test_render_date_extra():
field = f.SplittedDateTime(tz='America/Lima', data_modal=True, aria_label='test',
foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(obj_value=datetime(1979, 5, 30, 4, 20, 0))
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="date" value="1979-05-29" data-modal>'
u'<input aria-label="test" foo="bar" name="abc" type="time" value="11:20 PM" data-modal>')
def test_render_required():
field = f.SplittedDateTime(validate=[f.Required])
field.name = u'abc'
assert (field() ==
u'<input name="abc" type="date" value="" required>'
u'<input name="abc" type="time" value="" required>')
def test_render_default():
field = f.SplittedDateTime(default=datetime(2013, 7, 28, 16, 20, 0)) # default tz is utc
field.name = u'abc'
assert (field() ==
u'<input name="abc" type="date" value="2013-07-28">'
u'<input name="abc" type="time" value="4:20 PM">')
field = f.SplittedDateTime(tz='America/Lima', default=datetime(2013, 7, 28, 16, 20, 0))
field.name = u'abc'
assert (field() ==
u'<input name="abc" type="date" value="2013-07-28">'
u'<input name="abc" type="time" value="11:20 AM">')
def test_validate_date():
field = f.SplittedDateTime(tz='America/Lima')
assert field.validate() is None
field = f.SplittedDateTime(tz='America/Lima')
field.load_data([u'1979-05-13'])
assert field.validate() == datetime(1979, 5, 13, 5, 0, 0, tzinfo=pytz.utc)
field = f.SplittedDateTime(tz='America/Lima')
field.load_data([u'1979-05-13', u'8:14 PM'])
assert field.validate() == datetime(1979, 5, 14, 1, 14, 0, tzinfo=pytz.utc)
field = f.SplittedDateTime(tz='America/Lima')
field.load_data([u'1979-05-13', u'20:14'])
assert field.validate() == datetime(1979, 5, 14, 1, 14, 0, tzinfo=pytz.utc)
field = f.SplittedDateTime(tz='America/Lima')
field.load_data([u'invalid', u'20:14'])
assert field.validate() is None
def test_validate_date_with_default():
now = datetime.utcnow()
field = f.SplittedDateTime(default=now)
assert field.validate() == now
```
#### File: solution/tests/test_fields_time.py
```python
import datetime
import solution as f
to_unicode = f._compat.to_unicode
def _clean(form, value, **kwargs):
return value
def test_render_time():
field = f.Time()
field.name = u'abc'
field.load_data(obj_value=datetime.time(11, 55))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input foo="bar" name="abc" type="time" value="11:55 AM">')
assert (field.as_textarea(foo='bar') ==
u'<textarea foo="bar" name="abc">11:55 AM</textarea>')
assert (field(foo='bar', type='text') ==
u'<input foo="bar" name="abc" type="text" value="11:55 AM">')
def test_render_time_extra():
field = f.Time(data_modal=True, aria_label='test', foo='niet', clean=_clean)
field.name = u'abc'
field.load_data(obj_value=datetime.time(11, 55))
assert field() == field.as_input()
assert (field(foo='bar') ==
u'<input aria-label="test" foo="bar" name="abc" type="time" value="11:55 AM" data-modal>')
assert (field.as_textarea(foo='bar') ==
u'<textarea aria-label="test" foo="bar" name="abc" data-modal>11:55 AM</textarea>')
assert (field(foo='bar', type='text') ==
u'<input aria-label="test" foo="bar" name="abc" type="text" value="11:55 AM" data-modal>')
def test_render_required():
field = f.Time(validate=[f.Required])
field.name = u'abc'
assert field() == u'<input name="abc" type="time" value="" required>'
assert field.as_textarea() == u'<textarea name="abc" required></textarea>'
def test_render_default():
field = f.Time(default=datetime.time(9, 16))
field.name = u'abc'
assert field() == u'<input name="abc" type="time" value="9:16 AM">'
field = f.Time(default=datetime.time(21, 16))
field.name = u'abc'
assert field() == u'<input name="abc" type="time" value="9:16 PM">'
def test_validate_time():
field = f.Time()
assert field.validate() is None
field = f.Time()
field.load_data(u'4:55 PM')
assert field.validate() == datetime.time(16, 55)
field = f.Time()
field.load_data(u'4:55:13 AM')
assert field.validate() == datetime.time(4, 55, 13)
field = f.Time()
field.load_data(u'invalid')
assert field.validate() is None
field = f.Time()
field.load_data(u'16:23 PM')
assert field.validate() is None
def test_validate_24hours():
field = f.Time()
field.load_data(u'16:23')
assert field.validate() == datetime.time(16, 23)
field = f.Time()
field.load_data(u'4:23')
assert field.validate() == datetime.time(4, 23)
field = f.Time()
field.load_data(u'16:23:55')
assert field.validate() == datetime.time(16, 23, 55)
def test_validate_time_with_default():
dt = datetime.time(4, 48, 16)
field = f.Time(default=dt)
assert field.validate() == dt
``` |
{
"source": "jpscaletti/sqlalchemy-wrapper",
"score": 3
} |
#### File: sqlalchemy-wrapper/sqla_wrapper/base_model.py
```python
from typing import Any
from sqlalchemy import inspect
__all__ = ("BaseModel", )
class BaseModel:
def fill(self, **attrs) -> Any:
"""Fill the object with the values of the attrs dict."""
for name in attrs:
setattr(self, name, attrs[name])
return self
def __repr__(self) -> str:
output = ["<", self.__class__.__name__, f" #{id(self)}"]
for attr in self._iter_attrs():
output.append(f"\n {self._repr_attr(attr)}")
output.append(">")
return "".join(output)
def _iter_attrs(self):
names = inspect(self.__class__).columns.keys()
for name in names:
yield (name, getattr(self, name))
def _repr_attr(self, attr):
name, value = attr
if hasattr(value, "isoformat"):
value = value.isoformat()
else:
value = repr(value)
return f"{name} = {value}"
```
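A hedged sketch of `fill` and the generated `__repr__`, assuming `BaseModel` is re-exported from the `sqla_wrapper` package and mixed into a declarative base; the model here is illustrative.
```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base
from sqla_wrapper import BaseModel

Base = declarative_base()

class User(Base, BaseModel):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

user = User().fill(id=1, name="Ada")
assert user.name == "Ada"
print(user)  # <User #...> with one "name = value" line per mapped column
```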
#### File: sqlalchemy-wrapper/tests/test_session.py
```python
from sqlalchemy import * # noqa
def test_first(dbs, TestModelA):
dbs.add(TestModelA(title="Lorem"))
dbs.add(TestModelA(title="Ipsum"))
dbs.add(TestModelA(title="Sit"))
dbs.commit()
obj = dbs.first(TestModelA)
assert obj.title == "Lorem"
def test_create(dbs, TestModelA):
dbs.create(TestModelA, title="Remember")
dbs.commit()
obj = dbs.first(TestModelA)
assert obj.title == "Remember"
def test_all(dbs, TestModelA):
dbs.create(TestModelA, title="Lorem")
dbs.create(TestModelA, title="Ipsum")
dbs.commit()
obj_list = dbs.all(TestModelA)
assert len(obj_list) == 2
def test_create_or_first_using_create(dbs, TestModelA):
obj1 = dbs.create_or_first(TestModelA, title="Lorem Ipsum")
assert obj1
dbs.commit()
obj2 = dbs.create_or_first(TestModelA, title="Lorem Ipsum")
assert obj1 == obj2
def test_create_or_first_using_first(dbs, TestModelA):
obj1 = dbs.create(TestModelA, title="Lorem Ipsum")
assert obj1
dbs.commit()
obj2 = dbs.create_or_first(TestModelA, title="Lorem Ipsum")
assert obj1 == obj2
def test_first_or_create_using_first(dbs, TestModelA):
obj1 = dbs.create(TestModelA, title="Lorem Ipsum")
assert obj1
dbs.commit()
obj2 = dbs.first_or_create(TestModelA, title="Lorem Ipsum")
assert obj1 == obj2
def test_first_or_create_using_create(dbs, TestModelA):
assert dbs.first_or_create(TestModelA, id=1, title="Lorem Ipsum")
dbs.commit()
obj = dbs.first_or_create(TestModelA, title="Lorem Ipsum")
assert obj and obj.id == 1
def test_paginate(memdb):
class Product(memdb.Model):
__tablename__ = "products"
id = Column(Integer, primary_key=True)
memdb.create_all()
total = 980
with memdb.Session() as dbs:
for _ in range(total):
dbs.add(Product())
dbs.commit()
query = select(Product)
p = dbs.paginate(
query,
total=total,
page=2,
per_page=50,
)
assert p.num_pages == 20
assert p.items[0].id == 51
``` |
{
"source": "jpsca/monolith",
"score": 2
} |
#### File: monolith/demo/app.py
```python
from pathlib import Path
from flask import Flask, render_template, request
from models import db, seed_data, Cart, Product
from turbo import render_update, turbo_stream
app = Flask(__name__)
app.debug = True
app.config["RELOADER"] = True
app.config["SECRET_KEY"] = b'_5#y2L"F4Q8z\n\xec]/'
app.config["SQLALCHEMY_DATABASE_URI"] = "sqlite:///:memory:"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_RECORD_QUERIES"] = False
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.jinja_env.globals["stream"] = turbo_stream
app.jinja_env.filters["stream"] = turbo_stream
db.init_app(app)
with app.app_context():
seed_data()
@app.route("/")
def index():
return render_template(
"index.html",
products=Product.get_all(),
counter=Cart.count(),
)
@app.route("/about")
def about():
return render_template(
"about.html",
)
@app.route("/lazy")
def lazy():
return render_update("lazy.html")
@app.route("/cart")
def cart_index():
return render_update(
"cart/cart.html",
cart_items=Cart.get_all(),
)
@app.route("/cart/add", methods=["POST"])
def cart_add():
product_id = request.args.get("product_id", type=int)
product = Product.first(id=product_id)
if not product:
return ""
Cart.add(product)
return render_update(
"cart/add.html",
counter=Cart.count(),
)
@app.route("/cart/remove", methods=["POST"])
def cart_remove():
product_id = request.args.get("product_id", type=int)
product = Product.first(id=product_id)
if not product:
return ""
Cart.remove(product)
return render_update(
"cart/remove.html",
counter=Cart.count(),
cart_items=Cart.get_all(),
)
```
#### File: monolith/demo/models.py
```python
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
db = SQLAlchemy()
class Base(db.Model):
__abstract__ = True
@classmethod
def create(cls, **attrs):
obj = cls(**attrs)
db.session.add(obj)
db.session.commit()
return obj
@classmethod
def first(cls, **attrs):
"""Returns the first object found with these attributes."""
return db.session.query(cls).filter_by(**attrs).first()
@classmethod
def first_or_create(cls, **attrs):
"""Tries to find a record, and if none exists
it tries to create a new one, adding the extra attributes.
"""
obj = cls.first(**attrs)
if obj:
return obj
return cls.create_or_first(**attrs)
@classmethod
def create_or_first(cls, **attrs):
"""Tries to create a new record, and if it fails
because it already exists, returns the first one it finds."""
try:
return cls.create(**attrs)
except IntegrityError:
db.session.rollback()
return cls.first(**attrs)
class Product(Base):
__tablename__ = "products"
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
price = db.Column(db.Integer, nullable=False, default=0)
@classmethod
def basequery(cls):
return db.session.query(cls)
@classmethod
def get_all(cls):
return cls.basequery().order_by(cls.name)
class Cart(Base):
__tablename__ = "carts"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, nullable=False)
product_id = db.Column(db.Integer, db.ForeignKey("products.id"), nullable=False)
name = db.Column(db.String, nullable=False, default="")
price = db.Column(db.Integer, nullable=False, default=0)
@classmethod
def basequery(cls, user_id=1):
return db.session.query(cls).filter_by(user_id=user_id)
@classmethod
def get_all(cls, user_id=1):
return cls.basequery(user_id).all()
@classmethod
def count(cls, user_id=1):
return cls.basequery(user_id).count()
@classmethod
def add(cls, product, user_id=1):
item = Cart.first_or_create(
product_id=product.id,
user_id=user_id
)
item.name = product.name
item.price = product.price
db.session.commit()
@classmethod
def remove(cls, product, user_id=1):
item = Cart.first(product_id=product.id, user_id=user_id)
if item:
db.session.delete(item)
db.session.commit()
def seed_data():
db.create_all()
db.session.add(Product(name="Apple", price="140"))
db.session.add(Product(name="Pear", price="65"))
db.session.add(Product(name="Banana", price="90"))
db.session.commit()
``` |
{
"source": "jpsca/pforms",
"score": 3
} |
#### File: proper_forms/fields/date.py
```python
from .text import Text
from ..ftypes import type_date
__all__ = ("Date", )
class Date(Text):
"""A simple date field formatted as `YYYY-MM-dd`. Example: "1980-07-28".
"""
input_type = "date"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.error_messages.setdefault(
"type", "Date must have a YYYY-MM-dd format."
)
def prepare(self, object_value):
return [object_value.strftime("%Y-%m-%d")]
def type(self, value):
return type_date(value)
```
#### File: proper_forms/fields/date_time.py
```python
from datetime import datetime
from .text import Text
from ..ftypes import type_date, type_time
__all__ = ("DateTime",)
class DateTime(Text):
"""A simple date-time field formatted as `YYYY-MM-dd` with the time in 12 or
24-hours format, seconds optional.
Examples: "1980-07-28 5:03 AM", "2019-09-08 4:20:16 PM", "2019-09-08 16:34".
"""
input_type = "date"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.error_messages.setdefault(
"type",
"DateTime must have a YYYY-MM-dd with time in 12h or 24h format,"
" seconds optional.",
)
def prepare(self, object_value):
return [
self._prepare_date(object_value) + " " + self._prepare_time(object_value)
]
def _prepare_date(self, object_value):
return object_value.strftime("%Y-%m-%d")
def _prepare_time(self, object_value):
value = "{}:{:02d}".format(
object_value.hour if object_value.hour <= 12 else object_value.hour - 12,
object_value.minute,
)
if object_value.second:
value += ":{:02d}".format(object_value.second)
value += object_value.strftime(" %p")
return value
def type(self, value):
if " " not in value:
value += " 00:00" # So it always has a time
date_part, time_part = value.split(" ", maxsplit=1)
return datetime.combine(type_date(date_part), type_time(time_part))
```
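A sketch of the parsing above: a bare date gets "00:00" appended before the split, so both forms round-trip through `type`; this assumes `proper_forms` is installed and that `type_time` accepts the 24-hour "00:00".
```python
from datetime import datetime
from proper_forms import fields as f

field = f.DateTime()
assert field.type("1980-07-28 5:03 AM") == datetime(1980, 7, 28, 5, 3)
assert field.type("1980-07-28") == datetime(1980, 7, 28, 0, 0)
```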
#### File: proper_forms/fields/email.py
```python
from .text import Text
from ..ftypes import type_email
__all__ = ("Email", )
class Email(Text):
"""Validates and normalize an email address using the
JoshData/python-email-validator library.
Even if the format is valid, it cannot guarantee that the email is real, so the
purpose of this field is to alert the user of a typing mistake.
The normalizations include lowercasing the domain part of the email address
(domain names are case-insensitive), unicode "NFC" normalization of the whole
address (which turns characters plus combining characters into precomposed
characters where possible and replaces certain unicode characters (such as
angstrom and ohm) with other equivalent code points (a-with-ring and omega,
respectively)), replacement of fullwidth and halfwidth characters in the domain
part, and possibly other UTS46 mappings on the domain part.
Options:
check_dns (bool):
Check if the domain name in the email address resolves.
There is nothing to be gained by trying to actually contact an SMTP server,
so that's not done.
allow_smtputf8 (bool):
Accept non-ASCII characters in the local part of the address
(before the @-sign). These email addresses require that your mail
submission library and the mail servers along the route to the destination,
including your own outbound mail server, all support the
[SMTPUTF8 (RFC 6531)](https://tools.ietf.org/html/rfc6531) extension.
By default this is set to `False`.
"""
input_type = "email"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.error_messages.setdefault("type", "Doesn‘t look like a valid e-mail.")
def type(self, value, check_dns=False, allow_smtputf8=False):
return type_email(value, check_dns=check_dns, allow_smtputf8=allow_smtputf8)
```
#### File: proper_forms/fields/splitted.py
```python
from .text import Text
__all__ = ("Splitted", )
class Splitted(Text):
def __init__(self, *args, **kwargs):
assert not kwargs.get("collection"), "A splitted field cannot be a collection."
super().__init__(*args, **kwargs)
```
#### File: proper_forms/ftypes/boolean.py
```python
__all__ = ("type_boolean", )
default_false_values = ("", "none", "0", "no", "nope", "nah", "off", "false")
def type_boolean(value, false_values=default_false_values):
if value in (False, None):
return False
if str(value).strip().lower() in false_values:
return False
return True
```
#### File: proper_forms/ftypes/email.py
```python
from email_validator import validate_email
__all__ = ("type_email", )
def type_email(value, check_dns=False, allow_smtputf8=False):
"""Validates and normalize an email address using the
JoshData/python-email-validator library.
Even if the format is valid, it cannot guarantee that the email is real. The
purpose of this function is to alert the user of a typing mistake.
The normalizations include lowercasing the domain part of the email address
(domain names are case-insensitive), Unicode "NFC" normalization of the whole
address (which turns characters plus combining characters into precomposed
characters where possible and replaces certain Unicode characters (such as
angstrom and ohm) with other equivalent code points (a-with-ring and omega,
respectively)), replacement of fullwidth and halfwidth characters in the domain
part, and possibly other UTS46 mappings on the domain part.
Options:
check_dns (bool):
Check if the domain name in the email address resolves.
There is nothing to be gained by trying to actually contact an SMTP server,
so that's not done.
allow_smtputf8 (bool):
Accept non-ASCII characters in the local part of the address
(before the @-sign). These email addresses require that your mail
submission library and the mail servers along the route to the destination,
including your own outbound mail server, all support the
[SMTPUTF8 (RFC 6531)](https://tools.ietf.org/html/rfc6531) extension.
By default this is set to `False`.
"""
try:
v = validate_email(
value,
check_deliverability=check_dns,
allow_smtputf8=allow_smtputf8,
)
return v["email"]
except (ValueError, TypeError):
return None
```
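A sketch of the normalization and failure modes, assuming the `email_validator` dependency is installed; the addresses are illustrative.
```python
from proper_forms.ftypes import type_email

assert type_email("Alice@EXAMPLE.COM") == "[email protected]"  # domain lowercased, local part kept
assert type_email("not-an-email") is None
```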
#### File: tests/fields/test_fields.py
```python
from datetime import date, datetime, time
import pytest
from proper_forms import fields as f
TEST_DATA = [
(
f.Date,
"1573-09-11",
date(1573, 9, 11),
"invalid",
"Date must have a YYYY-MM-dd format.",
),
(
f.DateTime,
"1573-09-11 16:23",
datetime(1573, 9, 11, 16, 23),
"invalid",
"DateTime must have a YYYY-MM-dd with time in 12h or 24h format,"
" seconds optional.",
),
(
f.DateTime,
"1573-09-11 16:23:15",
datetime(1573, 9, 11, 16, 23, 15),
"invalid",
"DateTime must have a YYYY-MM-dd with time in 12h or 24h format,"
" seconds optional.",
),
(
f.DateTime,
"1573-09-11 4:23 AM",
datetime(1573, 9, 11, 4, 23),
"invalid",
"DateTime must have a YYYY-MM-dd with time in 12h or 24h format,"
" seconds optional.",
),
(
f.DateTime,
"1573-09-11 4:23:15 PM",
datetime(1573, 9, 11, 16, 23, 15),
"invalid",
"DateTime must have a YYYY-MM-dd with time in 12h or 24h format,"
" seconds optional.",
),
(
f.Email,
"<EMAIL>",
"<EMAIL>",
"invalid",
"Doesn‘t look like a valid e-mail.",
),
(
f.HexColor,
"#fef",
"#ffeeff",
"invalid",
"Enter color in #hex, rgb() or rgba() format.",
),
(
f.Integer,
"123",
123,
"invalid",
"Not a valid integer.",
),
(
f.Float,
"123.2",
123.2,
"invalid",
"Not a valid float number.",
),
(
f.Month,
"1973-05",
date(1973, 5, 1),
"invalid",
"Month must have a YYYY-MM format.",
),
(
f.Time,
"5:34 am",
time(5, 34, 0),
"invalid",
"Enter a time in a 12h or 24h format.",
),
(
f.Time,
"5:34:55 am",
time(5, 34, 55),
"invalid",
"Enter a time in a 12h or 24h format.",
),
(
f.Time,
"18:34",
time(18, 34, 0),
"invalid",
"Enter a time in a 12h or 24h format.",
),
(
f.Time,
"18:34:55",
time(18, 34, 55),
"invalid",
"Enter a time in a 12h or 24h format.",
),
(
f.URL,
"example.com",
"http://example.com",
"inv..alid",
"Doesn‘t look like a valid URL.",
),
]
@pytest.mark.parametrize("Field, valid, expected, invalid, error", TEST_DATA)
def test_fields(Field, valid, expected, invalid, error):
field = Field()
field.input_values = [valid]
assert field.validate() == expected
assert field.error is None
assert field.error_value is None
field.input_values = [invalid]
assert field.validate() is None
assert field.error == error
assert field.error_value == invalid
@pytest.mark.parametrize("Field, _valid, _expected, invalid, _error", TEST_DATA)
def test_required_filtered_values(Field, _valid, _expected, invalid, _error):
field = Field(required=True, strict=False)
field.input_values = [invalid]
assert field.validate() is None
assert field.error == "This field is required."
@pytest.mark.parametrize("Field, valid, expected, invalid, error", TEST_DATA)
def test_field_single(Field, valid, expected, invalid, error):
field = Field()
field.input_values = [valid]
assert field.validate() == expected
assert field.error is None
assert field.error_value is None
@pytest.mark.parametrize("Field, valid, expected, invalid, error", TEST_DATA)
def test_fields_multiple(Field, valid, expected, invalid, error):
field = Field(multiple=True)
field.input_values = [valid, valid, valid]
assert field.validate() == [expected, expected, expected]
assert field.error is None
assert field.error_value is None
field.input_values = [valid, invalid, valid]
assert field.validate() is None
assert field.error == error
assert field.error_value == invalid
@pytest.mark.parametrize("Field, valid, _expected, invalid, _error", TEST_DATA)
def test_fields_single_not_strict(Field, valid, _expected, invalid, _error):
field = Field(strict=False)
field.input_values = [invalid]
assert field.validate() is None
assert field.error is None
assert field.error_value is None
@pytest.mark.parametrize("Field, valid, expected, invalid, _error", TEST_DATA)
def test_fields_multiple_not_strict(Field, valid, expected, invalid, _error):
field = Field(multiple=True, strict=False)
field.input_values = [valid, invalid, valid]
assert field.validate() == [expected, expected]
assert field.error is None
assert field.error_value is None
TEST_DATA_PREPARE = [
(f.Date, ["1973-09-11"], date(1973, 9, 11)),
(f.DateTime, ["1973-09-11 4:24 PM"], datetime(1973, 9, 11, 16, 24)),
(f.DateTime, ["1973-09-11 4:24:15 AM"], datetime(1973, 9, 11, 4, 24, 15)),
(f.Integer, ["123"], 123),
(f.Float, ["123.2"], 123.2),
(f.Month, ["1973-05"], date(1973, 5, 1)),
(f.SplittedDateTime, ["1973-09-11", "5:34 AM"], datetime(1973, 9, 11, 5, 34, 0)),
(f.SplittedDateTime, ["1973-09-11", "4:20:17 PM"], datetime(1973, 9, 11, 16, 20, 17)),
(f.Time, ["5:34 AM"], time(5, 34, 0)),
(f.Time, ["4:20:17 PM"], time(16, 20, 17)),
]
@pytest.mark.parametrize("Field, expected, object_value", TEST_DATA_PREPARE)
def test_fields_prepare(Field, expected, object_value):
field = Field()
field.object_value = object_value
assert field.values == expected
assert field.error is None
assert field.error_value is None
@pytest.mark.parametrize("Field, expected, object_value", TEST_DATA_PREPARE)
def test_fields_updated(Field, expected, object_value):
field = Field()
field.input_values = expected
assert field.validate() == object_value
assert field.updated is True
@pytest.mark.parametrize("Field, expected, object_value", TEST_DATA_PREPARE)
def test_fields_not_updated(Field, expected, object_value):
field = Field()
field.input_values = expected
field.object_value = object_value
assert field.validate() == object_value
assert field.updated is False
def test_text():
field = f.Text()
field.input_values = ["lorem", "ipsum"]
assert field.validate() == "lorem"
assert field.error is None
assert field.error_value is None
field = f.Text(multiple=True)
field.input_values = ["lorem", "ipsum"]
assert field.validate() == ["lorem", "ipsum"]
def test_required_text():
field = f.Text(required=True)
field.input_values = []
assert field.validate() is None
assert field.error == "This field is required."
field = f.Text(required=True)
field.input_values = [""]
assert field.validate() is None
assert field.error == "This field is required."
def test_slug():
field = f.Slug()
field.input_values = ["This is a test ---", "meh"]
assert field.validate() == "this-is-a-test"
assert field.error is None
assert field.error_value is None
field = f.Slug(multiple=True, separator="_")
field.input_values = ["lorem ipsum", "This is a test ---"]
assert field.validate() == ["lorem_ipsum", "this_is_a_test"]
def test_boolean():
field = f.Boolean()
field.input_values = ["on"]
assert field.validate() is True
assert field.error is None
assert field.error_value is None
field.input_values = [""]
assert field.validate() is False
assert field.error is None
assert field.error_value is None
def test_boolean_single():
field = f.Boolean()
field.input_values = ["on", "yes", "no"]
assert field.validate() is True
assert field.error is None
assert field.error_value is None
field.input_values = ["", "yes", "1"]
assert field.validate() is False
assert field.error is None
assert field.error_value is None
def test_boolean_multiple():
field = f.Boolean(multiple=True)
field.input_values = ["on", "yes", "no"]
assert field.validate() == [True, True, False]
assert field.error is None
assert field.error_value is None
field.input_values = ["", "yes", "1"]
assert field.validate() == [False, True, True]
assert field.error is None
assert field.error_value is None
def test_splitted_date_time():
field = f.SplittedDateTime()
field.input_values = ["1973-09-11", "5:34 pm"]
assert field.validate() == datetime(1973, 9, 11, 17, 34, 0)
assert field.error is None
assert field.error_value is None
field.input_values = ["1973-09-11"]
assert field.validate() == datetime(1973, 9, 11, 0, 0, 0)
assert field.error is None
assert field.error_value is None
field.input_values = ["invalid"]
assert field.validate() is None
assert field.error == "Invalid type."
assert field.error_value == ("invalid", "00:00")
field.input_values = ["invalid", "5:34 pm"]
assert field.validate() is None
assert field.error == "Invalid type."
assert field.error_value == ("invalid", "5:34 pm")
def test_splitted_date_time_single():
field = f.SplittedDateTime()
field.input_values = ["2018-05-05", "16:30", "2019-05-05", "16:30"]
result = field.validate()
assert result == datetime(2018, 5, 5, 16, 30, 0)
assert field.error is None
assert field.error_value is None
def test_splitted_date_time_multiple():
field = f.SplittedDateTime(multiple=True)
field.input_values = ["2018-05-05", "16:30", "2019-05-05", "16:30"]
result = field.validate()
expected = [datetime(2018, 5, 5, 16, 30, 0), datetime(2019, 5, 5, 16, 30, 0)]
assert result == expected
assert field.error is None
assert field.error_value is None
def test_splitted_date_time_single_not_strict():
field = f.SplittedDateTime(strict=False)
field.input_values = ["invalid"]
assert field.validate() is None
assert field.error is None
assert field.error_value is None
def test_splitted_date_time_multiple_not_strict():
field = f.SplittedDateTime(multiple=True, strict=False)
field.input_values = ["2018-05-05", "16:30", "invalid", "lalala"]
result = field.validate()
expected = [datetime(2018, 5, 5, 16, 30, 0)]
assert result == expected
assert field.error is None
assert field.error_value is None
def test_splitted_fields_cannot_be_a_collection():
with pytest.raises(AssertionError):
f.SplittedDateTime(collection=True)
with pytest.raises(AssertionError):
f.Splitted(collection=True)
def test_splitted_fields_take_all_values():
class MyLittleSplitted(f.Splitted):
def _typecast_values(self, values):
self.called_with = values[:]
return values
field = MyLittleSplitted()
field.input_values = ["a", "b", "c", "d"]
assert field.validate() == "a"
assert field.called_with == ["a", "b", "c", "d"]
```
#### File: tests/fields/test_render_as_checkbox.py
```python
import proper_forms.fields as f
def test_text_as_checkbox():
field = f.Text(name="name")
assert field.as_checkbox() == '<input name="name" type="checkbox">'
def test_text_as_checkbox_with_label():
field = f.Text(name="name")
assert (
field.as_checkbox(label="I have read the TOS")
== '<label class="checkbox"><input name="name" type="checkbox">'
" I have read the TOS</label>"
)
def test_text_as_checkbox_checked():
field = f.Text(name="name")
field.input_values = ["hello"]
assert field.as_checkbox() == '<input name="name" type="checkbox" checked>'
def test_boolean_as_checkbox():
field = f.Boolean(name="name")
assert field.as_checkbox() == '<input name="name" type="checkbox">'
def test_boolean_as_checkbox_checked():
field = f.Boolean(name="name")
field.object_value = True
assert field.as_checkbox() == '<input name="name" type="checkbox" checked>'
def test_boolean_as_checkbox_force_checked():
field = f.Boolean(name="name")
assert (
field.as_checkbox(checked=True) == '<input name="name" type="checkbox" checked>'
)
def test_boolean_as_checkbox_custom_value():
field = f.Boolean(name="name")
assert (
field.as_checkbox(value="newsletter")
== '<input name="name" type="checkbox" value="newsletter">'
)
def test_boolean_as_checkbox_custom_value_checked():
field = f.Boolean(name="name")
field.input_values = ["newsletter"]
assert (
field.as_checkbox(value="newsletter")
== '<input name="name" type="checkbox" value="newsletter" checked>'
)
def test_boolean_as_checkbox_custom_str_value_checked():
field = f.Boolean(name="name")
field.input_values = [5]
assert (
field.as_checkbox(value="5")
== '<input name="name" type="checkbox" value="5" checked>'
)
def test_boolean_as_checkbox_custom_str_reverse_value_checked():
field = f.Boolean(name="name")
field.input_values = ["5"]
assert (
field.as_checkbox(value=5)
== '<input name="name" type="checkbox" value="5" checked>'
)
def test_boolean_as_checkbox_custom_values_checked():
field = f.Boolean(name="name", multiple=True)
field.input_values = ["alerts", "newsletter", "replies"]
assert (
field.as_checkbox(value="newsletter")
== '<input name="name" type="checkbox" value="newsletter" checked>'
)
def test_boolean_as_checkbox_custom_value_unchecked():
field = f.Boolean(name="name")
field.input_values = ["newsletter"]
assert (
field.as_checkbox(value="direct")
== '<input name="name" type="checkbox" value="direct">'
)
def test_boolean_as_checkbox_custom_values_unchecked():
field = f.Boolean(name="name", multiple=True)
field.input_values = ["alerts", "newsletter", "replies"]
assert (
field.as_checkbox(value="direct")
== '<input name="name" type="checkbox" value="direct">'
)
def test_boolean_as_checkbox_custom_value_object_unchecked():
field = f.Boolean(name="name")
field.object_value = True
assert (
field.as_checkbox(value="newsletter")
== '<input name="name" type="checkbox" value="newsletter">'
)
```
#### File: tests/fields/test_render.py
```python
import proper_forms.fields as f
def test_render_attrs():
field = f.Text()
attrs = {
"id": "text1",
"classes": "myclass",
"data_id": 1,
"checked": True,
"ignore": False,
}
assert (
str(field.render_attrs(**attrs))
== 'class="myclass" data-id="1" id="text1" checked'
)
def test_render_attrs_empty():
field = f.Text()
assert str(field.render_attrs()) == ""
def test_render_attrs_bad():
field = f.Text()
assert (
str(field.render_attrs(myattr="a'b\"><script>bad();</script>"))
== 'myattr="a\'b"><script>bad();</script>"'
)
def test_object_value():
field = f.Text(prepare=lambda x: [str(x * 2)])
field.object_value = 2
assert field.values == ["4"]
assert field.value == "4"
def test_input_values():
field = f.Text()
field.input_values = ["hello"]
assert field.values == ["hello"]
assert field.value == "hello"
def test_input_value_over_object_value():
field = f.Text()
field.input_values = ["foo"]
field.object_value = "bar"
assert field.values == ["foo"]
assert field.value == "foo"
def test_render_error():
field = f.Text(required=True)
assert str(field.render_error()) == ""
field.validate()
error = "This field is required."
assert str(field.render_error()) == f'<div class="error">{error}</div>'
assert str(field.render_error("p")) == f'<p class="error">{error}</p>'
assert (
str(field.render_error(classes="errorMessage"))
== f'<div class="errorMessage">{error}</div>'
)
```
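The three value tests above establish a precedence rule: submitted input wins over the stored object value, and the object value is passed through `prepare` to produce the rendered values. A minimal sketch, under the same imports as the tests:
```python
import proper_forms.fields as f

# Submitted input takes precedence over the stored object value.
field = f.Text()
field.object_value = "stored"
field.input_values = ["submitted"]
assert field.value == "submitted"

# Without input, the object value is run through `prepare` first.
doubled = f.Text(prepare=lambda x: [str(x * 2)])
doubled.object_value = 21
assert doubled.value == "42"
```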
#### File: tests/ftypes/test_type_date.py
```python
from datetime import date
import pytest
from proper_forms.ftypes import type_date
VALID_DATES = [
("2019-05-29", date(2019, 5, 29)),
("2019-5-29", date(2019, 5, 29)),
("2999-12-31", date(2999, 12, 31)),
("1970-01-01", date(1970, 1, 1)),
("1970-1-1", date(1970, 1, 1)),
("1-01-1", date(1, 1, 1)),
]
INVALID_DATES = [
"",
"qwertyuiop",
"2019/05/29",
"2019-13-01",
"2019-02-31",
"2019-02-99",
"-02-31",
"02-31",
"999999999-02-13",
]
@pytest.mark.parametrize("value, expected", VALID_DATES)
def test_type_date_valid(value, expected):
assert type_date(value) == expected
@pytest.mark.parametrize("value", INVALID_DATES)
def test_type_date_invalid(value):
assert type_date(value) is None
```
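The parametrized cases amount to a simple contract for `type_date`: ISO-style `YYYY-M-D` strings become `datetime.date` objects, and everything else, including calendar-impossible dates, returns `None`. For example:
```python
from datetime import date
from proper_forms.ftypes import type_date

assert type_date("2019-5-29") == date(2019, 5, 29)  # single-digit parts are fine
assert type_date("2019/05/29") is None              # wrong separator
assert type_date("2019-02-31") is None              # impossible calendar day
```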
#### File: tests/ftypes/test_type_hex_color.py
```python
import pytest
from proper_forms.ftypes import type_hex_color
VALID_COLORS = [
("#2868c7", "#2868c7"),
("#FFF", "#ffffff"),
("#F2a", "#ff22aa"),
("#f2aa", "#ff22aaaa"),
("123", "#112233"),
("123456", "#123456"),
("rgb(0,0,0)", "#000000"),
("rgb(0, 0 ,0)", "#000000"),
("rgb( 0, 0 ,0 )", "#000000"),
("rgb(40,104,199)", "#2868c7"),
("rgba(40,104,199,0.5)", "#2868c780"),
("rgba(0,0,0)", "#000000"),
]
INVALID_COLORS = [
"",
"qwertyuiop",
"1",
"12",
"12345",
"#f",
"#ff",
"#abcde",
"#g2e",
"#ggff00",
"rgb(256,0,0)",
"rgb(0,-1,0)",
"rgba(0,0,0,9)",
]
@pytest.mark.parametrize("value, expected", VALID_COLORS)
def test_type_hex_color_valid(value, expected):
assert type_hex_color(value) == expected
@pytest.mark.parametrize("value", INVALID_COLORS)
def test_type_hex_color_invalid(value):
assert type_hex_color(value) is None
```
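Likewise, `type_hex_color` normalizes shorthand hex and `rgb()`/`rgba()` strings to a lowercase `#rrggbb` (or `#rrggbbaa`) form, returning `None` for anything out of range:
```python
from proper_forms.ftypes import type_hex_color

assert type_hex_color("#F2a") == "#ff22aa"                 # shorthand expanded
assert type_hex_color("rgba(40,104,199,0.5)") == "#2868c780"
assert type_hex_color("rgb(256,0,0)") is None              # channel out of range
```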
#### File: pforms/tests/test_form.py
```python
import proper_forms as f
def test_declare_form():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True, error_messages={"required": "write something!"})
form = ContactForm()
assert sorted(form._fields) == [
"email",
"message",
"subject",
]
assert form.updated_fields is None
assert form.subject.name == "subject"
assert form.email.name == "email"
assert form.message.name == "message"
def test_form_independence():
class AForm(f.Form):
afield = f.Text()
form1 = AForm({"afield": "1"})
form2 = AForm({"afield": "2"})
form3 = AForm({"afield": "3"})
assert form1.afield != form2.afield
assert form1.afield != form3.afield
assert form2.afield != form3.afield
assert form1.afield.value == "1"
assert form2.afield.value == "2"
assert form3.afield.value == "3"
def test_form_independence_same_input():
class AForm(f.Form):
afield = f.Text()
input_data = {"afield": "text"}
form1 = AForm(input_data)
form2 = AForm(input_data)
form3 = AForm(input_data)
assert form1.afield != form2.afield
assert form1.afield != form3.afield
assert form2.afield != form3.afield
def test_form_independence_prefixes():
class AForm(f.Form):
afield = f.Text()
input_data = {
f"f1{f.SEP}afield": "1",
f"f2{f.SEP}afield": "2",
f"f3{f.SEP}afield": "3",
}
form1 = AForm(input_data, prefix="f1")
form2 = AForm(input_data, prefix="f2")
form3 = AForm(input_data, prefix="f3")
assert form1.afield != form2.afield
assert form1.afield != form3.afield
assert form2.afield != form3.afield
assert form1.afield.value == "1"
assert form2.afield.value == "2"
assert form3.afield.value == "3"
def test_declare_form_with_prefix():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True, error_messages={"required": "write something!"})
form = ContactForm(prefix="myform")
assert sorted(form._fields) == [
"email",
"message",
"subject",
]
assert form.subject.name == f"myform{f.SEP}subject"
assert form.email.name == f"myform{f.SEP}email"
assert form.message.name == f"myform{f.SEP}message"
def test_validate_empty_form():
class MyForm(f.Form):
lorem = f.Text()
ipsum = f.Text()
form = MyForm()
assert form.validate() == {"lorem": None, "ipsum": None}
assert form.updated_fields == []
def test_validate_blank_form():
class MyForm(f.Form):
lorem = f.Text()
ipsum = f.Text()
form = MyForm({"lorem": "", "ipsum": ""})
assert form.validate() == {"lorem": "", "ipsum": ""}
assert sorted(form.updated_fields) == ["ipsum", "lorem"]
def test_validate_optional_form():
class MyForm(f.Form):
lorem = f.Text()
ipsum = f.Text()
form = MyForm({"lorem": "foo", "ipsum": "bar"})
assert form.validate() == {"lorem": "foo", "ipsum": "bar"}
assert sorted(form.updated_fields) == ["ipsum", "lorem"]
def test_load_object():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True)
data = {
"subject": "Hello world",
"email": "<EMAIL>",
"message": "Lorem ipsum.",
}
form = ContactForm({}, data)
assert form.subject.value == data["subject"]
assert form.email.value == data["email"]
assert form.message.value == data["message"]
def test_load_object_instance():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True)
class MyObject(object):
subject = "Hello world"
email = "<EMAIL>"
message = "Lorem ipsum."
obj = MyObject()
form = ContactForm({}, obj)
assert form.subject.value == obj.subject
assert form.email.value == obj.email
assert form.message.value == obj.message
def test_validate_form_input():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True)
data = {
"subject": "Hello world",
"email": "<EMAIL>",
"message": "Lorem ipsum.",
}
form = ContactForm(data)
assert form.validate() == data
def test_validate_form_input_required():
class ContactForm(f.Form):
subject = f.Text(required=True)
message = f.Text(required=True)
data = {
"subject": "",
"message": "",
}
form = ContactForm(data)
assert form.validate() is None
def test_do_not_validate_form_object():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True)
class MyObject(object):
subject = "Hello world"
email = "<EMAIL>"
message = "Lorem ipsum."
obj = MyObject()
form = ContactForm({}, obj)
assert form.validate() is None
def test_validate_form_error():
class ContactForm(f.Form):
subject = f.Text(required=True)
email = f.Email()
message = f.Text(required=True, error_messages={"required": "write something!"})
form = ContactForm({"email": "<EMAIL>"})
assert form.validate() is None
assert form.subject.error == "This field is required."
assert form.message.error == "write something!"
def test_idempotent_valid_is_valid():
class MyForm(f.Form):
lorem = f.Text()
form = MyForm()
assert form.validate()
assert form.validate()
assert form.validate()
def test_idempotent_invalid_is_valid():
class MyForm(f.Form):
lorem = f.Text(required=True)
form = MyForm()
assert form.validate() is None
assert form.validate() is None
assert form.validate() is None
def test_updated_fields_from_empty():
class MyForm(f.Form):
a = f.Text()
b = f.Text()
c = f.Text()
d = f.Text()
form = MyForm({"b": "foo", "d": "bar"})
assert form.validate()
assert sorted(form.updated_fields) == ["b", "d"]
def test_updated_fields_from_object():
class MyForm(f.Form):
a = f.Text()
b = f.Text()
c = f.Text()
d = f.Text()
form = MyForm(
{"a": "a", "b": "new", "c": "c", "d": "new"},
{"a": "a", "b": "b", "c": "c", "d": "d"},
)
assert form.validate()
assert sorted(form.updated_fields) == ["b", "d"]
def test_load_clean_and_prepare():
class MyForm(f.Form):
meh = f.Text()
def prepare_meh(self, object_value):
return [object_value]
def clean_meh(self, pyvalues):
return pyvalues
form = MyForm()
assert form.meh.custom_prepare == form.prepare_meh
assert form.meh.custom_clean == form.clean_meh
def test_dont_overwrite_field_clean_and_prepare():
def field_prepare(self, object_value):
return [object_value]
def field_clean(self, pyvalues):
return pyvalues
class MyForm(f.Form):
meh = f.Text(prepare=field_prepare, clean=field_clean)
def prepare_meh(self, object_value):
return [object_value]
def clean_meh(self, pyvalues):
return pyvalues
form = MyForm()
assert form.meh.custom_prepare == field_prepare
assert form.meh.custom_clean == field_clean
```
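These tests imply the usual request-handling flow: build the form from input (and optionally an object being edited), call `validate()`, and branch on the result. A minimal sketch of that flow, reusing the `ContactForm` shape from the tests:
```python
import proper_forms as f

class ContactForm(f.Form):
    subject = f.Text(required=True)
    email = f.Email()
    message = f.Text(required=True)

# First argument is the submitted input; a second argument would be the
# object being edited (its values pre-fill the form but are not validated).
form = ContactForm({"subject": "Hi", "message": "Hello there"})
data = form.validate()  # dict of cleaned values, or None on any error
if data is None:
    print(form.subject.error)  # per-field error message
else:
    print(data, form.updated_fields)
```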
#### File: pforms/tests/test_validators.py
```python
from datetime import date, datetime
import pytest
from proper_forms import Date, DateTime, Field, Integer
from proper_forms import validators as v
def test_confirmed_message():
validator = v.Confirmed()
assert validator.message == "Values doesn't match."
assert v.Confirmed(message="custom").message == "custom"
def test_longer_than_message():
validator = v.LongerThan(5)
assert validator.message == "Field must be at least 5 character long."
assert v.LongerThan(5, message="custom").message == "custom"
def test_shorter_than_message():
validator = v.ShorterThan(5)
assert validator.message == "Field cannot be longer than 5 characters."
assert v.ShorterThan(5, message="custom").message == "custom"
def test_less_than_message():
validator = v.LessThan(10)
assert validator.message == "Number must be less than 10."
assert v.LessThan(10, message="custom").message == "custom"
def test_more_than_message():
validator = v.MoreThan(10)
assert validator.message == "Number must be greater than 10."
assert v.MoreThan(10, message="custom").message == "custom"
def test_in_range_message():
validator = v.InRange(1900, 2010)
assert validator.message == "Number must be between 1900 and 2010."
assert v.InRange(1900, 2010, message="custom").message == "custom"
def test_before_message():
dt = datetime(2017, 7, 5)
validator = v.Before(dt)
assert validator.message == "Enter a valid date before 2017-07-05."
assert v.Before(dt, message="custom").message == "custom"
def test_after_message():
dt = datetime(2017, 7, 5)
validator = v.After(dt)
assert validator.message == "Enter a valid date after 2017-07-05."
assert v.After(dt, message="custom").message == "custom"
def test_before_now_message():
validator = v.BeforeNow()
assert validator.message == "Enter a valid date in the past."
assert v.BeforeNow(message="custom").message == "custom"
def test_after_now_message():
validator = v.AfterNow()
assert validator.message == "Enter a valid date in the future."
assert v.AfterNow(message="custom").message == "custom"
DATA = [
[Field, v.Confirmed(), ["password", "password"], True],
[Field, v.Confirmed(), ["password", "password", "password"], True],
[Field, v.Confirmed(), ["password"], False],
[Field, v.Confirmed(), ["lorem", "ipsum"], False],
[Field, v.Confirmed(), ["password", "<PASSWORD>", "password"], False],
[Field, v.LongerThan(5), ["123456789"], True],
[Field, v.LongerThan(5), ["12345"], True],
[Field, v.LongerThan(5), ["abc"], False],
[Field, v.LongerThan(5), ["123456789", "qwertyuiop", "lorem ipsum"], True],
[Field, v.LongerThan(5), ["123456789", "abc", "lorem ipsum"], False],
[Field, v.ShorterThan(5), ["123"], True],
[Field, v.ShorterThan(5), ["12345"], True],
[Field, v.ShorterThan(5), ["qwertyuiop"], False],
[Field, v.ShorterThan(5), ["1234", "abc", "lorem"], True],
[Field, v.ShorterThan(5), ["1234", "abcdefghijk", "lorem"], False],
[Integer, v.LessThan(10), ["8"], True],
[Integer, v.LessThan(10), ["10"], True],
[Integer, v.LessThan(10), ["34"], False],
[Integer, v.LessThan(10), ["4", "3", "5"], True],
[Integer, v.LessThan(10), ["4", "3", "25"], False],
[Integer, v.MoreThan(10), ["20"], True],
[Integer, v.MoreThan(10), ["-1"], False],
[Integer, v.MoreThan(10), ["20", "13", "25"], True],
[Integer, v.MoreThan(10), ["8", "13", "25"], False],
[Integer, v.InRange(1900, 2010), ["1979"], True],
[Integer, v.InRange(1900, 2010), ["1900"], True],
[Integer, v.InRange(1900, 2010), ["2010"], True],
[Integer, v.InRange(1900, 2010), ["1820"], False],
[Integer, v.InRange(1900, 2010), ["3000"], False],
[Integer, v.InRange(1900, 2010), ["-1"], False],
[Integer, v.InRange(1900, 2010), ["1979", "1984", "2009"], True],
[Integer, v.InRange(1900, 2010), ["1979", "1984", "2019"], False],
[Date, v.Before(datetime(2017, 7, 5)), ["1979-05-05"], True],
[Date, v.Before(datetime(2017, 7, 5)), ["2019-07-16"], False],
[Date, v.Before(date(2017, 7, 5)), ["1979-05-05"], True],
[Date, v.Before(date(2017, 7, 5)), ["2019-07-16"], False],
[Date, v.After(datetime(2017, 7, 5)), ["2019-07-16"], True],
[Date, v.After(datetime(2017, 7, 5)), ["1979-05-05"], False],
[Date, v.After(date(2017, 7, 5)), ["2019-07-16"], True],
[Date, v.After(date(2017, 7, 5)), ["1979-05-05"], False],
[Date, v.BeforeNow(), ["1821-07-28"], True],
[Date, v.BeforeNow(), ["3000-01-01"], False],
[Date, v.AfterNow(), ["3000-01-01"], True],
[Date, v.AfterNow(), ["1821-07-28"], False],
[DateTime, v.Before(datetime(2017, 7, 5)), ["1979-05-05"], True],
[DateTime, v.Before(datetime(2017, 7, 5)), ["2019-07-16"], False],
[DateTime, v.Before(date(2017, 7, 5)), ["1979-05-05"], True],
[DateTime, v.Before(date(2017, 7, 5)), ["2019-07-16"], False],
[DateTime, v.After(datetime(2017, 7, 5)), ["2019-07-16"], True],
[DateTime, v.After(datetime(2017, 7, 5)), ["1979-05-05"], False],
[DateTime, v.After(date(2017, 7, 5)), ["2019-07-16"], True],
[DateTime, v.After(date(2017, 7, 5)), ["1979-05-05"], False],
[DateTime, v.BeforeNow(), ["1821-07-28"], True],
[DateTime, v.BeforeNow(), ["3000-01-01"], False],
[DateTime, v.AfterNow(), ["3000-01-01"], True],
[DateTime, v.AfterNow(), ["1821-07-28"], False],
]
@pytest.mark.parametrize("FieldClass, validator, input_values, result", DATA)
def test_validators(FieldClass, validator, input_values, result):
field = FieldClass(validator)
field.input_values = input_values
assert bool(field.validate()) is result
DATE_VALIDATORS = [
v.Before(datetime(2017, 7, 5)),
v.After(datetime(2017, 7, 5)),
v.BeforeNow(),
v.AfterNow(),
]
@pytest.mark.parametrize("validator", DATE_VALIDATORS)
def test_fail_if_not_date(validator):
field = Integer(validator)
field.input_values = ["1979"]
with pytest.raises(AssertionError):
field.validate()
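# Usage in brief (an assumed sketch of the pattern parametrized above):
#
#     year = Integer(v.InRange(1900, 2010))  # validators are positional args
#     year.input_values = ["1979"]
#     assert year.validate()                 # cleaned value(s) on success
#     year.input_values = ["3000"]
#     assert not year.validate()             # falsy result on failure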
``` |
{
"source": "jpsca/proper-cli",
"score": 4
} |
#### File: proper-cli/proper_cli/helpers.py
```python
def ask(question, default=None, alternatives=None):
"""Ask a question via input() and return their answer.
Arguments:
- question (str): The text of the question.
- default (any): Default value if no answer is provided.
- alternatives (str): Alternatives to display. eg: "Y/n"
"""
ops = alternatives or default
question += f" [{str(ops)}] " if ops else ""
while True:
resp = input(question)
if resp:
return resp
if default is not None:
return default
YES_CHOICES = ("y", "yes", "t", "true", "on", "1")
NO_CHOICES = ("n", "no", "f", "false", "off", "0")
def confirm(question, default=False, yes_choices=YES_CHOICES, no_choices=NO_CHOICES):
"""Ask a yes/no question via proper_cli.ask() and return their answer.
Arguments:
- question (str): Prompt question
- default (bool): Default value if no answer is provided.
- yes_choices (list): Default 'y', 'yes', '1', 'on', 'true', 't'
- no_choices (list): Default 'n', 'no', '0', 'off', 'false', 'f'
"""
yes_choices = yes_choices or YES_CHOICES
no_choices = no_choices or NO_CHOICES
default_value = yes_choices[0] if default else no_choices[0]
if default is None:
options = f"{yes_choices[0]}|{no_choices[0]}"
else:
if default:
options = f"{yes_choices[0].title()}/{no_choices[0]}"
else:
options = f"{yes_choices[0]}/{no_choices[0].title()}"
while True:
resp = ask(question, default_value, options)
if default is not None:
resp = resp or str(default)
resp = resp.lower()
if resp in yes_choices:
return True
if resp in no_choices:
return False
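# Illustrative usage (an assumed sketch, not part of the original module):
#
#     name = ask("Project name?", default="myapp")  # prompts "Project name? [myapp] "
#     if confirm("Create it now?", default=True):   # prompts "Create it now? [Y/n] "
#         print(f"Creating {name} ...")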
``` |
{
"source": "jpsca/properconf",
"score": 2
} |
#### File: properconf/tests/test_configdict.py
```python
import pytest
from properconf import ConfigDict
def test_dict_init():
config = ConfigDict({"a": 1, "B": 2, "foo": {"B": {"a": "r"}}})
assert config.a == 1
assert config.foo == {"B": {"a": "r"}}
assert config.foo.B.a == "r"
def test_iter_init():
config = ConfigDict([("a", 1), ("B", 2), ("foo", {"B": {"a": "r"}})])
assert config.a == 1
assert config.foo == {"B": {"a": "r"}}
assert config.foo.B.a == "r"
def test_do_not_set_attributes():
config = ConfigDict()
with pytest.raises(AttributeError):
config.foo = "bar"
def test_can_set_underscore_attributes():
config = ConfigDict()
config._foo = "bar"
assert config._foo == "bar"
def test_deep_update():
config = ConfigDict(
{
"auth": {"hash": "sha1", "rounds": 123},
"users": ["foo", "bar"],
"a": 1,
"foo": "bar",
}
)
config.update(
{
"auth": {"hash": "argon2"},
"users": ["lorem", "ipsum"],
"a": 2,
"fizz": {"buzz": 3},
}
)
assert config == {
"auth": {"hash": "argon2", "rounds": 123},
"users": ["lorem", "ipsum"],
"a": 2,
"foo": "bar",
"fizz": {"buzz": 3},
}
def test_dot_manual_update_diallowed():
config = ConfigDict({"a": {"b": "c"}})
with pytest.raises(AttributeError):
config.a.b = "x"
def test_manual_update():
config = ConfigDict({"a": {"b": "c"}})
config["a"]["b"] = "x"
assert config.a.b == "x"
config["a"] = "z"
assert config.a == "z"
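# In brief (an assumed summary of the behavior exercised above):
#
#     config = ConfigDict({"auth": {"hash": "sha1", "rounds": 123}})
#     config.auth.rounds                            # attribute reads at any depth
#     config.update({"auth": {"hash": "argon2"}})   # deep-merges, keeps "rounds"
#     config["auth"]["rounds"] = 456                # writes need item syntax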
``` |
{
"source": "jpsca/pyceo",
"score": 2
} |
#### File: pyceo/pastel/pastel.py
```python
import re
import sys
from contextlib import contextmanager
from .stack import StyleStack
from .style import Style
class Pastel(object):
TAG_REGEX = "[a-z][a-z0-9,_=;-]*"
FULL_TAG_REGEX = re.compile("(?isx)<(({}) | /({})?)>".format(TAG_REGEX, TAG_REGEX))
def __init__(self, colorized=False):
self._colorized = colorized
self._style_stack = StyleStack()
self._styles = {}
self.add_style("error", "white", "red")
self.add_style("info", "green")
self.add_style("comment", "yellow")
self.add_style("question", "black", "cyan")
@classmethod
def escape(cls, text):
return re.sub("(?is)([^\\\\]?)<", "\\1\\<", text)
@contextmanager
def colorized(self, colorized=None):
is_colorized = self.is_colorized()
if colorized is None:
colorized = sys.stdout.isatty() and is_colorized
self.with_colors(colorized)
yield
self.with_colors(is_colorized)
def with_colors(self, colorized):
self._colorized = colorized
def is_colorized(self):
return self._colorized
def add_style(self, name, fg=None, bg=None, options=None):
style = Style(fg, bg, options)
self._styles[name] = style
def has_style(self, name):
return name in self._styles
def style(self, name):
if self.has_style(name):
return self._styles[name]
def remove_style(self, name):
if not self.has_style(name):
raise ValueError("Invalid style {}".format(name))
del self._styles[name]
def colorize(self, message): # noqa
output = ""
tags = []
i = 0
for m in self.FULL_TAG_REGEX.finditer(message):
if i > 0:
p = tags[i - 1]
tags[i - 1] = (p[0], p[1], p[2], p[3], m.start(0))
tags.append((m.group(0), m.end(0), m.group(1), m.group(3), None))
i += 1
if not tags:
return message.replace("\\<", "<")
offset = 0
for t in tags:
prev_offset = offset
offset = t[1]
endpos = t[4] if t[4] else -1
text = t[0]
if prev_offset < offset - len(text):
output += self._apply_current_style(
message[prev_offset : offset - len(text)]
)
if offset != 0 and "\\" == message[offset - len(text) - 1]:
output += self._apply_current_style(text)
continue
# opening tag?
open = "/" != text[1]
if open:
tag = t[2]
else:
tag = t[3] if t[3] else ""
style = self._create_style_from_string(tag.lower())
if not open and not tag:
# </>
self._style_stack.pop()
elif style is False:
output += self._apply_current_style(text)
elif open:
self._style_stack.push(style)
else:
self._style_stack.pop(style)
# add the text up to the next tag
output += self._apply_current_style(message[offset:endpos])
offset += len(message[offset:endpos])
output += self._apply_current_style(message[offset:])
return output.replace("\\<", "<")
def _create_style_from_string(self, string):
if string in self._styles:
return self._styles[string]
matches = re.findall("([^=]+)=([^;]+)(;|$)", string.lower())
if not len(matches):
return False
style = Style()
for match in matches:
if match[0] == "fg":
style.set_foreground(match[1])
elif match[0] == "bg":
style.set_background(match[1])
else:
try:
for option in match[1].split(","):
style.set_option(option.strip())
except ValueError:
return False
return style
def _apply_current_style(self, text):
if self.is_colorized() and len(text):
return self._style_stack.get_current().apply(text)
else:
return text
```
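A short usage sketch may help here, since `colorize` is dense: styles are applied via HTML-like tags, resolved either by name or parsed inline from `fg=...;bg=...;options=...` strings, and `</>` pops the most recent style. This assumes the vendored package is importable as `pastel`; the `blue` color and `underscore` option names are assumptions, since the `Style` class is not shown here.
```python
from pastel.pastel import Pastel

p = Pastel(colorized=True)

# Built-in named styles registered in __init__:
print(p.colorize("<info>done</info> in <comment>3s</comment>"))

# Inline ad-hoc styles, parsed by _create_style_from_string;
# </> pops the most recently pushed style:
print(p.colorize("<fg=red;options=bold>error</>"))

# Custom named styles can be registered too:
p.add_style("url", "blue", options=["underscore"])
print(p.colorize("see <url>https://example.com</url>"))
```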
#### File: pyceo/pastel/stack.py
```python
from .style import Style
class StyleStack(object):
def __init__(self, empty_style=None):
self.empty_style = empty_style or Style()
self.reset()
def reset(self):
self.styles = list()
def push(self, style):
self.styles.append(style)
def pop(self, style=None):
if not len(self.styles):
return self.empty_style
if not style:
return self.styles.pop()
for i, stacked_style in enumerate(reversed(self.styles)):
if style == stacked_style:
self.styles = self.styles[: len(self.styles) - 1 - i]
return stacked_style
raise ValueError("Incorrectly nested style tag found.")
def get_current(self):
if not len(self.styles):
return self.empty_style
return self.styles[-1]
``` |
{
"source": "jpsca/sqla-wrapper",
"score": 2
} |
#### File: sqla-wrapper/tests/test_base_model.py
```python
def test_fill(dbs, TestModelA):
obj = dbs.create(TestModelA, title="Remember")
obj.fill(title="lorem ipsum")
dbs.commit()
updated = dbs.first(TestModelA)
assert updated.title == "lorem ipsum"
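# The pattern in brief (an assumed sketch; `body` is a hypothetical column):
#
#     obj = dbs.create(Model, title="draft")
#     obj.fill(title="final", body="...")  # mass-assign attributes
#     dbs.commit()                         # fill() does not commit by itself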
``` |
{
"source": "jpsca/texteditor",
"score": 3
} |
#### File: texteditor/tests/test_texteditor.py
```python
import os
from unittest.mock import MagicMock
import pytest
import texteditor
from texteditor import EDITOR
def test_EDITOR_used():
os.environ[EDITOR] = "/path/to/myeditor"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path/to/myeditor"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_args():
os.environ[EDITOR] = "/path/to/myeditor --wait"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path/to/myeditor"
assert cmd[1] == "--wait"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_args_and_spaces():
os.environ[EDITOR] = "/path\\ to/myeditor --wait -n"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path to/myeditor"
assert cmd[1] == "--wait"
assert cmd[2] == "-n"
assert cmd[-1].endswith(".txt") # the filename
texteditor.run = _run
def test_EDITOR_with_quoted_cmd():
os.environ[EDITOR] = '"/path to/myeditor" --wait'
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open()
args, _ = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "/path to/myeditor"
texteditor.run = _run
def test_set_extension():
os.environ[EDITOR] = "myeditor"
_run = texteditor.run
texteditor.run = MagicMock()
texteditor.open(extension="md")
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "myeditor"
assert cmd[-1].endswith(".md") # the filename
texteditor.run = _run
def test_use_filename():
os.environ[EDITOR] = "myeditor"
texteditor.run = MagicMock()
_run = texteditor.run
texteditor.open(filename="README.md")
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0] == "myeditor"
assert cmd[-1].endswith("README.md") # the filename
texteditor.run = _run
def test_get_editor():
os.environ[EDITOR] = ""
texteditor.run = MagicMock()
texteditor.open()
args, kw = texteditor.run.call_args
cmd = args[0]
assert cmd[0]
def test_no_editor_available():
os.environ[EDITOR] = ""
def find_nothing(_):
return None
_which = texteditor.which
texteditor.which = find_nothing
_run = texteditor.run
texteditor.run = MagicMock()
# inconceivable!
with pytest.raises(RuntimeError):
texteditor.open()
texteditor.which = _which
texteditor.run = _run
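# Illustrative usage (assumed signature, based on the keyword arguments
# exercised above): open() blocks until the editor exits and returns the
# edited text.
#
#     import texteditor
#     text = texteditor.open(extension="md")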
``` |
{
"source": "jpsecher/py-cli",
"score": 3
} |
#### File: py-cli/src/cli.py
```python
import argparse
from typing import Sequence
class CLI:
def __init__(self, description: str, args: Sequence[str]):
self._cli_args = args
self._parser = argparse.ArgumentParser(description=description)
def set_up_log(self) -> None:
pass
def logfile(self) -> str:
# TODO: yyyy-mm-dd/hh-mm-ss-hash-name.log
return self._args.logdir + '/ost.log'
def epilog(self) -> str:
return self._args.logdir
def parse_args(self) -> None:
self._parser.add_argument(
'-V', '--version', action='count', help='show application version')
self._parser.add_argument(
'-v', '--verbose', action='count', default=0,
help='Verbose output (repeat to increase)')
self._parser.add_argument(
'--logdir', default='logs', metavar='DIR',
help='logs are stored in this directory')
self._args = self._parser.parse_args(args=self._cli_args)
```
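A minimal sketch of driving this class directly (the module import path `cli` is an assumption based on the file location):
```python
from cli import CLI

args = ["-v", "--logdir", "logs"]
cli = CLI("Trivial command-line application", args)
cli.parse_args()
print(cli.logfile())  # -> logs/ost.log
```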
#### File: py-cli/src/test_cli.py
```python
from app import App
from constants import APP_VERSION
import pytest
import re
#from os import path
def test_unknown_arg(capfd):
# -----------------------------------------------------------------------
with pytest.raises(SystemExit):
App(['--unknown-arg'])
# -----------------------------------------------------------------------
out, err = capfd.readouterr()
assert not out
assert re.findall('usage:', err)
def test_no_args_prints_usage(capfd):
# -----------------------------------------------------------------------
with pytest.raises(SystemExit):
App([])
# -----------------------------------------------------------------------
out, err = capfd.readouterr()
assert not err
assert re.findall('usage:', out)
def test_version(capfd):
# -----------------------------------------------------------------------
App(['-V']).run()
# -----------------------------------------------------------------------
out, err = capfd.readouterr()
assert not err
assert out == f'{APP_VERSION}\n'
def test_verbose_on(capfd):
# -----------------------------------------------------------------------
App(['-v', '-V']).run()
# -----------------------------------------------------------------------
out, err = capfd.readouterr()
assert not err
assert out == 'Trivial command-line application v0.1\n0.1\n'
def test_logfile_location(tmp_path):
# -----------------------------------------------------------------------
app = App(['--logdir', str(tmp_path), '-V'])
# -----------------------------------------------------------------------
assert re.match(str(tmp_path), app.logfile())
# def test_logfile_created(tmp_path):
# # -----------------------------------------------------------------------
# App(['--logdir', str(tmp_path), '-V']).run()
# # -----------------------------------------------------------------------
# assert
# def test_create_default(tmp_path):
# file = tmp_path / 'myfile'
# # ----------------------------------------------------------------------
# fs.create(file)
# # ----------------------------------------------------------------------
# assert path.exists(file)
``` |
{
"source": "jpsecher/pyvisa",
"score": 2
} |
#### File: pyvisa/ctwrapper/cthelper.py
```python
import ctypes
import os
import sys
if sys.platform == "win32":
FUNCTYPE, Library = ctypes.WINFUNCTYPE, ctypes.WinDLL
else:
FUNCTYPE, Library = ctypes.CFUNCTYPE, ctypes.CDLL
# On Linux, find_library returns the name, not the path.
# This excerpt provides a modified find_library.
if os.name == "posix" and sys.platform.startswith("linux"):
# Andreas Degert's find functions, using gcc, /sbin/ldconfig, objdump
def define_find_library():
import errno
import re
import tempfile
def _findlib_gcc(name):
expr = r"[^\(\)\s]*lib%s\.[^\(\)\s]*" % re.escape(name)
fdout, ccout = tempfile.mkstemp()
os.close(fdout)
cmd = (
"if type gcc >/dev/null 2>&1; then CC=gcc; else CC=cc; fi;"
"$CC -Wl,-t -o " + ccout + " 2>&1 -l" + name
)
trace = ""
try:
f = os.popen(cmd)
trace = f.read()
f.close()
finally:
try:
os.unlink(ccout)
except OSError as e:
if e.errno != errno.ENOENT:
raise
res = re.search(expr, trace)
if not res:
return None
return res.group(0)
def _findlib_ldconfig(name):
# NOTE: assuming GLIBC's ldconfig (with option -p)
expr = r"/[^\(\)\s]*lib%s\.[^\(\)\s]*" % re.escape(name)
with os.popen("/sbin/ldconfig -p 2>/dev/null") as pipe:
res = re.search(expr, pipe.read())
if not res:
# Hm, this works only for libs needed by the python executable.
cmd = "ldd %s 2>/dev/null" % sys.executable
with os.popen(cmd) as pipe:
res = re.search(expr, pipe.read())
if not res:
return None
return res.group(0)
def _find_library(name):
path = _findlib_ldconfig(name) or _findlib_gcc(name)
if path:
return os.path.realpath(path)
return path
return _find_library
find_library = define_find_library()
else:
from ctypes.util import find_library
__all__ = ["find_library", "FUNCTYPE", "Library"]
```
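A minimal usage sketch, assuming a VISA shared library named `visa` is installed: on Linux the custom lookup resolves a real path via gcc or ldconfig, while other platforms fall back to `ctypes.util.find_library`.
```python
from pyvisa.ctwrapper.cthelper import Library, find_library

path = find_library("visa")  # real path on Linux, or None if not found
if path:
    lib = Library(path)      # ctypes.CDLL (or WinDLL on Windows)
```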
#### File: pyvisa/ctwrapper/functions.py
```python
import warnings
from contextlib import contextmanager
from ctypes import (
POINTER,
byref,
c_double,
c_long,
c_void_p,
c_wchar_p,
create_string_buffer,
)
from functools import update_wrapper
from threading import Lock
from typing import Any, Callable, Optional, Tuple
from pyvisa import attributes, constants, ctwrapper, typing
from pyvisa.highlevel import ResourceInfo
from . import types
from .types import (
FUNCTYPE,
ViAccessMode,
ViAChar,
ViAddr,
ViAttr,
ViAttrState,
ViAUInt8,
ViAUInt16,
ViAUInt32,
ViAUInt64,
ViBoolean,
ViBuf,
ViBusAddress,
ViBusAddress64,
ViBusSize,
ViEvent,
ViEventFilter,
ViEventType,
ViFindList,
ViHndlr,
ViInt16,
ViJobId,
ViKeyId,
ViObject,
ViPAddr,
ViPBuf,
ViPBusAddress,
ViPEvent,
ViPEventType,
ViPFindList,
ViPJobId,
ViPSession,
ViPUInt8,
ViPUInt16,
ViPUInt32,
ViPUInt64,
ViRsrc,
ViSession,
ViStatus,
ViString,
ViUInt8,
ViUInt16,
ViUInt32,
ViUInt64,
buffer_to_text,
)
visa_functions = [
"assert_interrupt_signal",
"assert_trigger",
"assert_utility_signal",
"buffer_read",
"buffer_write",
"clear",
"close",
"disable_event",
"discard_events",
"enable_event",
"_find_next",
"_find_resources",
"flush",
"get_attribute",
"gpib_command",
"gpib_control_atn",
"gpib_control_ren",
"gpib_pass_control",
"gpib_send_ifc",
"in_16",
"in_32",
"in_8",
"install_handler",
"lock",
"map_address",
"map_trigger",
"memory_allocation",
"memory_free",
"move",
"move_asynchronously",
"move_in_16",
"move_in_32",
"move_in_8",
"move_out_16",
"move_out_32",
"move_out_8",
"open",
"open_default_resource_manager",
"out_16",
"out_32",
"out_8",
"parse_resource",
"parse_resource_extended",
"peek_16",
"peek_32",
"peek_8",
"poke_16",
"poke_32",
"poke_8",
"read",
"read_to_file",
"read_stb",
"set_attribute",
"set_buffer",
"status_description",
"terminate",
"uninstall_handler",
"unlock",
"unmap_address",
"unmap_trigger",
"usb_control_in",
"usb_control_out",
"vxi_command_query",
"wait_on_event",
"write",
"write_asynchronously",
"write_from_file",
"in_64",
"move_in_64",
"out_64",
"move_out_64",
"poke_64",
"peek_64",
]
__all__ = ["visa_functions", "set_signatures"] + visa_functions
VI_SPEC_VERSION = 0x00300000
#: Global lock to ensure that we cannot have one thread change the type while
#: another is trying to interact with VISA
ViHndlr_lock = Lock()
@contextmanager
def set_user_handle_type(library, user_handle: Any):
"""Set the type of the user handle to install and uninstall handler signature.
Parameters
----------
library :
The visa library wrapped by ctypes.
user_handle :
User handle used when registering an event handler. Use None for a void_p.
"""
with ViHndlr_lock:
# Actually, it's not necessary to change ViHndlr *globally*. However,
# I don't want to break symmetry too much with all the other VPP43
# routines.
global ViHndlr
if user_handle is None:
user_handle_p = c_void_p
else:
user_handle_p = POINTER(type(user_handle)) # type: ignore
ViHndlr = FUNCTYPE(ViStatus, ViSession, ViEventType, ViEvent, user_handle_p)
library.viInstallHandler.argtypes = [
ViSession,
ViEventType,
ViHndlr,
user_handle_p,
]
library.viUninstallHandler.argtypes = [
ViSession,
ViEventType,
ViHndlr,
user_handle_p,
]
yield
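# Illustrative usage of the context manager above (an assumed sketch, not
# part of the original source): keep the adjusted signatures active only
# while the handler is being (un)installed.
#
#     with set_user_handle_type(lib, converted_user_handle):
#         lib.viInstallHandler(session, event_type, converted_handler,
#                              byref(converted_user_handle))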
def set_signatures(
library, errcheck: Optional[Callable[[int, Callable, tuple], int]] = None
):
"""Set the signatures of most visa functions in the library.
All instrumentation related functions are specified here.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
The visa library wrapped by ctypes.
errcheck : Optional[Callable[[int, Callable, tuple], int]]
Error checking callable used for visa functions that return ViStatus.
It should be take three arguments (result, func, arguments).
See errcheck in ctypes.
"""
# Somehow hasattr(library, '_functions') segfaults in cygwin (See #131)
if "_functions" not in dir(library):
library._functions = []
library._functions_failed = []
def _applier(restype, errcheck_):
def _internal(function_name, argtypes, required=False):
try:
set_signature(library, function_name, argtypes, restype, errcheck_)
library._functions.append(function_name)
except AttributeError:
library._functions_failed.append(function_name)
if required:
raise
return _internal
# Visa functions with ViStatus return code
apply = _applier(ViStatus, errcheck)
apply("viAssertIntrSignal", [ViSession, ViInt16, ViUInt32])
apply("viAssertTrigger", [ViSession, ViUInt16])
apply("viAssertUtilSignal", [ViSession, ViUInt16])
apply("viBufRead", [ViSession, ViPBuf, ViUInt32, ViPUInt32])
apply("viBufWrite", [ViSession, ViBuf, ViUInt32, ViPUInt32])
apply("viClear", [ViSession])
apply("viClose", [ViObject])
apply("viDisableEvent", [ViSession, ViEventType, ViUInt16])
apply("viDiscardEvents", [ViSession, ViEventType, ViUInt16])
apply("viEnableEvent", [ViSession, ViEventType, ViUInt16, ViEventFilter])
apply("viFindNext", [ViSession, ViAChar])
apply("viFindRsrc", [ViSession, ViString, ViPFindList, ViPUInt32, ViAChar])
apply("viFlush", [ViSession, ViUInt16])
apply("viGetAttribute", [ViObject, ViAttr, c_void_p])
apply("viGpibCommand", [ViSession, ViBuf, ViUInt32, ViPUInt32])
apply("viGpibControlATN", [ViSession, ViUInt16])
apply("viGpibControlREN", [ViSession, ViUInt16])
apply("viGpibPassControl", [ViSession, ViUInt16, ViUInt16])
apply("viGpibSendIFC", [ViSession])
apply("viIn8", [ViSession, ViUInt16, ViBusAddress, ViPUInt8])
apply("viIn16", [ViSession, ViUInt16, ViBusAddress, ViPUInt16])
apply("viIn32", [ViSession, ViUInt16, ViBusAddress, ViPUInt32])
apply("viIn64", [ViSession, ViUInt16, ViBusAddress, ViPUInt64])
apply("viIn8Ex", [ViSession, ViUInt16, ViBusAddress64, ViPUInt8])
apply("viIn16Ex", [ViSession, ViUInt16, ViBusAddress64, ViPUInt16])
apply("viIn32Ex", [ViSession, ViUInt16, ViBusAddress64, ViPUInt32])
apply("viIn64Ex", [ViSession, ViUInt16, ViBusAddress64, ViPUInt64])
apply("viInstallHandler", [ViSession, ViEventType, ViHndlr, ViAddr])
apply("viLock", [ViSession, ViAccessMode, ViUInt32, ViKeyId, ViAChar])
apply(
"viMapAddress",
[ViSession, ViUInt16, ViBusAddress, ViBusSize, ViBoolean, ViAddr, ViPAddr],
)
apply("viMapTrigger", [ViSession, ViInt16, ViInt16, ViUInt16])
apply("viMemAlloc", [ViSession, ViBusSize, ViPBusAddress])
apply("viMemFree", [ViSession, ViBusAddress])
apply(
"viMove",
[
ViSession,
ViUInt16,
ViBusAddress,
ViUInt16,
ViUInt16,
ViBusAddress,
ViUInt16,
ViBusSize,
],
)
apply(
"viMoveAsync",
[
ViSession,
ViUInt16,
ViBusAddress,
ViUInt16,
ViUInt16,
ViBusAddress,
ViUInt16,
ViBusSize,
ViPJobId,
],
)
apply("viMoveIn8", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt8])
apply("viMoveIn16", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt16])
apply("viMoveIn32", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt32])
apply("viMoveIn64", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt64])
apply("viMoveIn8Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt8])
apply("viMoveIn16Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt16])
apply("viMoveIn32Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt32])
apply("viMoveIn64Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt64])
apply("viMoveOut8", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt8])
apply("viMoveOut16", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt16])
apply("viMoveOut32", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt32])
apply("viMoveOut64", [ViSession, ViUInt16, ViBusAddress, ViBusSize, ViAUInt64])
apply("viMoveOut8Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt8])
apply("viMoveOut16Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt16])
apply("viMoveOut32Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt32])
apply("viMoveOut64Ex", [ViSession, ViUInt16, ViBusAddress64, ViBusSize, ViAUInt64])
apply(
"viOpen", [ViSession, ViRsrc, ViAccessMode, ViUInt32, ViPSession], required=True
)
apply("viOpenDefaultRM", [ViPSession], required=True)
apply("viOut8", [ViSession, ViUInt16, ViBusAddress, ViUInt8])
apply("viOut16", [ViSession, ViUInt16, ViBusAddress, ViUInt16])
apply("viOut32", [ViSession, ViUInt16, ViBusAddress, ViUInt32])
apply("viOut64", [ViSession, ViUInt16, ViBusAddress, ViUInt64])
apply("viOut8Ex", [ViSession, ViUInt16, ViBusAddress64, ViUInt8])
apply("viOut16Ex", [ViSession, ViUInt16, ViBusAddress64, ViUInt16])
apply("viOut32Ex", [ViSession, ViUInt16, ViBusAddress64, ViUInt32])
apply("viOut64Ex", [ViSession, ViUInt16, ViBusAddress64, ViUInt64])
apply("viParseRsrc", [ViSession, ViRsrc, ViPUInt16, ViPUInt16])
apply(
"viParseRsrcEx",
[ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar],
)
apply("viRead", [ViSession, ViPBuf, ViUInt32, ViPUInt32])
apply("viReadAsync", [ViSession, ViPBuf, ViUInt32, ViPJobId])
apply("viReadSTB", [ViSession, ViPUInt16])
apply("viReadToFile", [ViSession, ViString, ViUInt32, ViPUInt32])
apply("viSetAttribute", [ViObject, ViAttr, ViAttrState])
apply("viSetBuf", [ViSession, ViUInt16, ViUInt32])
apply("viStatusDesc", [ViObject, ViStatus, ViAChar])
apply("viTerminate", [ViSession, ViUInt16, ViJobId])
apply("viUninstallHandler", [ViSession, ViEventType, ViHndlr, ViAddr])
apply("viUnlock", [ViSession])
apply("viUnmapAddress", [ViSession])
apply("viUnmapTrigger", [ViSession, ViInt16, ViInt16])
apply(
"viUsbControlIn",
[ViSession, ViInt16, ViInt16, ViUInt16, ViUInt16, ViUInt16, ViPBuf, ViPUInt16],
)
apply(
"viUsbControlOut",
[ViSession, ViInt16, ViInt16, ViUInt16, ViUInt16, ViUInt16, ViPBuf],
)
# The following "V" routines are *not* implemented in PyVISA, and will
# never be: viVPrintf, viVQueryf, viVScanf, viVSPrintf, viVSScanf
apply("viVxiCommandQuery", [ViSession, ViUInt16, ViUInt32, ViPUInt32])
apply("viWaitOnEvent", [ViSession, ViEventType, ViUInt32, ViPEventType, ViPEvent])
apply("viWrite", [ViSession, ViBuf, ViUInt32, ViPUInt32])
apply("viWriteAsync", [ViSession, ViBuf, ViUInt32, ViPJobId])
apply("viWriteFromFile", [ViSession, ViString, ViUInt32, ViPUInt32])
# Functions that return void.
apply = _applier(None, None)
apply("viPeek8", [ViSession, ViAddr, ViPUInt8])
apply("viPeek16", [ViSession, ViAddr, ViPUInt16])
apply("viPeek32", [ViSession, ViAddr, ViPUInt32])
apply("viPeek64", [ViSession, ViAddr, ViPUInt64])
apply("viPoke8", [ViSession, ViAddr, ViUInt8])
apply("viPoke16", [ViSession, ViAddr, ViUInt16])
apply("viPoke32", [ViSession, ViAddr, ViUInt32])
apply("viPoke64", [ViSession, ViAddr, ViUInt64])
def set_signature(
library,
function_name: str,
argtypes: tuple,
restype,
errcheck: Optional[Callable[[int, Callable, tuple], int]],
):
"""Set the signature of single function in a library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
function_name : str
Name of the function as appears in the header file.
argtypes : tuple
ctypes types to specify the argument types that the function accepts.
restype :
A ctypes type to specify the result type of the foreign function.
Use None for void, a function not returning anything.
errcheck : Optional[Callable[[int, Callable, tuple], int]]
Error checking callable used for visa functions that return ViStatus.
It should take three arguments (result, func, arguments).
See errcheck in ctypes.
"""
func = getattr(library, function_name)
func.argtypes = argtypes
if restype is not None:
func.restype = restype
if errcheck is not None:
func.errcheck = errcheck
# The VPP-4.3.2 routines
# Usually, there is more than one way to pass parameters to ctypes calls. The
# ctypes policy used in this code goes as follows:
#
# * Null pointers are passed as "None" rather than "0". This is a little bit
# unfortunate, since the VPP specification calls this "VI_NULL", but I can't
# use "VI_NULL" since it's an integer and may not be compatible with a
# pointer type (don't know whether this is really dangerous).
#
# * Strings must have been created with "create_string_buffer" and are passed
# without any further conversion; they stand in the parameter list as is.
# The same applies to pseudo-string types such as ViRsrc or ViBuf. Their
# Pythonic counterparts are strings as well.
#
# * All other types are explicitly cast using the types defined by ctypes'
# "restype".
#
# Further notes:
#
# * The following Python routines take and give handles as ctypes objects.
# Since the user shouldn't be interested in handle values anyway, I see no
# point in converting them to Python strings or integers.
#
# * All other parameters are natural Python types, i.e. strings (may contain
# binary data) and integers. The same is true for return values.
#
# * The original VPP function signatures cannot be realised in Python, at least
# not in a sensible way, because a) Python has no real call-by-reference, and
# b) Python allows for more elegant solutions, e.g. using len(buffer) instead
# of a separate "count" parameter, or using tuples as return values.
#
# Therefore, all function signatures have been carefully adjusted. I think
# this is okay, since the original standard must be adapted to at least C and
# Visual Basic anyway, with slight modifications. I also made the function
# names and parameters more legible, but in a way that it's perfectly clear
# which original function is meant.
#
# The important thing is that the semantics of functions and parameters are
# totally intact, and the inner order of parameters, too. There is a 1:1
# mapping.
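# As a concrete illustration of this policy (an assumed sketch, not part of
# the original source), buffer_read below passes a created string buffer
# as-is, uses an explicit ctypes out-parameter for the count, and returns a
# Python tuple instead of writing through references:
#
#     buffer = create_string_buffer(count)
#     return_count = ViUInt32()
#     ret = library.viBufRead(session, buffer, count, byref(return_count))
#     return buffer.raw[:return_count.value], ret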
def assert_interrupt_signal(library, session, mode, status_id):
"""Asserts the specified interrupt or signal.
Corresponds to viAssertIntrSignal function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mode : constants.AssertSignalInterrupt
How to assert the interrupt.
status_id : int
Status value to be presented during an interrupt acknowledge cycle.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viAssertIntrSignal(session, mode, status_id)
def assert_trigger(library, session, protocol):
"""Assert software or hardware trigger.
Corresponds to viAssertTrigger function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
protocol : constants.TriggerProtocol
Trigger protocol to use during assertion.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viAssertTrigger(session, protocol)
def assert_utility_signal(library, session, line):
"""Assert or deassert the specified utility bus signal.
Corresponds to viAssertUtilSignal function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
line : constants.UtilityBusSignal
Specifies the utility bus signal to assert.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viAssertUtilSignal(session, line)
def buffer_read(library, session, count):
"""Reads data through the use of a formatted I/O read buffer.
The data can be read from a device or an interface.
Corresponds to viBufRead function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
Data read
constants.StatusCode
Return value of the library call.
"""
buffer = create_string_buffer(count)
return_count = ViUInt32()
ret = library.viBufRead(session, buffer, count, byref(return_count))
return buffer.raw[: return_count.value], ret
def buffer_write(library, session, data):
"""Writes data to a formatted I/O write buffer synchronously.
Corresponds to viBufWrite function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
number of written bytes
constants.StatusCode
return value of the library call.
"""
return_count = ViUInt32()
# [ViSession, ViBuf, ViUInt32, ViPUInt32]
ret = library.viBufWrite(session, data, len(data), byref(return_count))
return return_count.value, ret
def clear(library, session):
"""Clears a device.
Corresponds to viClear function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viClear(session)
def close(library, session):
"""Closes the specified session, event, or find list.
Corresponds to viClose function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : Union[VISASession, VISAEventContext, VISARMSession]
Unique logical identifier to a session, event, resource manager.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viClose(session)
def disable_event(library, session, event_type, mechanism):
"""Disable notification for an event type(s) via the specified mechanism(s).
Corresponds to viDisableEvent function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Event type.
mechanism : constants.EventMechanism
Event handling mechanisms to be disabled.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viDisableEvent(session, event_type, mechanism)
def discard_events(library, session, event_type, mechanism):
"""Discard event occurrences for a given type and mechanisms in a session.
Corresponds to viDiscardEvents function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be discarded.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viDiscardEvents(session, event_type, mechanism)
def enable_event(library, session, event_type, mechanism, context=None):
"""Enable event occurrences for specified event types and mechanisms in a session.
Corresponds to viEnableEvent function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be enabled.
context : None, optional
Unused parameter...
Returns
-------
constants.StatusCode
Return value of the library call.
"""
if context is None:
context = constants.VI_NULL
elif context != constants.VI_NULL:
warnings.warn("In enable_event, context will be set VI_NULL.")
context = constants.VI_NULL # according to spec VPP-4.3, section 3.7.3.1
return library.viEnableEvent(session, event_type, mechanism, context)
def _find_next(library, find_list: ViFindList) -> Tuple[str, constants.StatusCode]:
"""Get next resource from the list of resources.
The list of resources should be obtained from a previous call to find_resources().
Corresponds to viFindNext function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
find_list :
Describes a find list. This parameter must be created by find_resources().
Returns
-------
str
String identifying the location of a device
constants.StatusCode
Return value of the library call.
"""
instrument_description = create_string_buffer(constants.VI_FIND_BUFLEN)
ret = library.viFindNext(find_list, instrument_description)
return buffer_to_text(instrument_description), ret
def _find_resources(library, session: typing.VISARMSession, query: str):
"""Queries VISA to locate the resources associated with a specified interface.
Corresponds to viFindRsrc function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : typing.VISARMSession
Unique logical identifier to the ResourceManger session
(unused, just to uniform signatures).
query : str
A regular expression followed by an optional logical expression.
Use '?*' for all.
Returns
-------
ViFindList
Opaque object to pass to `_find_next` to access the other devices
resource name.
int
Number of identified devices.
str
Resource name of the first identified device
constants.StatusCode
Return value of the library call.
"""
find_list = ViFindList()
return_counter = ViUInt32()
instrument_description = create_string_buffer(constants.VI_FIND_BUFLEN)
ret = library.viFindRsrc(
session, query, byref(find_list), byref(return_counter), instrument_description
)
return find_list, return_counter.value, buffer_to_text(instrument_description), ret
def flush(library, session, mask):
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
constants.StatusCode
Return value of the library call.
"""
return library.viFlush(session, mask)
def get_attribute(library, session, attribute):
"""Retrieves the state of an attribute.
Corresponds to viGetAttribute function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : Union[VISASession, VISAEventContext]
Unique logical identifier to a session, event, or find list.
attribute : Union[constants.ResourceAttribute, constants.EventAttribute]
Resource or event attribute for which the state query is made.
Returns
-------
Any
State of the queried attribute for a specified resource
constants.StatusCode
Return value of the library call.
"""
attr = attributes.AttributesByID[attribute]
datatype = getattr(types, attr.visa_type)
if datatype == ViString:
attribute_state = create_string_buffer(256)
ret = library.viGetAttribute(session, attribute, attribute_state)
return buffer_to_text(attribute_state), ret
# There are only two buffer attributes; the one we do not handle here is the
# one for async reads, which is handled at a higher level since we pass the buffer ourselves
elif datatype == ViBuf:
if attr.visa_name == "VI_ATTR_USB_RECV_INTR_DATA":
# get_attribute returns a (value, status) tuple; only the length is needed
length, _ = get_attribute(
library, session, constants.VI_ATTR_USB_RECV_INTR_SIZE
)
attribute_state = (ViUInt8 * length)()
ret = library.viGetAttribute(session, attribute, byref(attribute_state))
return list(attribute_state), ret
else:
raise AttributeError("%s cannot be accessed directly" % attr.visa_name)
else:
attribute_state = datatype()
ret = library.viGetAttribute(session, attribute, byref(attribute_state))
return attribute_state.value, ret
def gpib_command(library, session, data):
"""Write GPIB command bytes on the bus.
Corresponds to viGpibCommand function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to write.
Returns
-------
int
Number of written bytes
constants.StatusCode
Return value of the library call.
"""
return_count = ViUInt32()
# [ViSession, ViBuf, ViUInt32, ViPUInt32]
ret = library.viGpibCommand(session, data, len(data), byref(return_count))
return return_count.value, ret
def gpib_control_atn(library, session, mode):
"""Specifies the state of the ATN line and the local active controller state.
Corresponds to viGpibControlATN function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mode : constants.ATNLineOperation
State of the ATN line and optionally the local active controller state.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viGpibControlATN(session, mode)
def gpib_control_ren(library, session, mode):
"""Controls the state of the GPIB Remote Enable (REN) interface line.
Optionally the remote/local state of the device can also be set.
Corresponds to viGpibControlREN function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mode : constants.RENLineOperation
State of the REN line and optionally the device remote/local state.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viGpibControlREN(session, mode)
def gpib_pass_control(library, session, primary_address, secondary_address):
"""Tell a GPIB device to become controller in charge (CIC).
Corresponds to viGpibPassControl function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
primary_address : int
Primary address of the GPIB device to which you want to pass control.
secondary_address : int
Secondary address of the targeted GPIB device.
If the targeted device does not have a secondary address, this parameter
should contain the value Constants.VI_NO_SEC_ADDR.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viGpibPassControl(session, primary_address, secondary_address)
def gpib_send_ifc(library, session):
"""Pulse the interface clear line (IFC) for at least 100 microseconds.
Corresponds to viGpibSendIFC function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viGpibSendIFC(session)
def in_8(library, session, space, offset, extended=False):
"""Reads in an 8-bit value from the specified memory space and offset.
Corresponds to viIn8* function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Specifies the address space.
offset : int
Offset (in bytes) of the address or register from which to read.
extended : bool, optional
Use 64 bits offset independent of the platform, False by default.
Returns
-------
int
Data read from memory
constants.StatusCode
Return value of the library call.
"""
value_8 = ViUInt8()
if extended:
ret = library.viIn8Ex(session, space, offset, byref(value_8))
else:
ret = library.viIn8(session, space, offset, byref(value_8))
return value_8.value, ret
def in_16(library, session, space, offset, extended=False):
"""Reads in an 16-bit value from the specified memory space and offset.
Corresponds to viIn16* function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Specifies the address space.
offset : int
Offset (in bytes) of the address or register from which to read.
extended : bool, optional
Use 64 bits offset independent of the platform, False by default.
Returns
-------
int
Data read from memory
constants.StatusCode
Return value of the library call.
"""
value_16 = ViUInt16()
if extended:
ret = library.viIn16Ex(session, space, offset, byref(value_16))
else:
ret = library.viIn16(session, space, offset, byref(value_16))
return value_16.value, ret
def in_32(library, session, space, offset, extended=False):
"""Reads in an 32-bit value from the specified memory space and offset.
Corresponds to viIn32* function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Specifies the address space.
offset : int
Offset (in bytes) of the address or register from which to read.
extended : bool, optional
Use 64 bits offset independent of the platform, False by default.
Returns
-------
int
Data read from memory
constants.StatusCode
Return value of the library call.
"""
value_32 = ViUInt32()
if extended:
ret = library.viIn32Ex(session, space, offset, byref(value_32))
else:
ret = library.viIn32(session, space, offset, byref(value_32))
return value_32.value, ret
def in_64(library, session, space, offset, extended=False):
"""Reads in an 64-bit value from the specified memory space and offset.
Corresponds to viIn64* function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Specifies the address space.
offset : int
Offset (in bytes) of the address or register from which to read.
extended : bool, optional
Use 64 bits offset independent of the platform, False by default.
Returns
-------
int
Data read from memory
constants.StatusCode
Return value of the library call.
"""
value_64 = ViUInt64()
if extended:
ret = library.viIn64Ex(session, space, offset, byref(value_64))
else:
ret = library.viIn64(session, space, offset, byref(value_64))
return value_64.value, ret
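# Hedged usage sketch (not part of the library): the in_* family shares one
# calling convention, so reading a 16-bit register could look like the
# following, where `lib` is a ctypes-wrapped VISA library and `session` an
# open register-based session (both are assumptions for the example):
#
#     value, status = in_16(lib, session, constants.AddressSpace.a16, 0x40)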
def install_handler(
library, session, event_type, handler, user_handle: Any
) -> Tuple[typing.VISAHandler, Any, Any, constants.StatusCode]:
"""Install handlers for event callbacks.
Corresponds to viInstallHandler function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
handler : VISAHandler
Reference to a handler to be installed by a client application.
user_handle : Any
Value specified by an application that can be used for identifying
handlers uniquely for an event type.
Returns
-------
handler : VISAHandler
Handler to be installed by a client application.
converted_user_handle :
Converted user handle to match the underlying library. This version
of the handle should be used in further call to the library.
converted_handler :
Converted version of the handler satisfying to backend library.
status_code : constants.StatusCode
Return value of the library call
"""
# Should be Optional[_CData] but that type cannot be imported
converted_user_handle: object = None
if user_handle is not None:
if isinstance(user_handle, int):
converted_user_handle = c_long(user_handle)
elif isinstance(user_handle, float):
converted_user_handle = c_double(user_handle)
elif isinstance(user_handle, str):
converted_user_handle = c_wchar_p(user_handle)
elif isinstance(user_handle, list):
for element in user_handle:
if not isinstance(element, int):
# Mypy cannot track the fact that the list has to contain float
converted_user_handle = (c_double * len(user_handle))( # type: ignore
*tuple(user_handle)
)
break
else:
converted_user_handle = (c_long * len(user_handle))(*tuple(user_handle))
else:
try:
# check if it is already a ctypes
byref(user_handle)
converted_user_handle = user_handle
except TypeError:
raise TypeError(
"Type not allowed as user handle: %s" % type(user_handle)
)
with set_user_handle_type(library, converted_user_handle):
if ctwrapper.WRAP_HANDLER:
# Wrap the handler to provide a non-wrapper specific interface
def handler_wrapper(
ctype_session, ctype_event_type, ctype_event_context, ctype_user_handle
):
handler(
ctype_session.value,
ctype_event_type,
ctype_event_context.value,
ctype_user_handle.contents
if ctype_user_handle
else ctype_user_handle,
)
return 0
update_wrapper(handler_wrapper, handler)
else:
handler_wrapper = handler
converted_handler = ViHndlr(handler_wrapper)
if user_handle is None:
ret = library.viInstallHandler(session, event_type, converted_handler, None)
else:
ret = library.viInstallHandler(
session,
event_type,
converted_handler,
byref(converted_user_handle), # type: ignore
)
return handler, converted_user_handle, converted_handler, ret
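# Hedged usage sketch (not part of the library): a minimal handler compatible
# with install_handler. `lib`, `session` and the handler name are assumptions
# for the example; keep the returned converted handler/user handle alive and
# pass them back to uninstall_handler later.
#
#     def on_event(session, event_type, context, user_handle):
#         print("event %s on session %s" % (event_type, session))
#
#     handler, conv_uh, conv_h, ret = install_handler(
#         lib, session, constants.EventType.service_request, on_event, None
#     )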
def lock(library, session, lock_type, timeout, requested_key=None):
"""Establishes an access mode to the specified resources.
Corresponds to viLock function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
lock_type : constants.Lock
Specifies the type of lock requested.
timeout : int
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
requested_key : Optional[str], optional
Requested locking key in the case of a shared lock. For an exclusive
lock it should be None.
Returns
-------
Optional[str]
Key that can then be passed to other sessions to share the lock, or
None for an exclusive lock.
constants.StatusCode
Return value of the library call.
"""
if lock_type == constants.AccessModes.exclusive_lock:
requested_key = None
access_key = None
else:
access_key = create_string_buffer(256)
ret = library.viLock(session, lock_type, timeout, requested_key, access_key)
if access_key is None:
return None, ret
else:
return access_key.value, ret
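# Hedged usage sketch (not part of the library): a shared lock returns a key
# that other sessions can present to join the lock. `lib`, `s1` and `s2` are
# assumptions for the example; the timeout is in milliseconds.
#
#     key, ret = lock(lib, s1, constants.AccessModes.shared_lock, 2000)
#     _, ret = lock(lib, s2, constants.AccessModes.shared_lock, 2000, key)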
def map_address(
library, session, map_space, map_base, map_size, access=False, suggested=None
):
"""Maps the specified memory space into the process's address space.
Corresponds to viMapAddress function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
map_space : constants.AddressSpace
Specifies the address space to map.
map_base : int
Offset (in bytes) of the memory to be mapped.
map_size : int
Amount of memory to map (in bytes).
access : False
Unused parameter.
suggested : Optional[int], optional
If not None, the operating system attempts to map the memory to the
address specified. There is no guarantee, however, that the memory
will be mapped to that address. This operation may map the memory
into an address region different from the suggested one.
Returns
-------
int
Address in your process space where the memory was mapped
constants.StatusCode
Return value of the library call.
"""
if access is False:
access = constants.VI_FALSE
elif access != constants.VI_FALSE:
warnings.warn("In enable_event, context will be set VI_NULL.")
access = constants.VI_FALSE
address = ViAddr()
ret = library.viMapAddress(
session, map_space, map_base, map_size, access, suggested, byref(address)
)
return address, ret
def map_trigger(library, session, trigger_source, trigger_destination, mode):
"""Map the specified trigger source line to the specified destination line.
Corresponds to viMapTrigger function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
trigger_source : constants.InputTriggerLine
Source line from which to map.
trigger_destination : constants.OutputTriggerLine
Destination line to which to map.
mode : None, optional
Always None for this version of the VISA specification.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viMapTrigger(session, trigger_source, trigger_destination, mode)
def memory_allocation(library, session, size, extended=False):
"""Allocate memory from a resource's memory region.
Corresponds to viMemAlloc* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
size : int
Specifies the size of the allocation.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
int
offset of the allocated memory
constants.StatusCode
Return value of the library call.
"""
offset = ViBusAddress()
if extended:
ret = library.viMemAllocEx(session, size, byref(offset))
else:
ret = library.viMemAlloc(session, size, byref(offset))
return offset, ret
def memory_free(library, session, offset, extended=False):
"""Frees memory previously allocated using the memory_allocation() operation.
Corresponds to viMemFree* function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
offset : int
Offset of the memory to free.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
if extended:
return library.viMemFreeEx(session, offset)
else:
return library.viMemFree(session, offset)
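# Hedged usage sketch (not part of the library): allocations made through
# memory_allocation should be released with memory_free; `lib` and `session`
# are assumptions for the example.
#
#     offset, ret = memory_allocation(lib, session, 4096)
#     try:
#         pass  # access the region, e.g. through in_*/out_*
#     finally:
#         memory_free(lib, session, offset)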
def move(
library,
session,
source_space,
source_offset,
source_width,
destination_space,
destination_offset,
destination_width,
length,
):
"""Moves a block of data.
Corresponds to viMove function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
source_space : constants.AddressSpace
Specifies the address space of the source.
source_offset : int
Offset of the starting address or register from which to read.
source_width : constants.DataWidth
Specifies the data width of the source.
destination_space : constants.AddressSpace
Specifies the address space of the destination.
destination_offset : int
Offset of the starting address or register to which to write.
destination_width : constants.DataWidth
Specifies the data width of the destination.
    length : int
Number of elements to transfer, where the data width of the
elements to transfer is identical to the source data width.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viMove(
session,
source_space,
source_offset,
source_width,
destination_space,
destination_offset,
destination_width,
length,
)
def move_asynchronously(
library,
session,
source_space,
source_offset,
source_width,
destination_space,
destination_offset,
destination_width,
length,
):
"""Moves a block of data asynchronously.
Corresponds to viMoveAsync function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
source_space : constants.AddressSpace
Specifies the address space of the source.
source_offset : int
Offset of the starting address or register from which to read.
source_width : constants.DataWidth
Specifies the data width of the source.
destination_space : constants.AddressSpace
Specifies the address space of the destination.
destination_offset : int
Offset of the starting address or register to which to write.
destination_width : constants.DataWidth
Specifies the data width of the destination.
length : int
Number of elements to transfer, where the data width of the
elements to transfer is identical to the source data width.
Returns
-------
VISAJobID
Job identifier of this asynchronous move operation
constants.StatusCode
Return value of the library call.
"""
job_id = ViJobId()
ret = library.viMoveAsync(
session,
source_space,
source_offset,
source_width,
destination_space,
destination_offset,
destination_width,
length,
byref(job_id),
)
return job_id, ret
def move_in_8(library, session, space, offset, length, extended=False):
"""Moves an 8-bit block of data to local memory.
Corresponds to viMoveIn8* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space from which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
data : List[int]
Data read from the bus
status_code : constants.StatusCode
Return value of the library call.
"""
buffer_8 = (ViUInt8 * length)()
if extended:
ret = library.viMoveIn8Ex(session, space, offset, length, buffer_8)
else:
ret = library.viMoveIn8(session, space, offset, length, buffer_8)
return list(buffer_8), ret
def move_in_16(library, session, space, offset, length, extended=False):
"""Moves an 16-bit block of data to local memory.
Corresponds to viMoveIn816 functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space from which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
data : List[int]
Data read from the bus
status_code : constants.StatusCode
Return value of the library call.
"""
buffer_16 = (ViUInt16 * length)()
if extended:
ret = library.viMoveIn16Ex(session, space, offset, length, buffer_16)
else:
ret = library.viMoveIn16(session, space, offset, length, buffer_16)
return list(buffer_16), ret
def move_in_32(library, session, space, offset, length, extended=False):
"""Moves an 32-bit block of data to local memory.
Corresponds to viMoveIn32* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space from which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
data : List[int]
Data read from the bus
status_code : constants.StatusCode
Return value of the library call.
"""
buffer_32 = (ViUInt32 * length)()
if extended:
ret = library.viMoveIn32Ex(session, space, offset, length, buffer_32)
else:
ret = library.viMoveIn32(session, space, offset, length, buffer_32)
return list(buffer_32), ret
def move_in_64(library, session, space, offset, length, extended=False):
"""Moves an 64-bit block of data to local memory.
Corresponds to viMoveIn8* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space from which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
data : List[int]
Data read from the bus
status_code : constants.StatusCode
Return value of the library call.
"""
buffer_64 = (ViUInt64 * length)()
if extended:
ret = library.viMoveIn64Ex(session, space, offset, length, buffer_64)
else:
ret = library.viMoveIn64(session, space, offset, length, buffer_64)
return list(buffer_64), ret
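# Hedged usage sketch (not part of the library): every move_in_* variant
# returns (values, status). Reading 16 consecutive 32-bit registers could
# look like this (`lib` and `session` are assumptions for the example):
#
#     values, ret = move_in_32(lib, session, constants.AddressSpace.a24, 0x0, 16)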
def move_out_8(library, session, space, offset, length, data, extended=False):
"""Moves an 8-bit block of data from local memory.
Corresponds to viMoveOut8* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
        Address space into which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
data : Iterable[int]
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
converted_buffer = (ViUInt8 * length)(*tuple(data))
if extended:
return library.viMoveOut8Ex(session, space, offset, length, converted_buffer)
else:
return library.viMoveOut8(session, space, offset, length, converted_buffer)
def move_out_16(library, session, space, offset, length, data, extended=False):
"""Moves an 16-bit block of data from local memory.
Corresponds to viMoveOut16* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
        Address space into which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
data : Iterable[int]
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
converted_buffer = (ViUInt16 * length)(*tuple(data))
if extended:
return library.viMoveOut16Ex(session, space, offset, length, converted_buffer)
else:
return library.viMoveOut16(session, space, offset, length, converted_buffer)
def move_out_32(library, session, space, offset, length, data, extended=False):
"""Moves an 32-bit block of data from local memory.
Corresponds to viMoveOut32* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
        Address space into which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
data : Iterable[int]
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
converted_buffer = (ViUInt32 * length)(*tuple(data))
if extended:
return library.viMoveOut32Ex(session, space, offset, length, converted_buffer)
else:
return library.viMoveOut32(session, space, offset, length, converted_buffer)
def move_out_64(library, session, space, offset, length, data, extended=False):
"""Moves an 64-bit block of data from local memory.
Corresponds to viMoveOut64* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
        Address space into which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
data : Iterable[int]
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
converted_buffer = (ViUInt64 * length)(*tuple(data))
if extended:
return library.viMoveOut64Ex(session, space, offset, length, converted_buffer)
else:
return library.viMoveOut64(session, space, offset, length, converted_buffer)
# noinspection PyShadowingBuiltins
def open(
library,
session,
resource_name,
access_mode=constants.AccessModes.no_lock,
open_timeout=constants.VI_TMO_IMMEDIATE,
):
"""Opens a session to the specified resource.
Corresponds to viOpen function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISARMSession
Resource Manager session (should always be a session returned from
open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
open_timeout : int
If the ``access_mode`` parameter requests a lock, then this
parameter specifies the absolute time period (in milliseconds) that
the resource waits to get unlocked before this operation returns an
error.
Returns
-------
VISASession
Unique logical identifier reference to a session
constants.StatusCode
Return value of the library call.
"""
try:
open_timeout = int(open_timeout)
except ValueError:
raise ValueError(
"open_timeout (%r) must be an integer (or compatible type)" % open_timeout
)
out_session = ViSession()
# [ViSession, ViRsrc, ViAccessMode, ViUInt32, ViPSession]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viOpen(
session, resource_name, access_mode, open_timeout, byref(out_session)
)
return out_session.value, ret
def open_default_resource_manager(library):
"""This function returns a session to the Default Resource Manager resource.
Corresponds to viOpenDefaultRM function of the VISA library.
Returns
-------
VISARMSession
Unique logical identifier to a Default Resource Manager session
constants.StatusCode
Return value of the library call.
"""
session = ViSession()
ret = library.viOpenDefaultRM(byref(session))
return session.value, ret
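# Hedged usage sketch (not part of the library): the usual bootstrap opens
# the default resource manager first and then uses its session to open a
# device session. The resource string is a made-up example.
#
#     rm_session, ret = open_default_resource_manager(lib)
#     dev_session, ret = open(lib, rm_session, "GPIB0::12::INSTR")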
def out_8(library, session, space, offset, data, extended=False):
"""Write an 8-bit value to the specified memory space and offset.
Corresponds to viOut8* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space into which to write.
offset : int
Offset (in bytes) of the address or register from which to read.
data : int
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
if extended:
return library.viOut8Ex(session, space, offset, data)
else:
return library.viOut8(session, space, offset, data)
def out_16(library, session, space, offset, data, extended=False):
"""Write a 16-bit value to the specified memory space and offset.
Corresponds to viOut16* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space into which to write.
offset : int
Offset (in bytes) of the address or register from which to read.
data : int
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
    if extended:
        return library.viOut16Ex(session, space, offset, data)
    else:
        return library.viOut16(session, space, offset, data)
def out_32(library, session, space, offset, data, extended=False):
"""Write a 32-bit value to the specified memory space and offset.
Corresponds to viOut32* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space into which to write.
offset : int
Offset (in bytes) of the address or register from which to read.
data : int
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
if extended:
return library.viOut32Ex(session, space, offset, data)
else:
return library.viOut32(session, space, offset, data)
def out_64(library, session, space, offset, data, extended=False):
"""Write a 64-bit value to the specified memory space and offset.
Corresponds to viOut64* functions of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
space : constants.AddressSpace
Address space into which to write.
offset : int
Offset (in bytes) of the address or register from which to read.
data : int
Data to write to bus.
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
if extended:
return library.viOut64Ex(session, space, offset, data)
else:
return library.viOut64(session, space, offset, data)
def parse_resource(library, session, resource_name):
"""Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISARMSession
Resource Manager session (should always be the Default Resource
Manager for VISA returned from open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
Returns
-------
ResourceInfo
Resource information with interface type and board number
constants.StatusCode
Return value of the library call.
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrc(
session, resource_name, byref(interface_type), byref(interface_board_number)
)
return (
ResourceInfo(
constants.InterfaceType(interface_type.value),
interface_board_number.value,
None,
None,
None,
),
ret,
)
def parse_resource_extended(library, session, resource_name):
"""Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISARMSession
Resource Manager session (should always be the Default Resource
Manager for VISA returned from open_default_resource_manager()).
resource_name : str
Unique symbolic name of a resource.
Returns
-------
ResourceInfo
Resource information with interface type and board number
constants.StatusCode
Return value of the library call.
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
resource_class = create_string_buffer(constants.VI_FIND_BUFLEN)
unaliased_expanded_resource_name = create_string_buffer(constants.VI_FIND_BUFLEN)
alias_if_exists = create_string_buffer(constants.VI_FIND_BUFLEN)
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrcEx(
session,
resource_name,
byref(interface_type),
byref(interface_board_number),
resource_class,
unaliased_expanded_resource_name,
alias_if_exists,
)
res = [
buffer_to_text(val)
for val in (resource_class, unaliased_expanded_resource_name, alias_if_exists)
]
if res[-1] == "":
res[-1] = None
return (
ResourceInfo(
constants.InterfaceType(interface_type.value),
interface_board_number.value,
*res
),
ret,
)
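# Hedged usage sketch (not part of the library): parse_resource_extended
# fills all ResourceInfo fields, whereas parse_resource leaves the last
# three as None. The resource string is a made-up example.
#
#     info, ret = parse_resource_extended(lib, rm_session, "TCPIP0::192.168.0.2::INSTR")
#     print(info.interface_type, info.resource_class)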
def peek_8(library, session, address):
"""Read an 8-bit value from the specified address.
Corresponds to viPeek8 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
Source address to read the value.
Returns
-------
int
Data read from bus
constants.StatusCode
Return value of the library call.
"""
value_8 = ViUInt8()
ret = library.viPeek8(session, address, byref(value_8))
return value_8.value, ret
def peek_16(library, session, address):
"""Read an 16-bit value from the specified address.
Corresponds to viPeek16 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
Source address to read the value.
Returns
-------
int
Data read from bus
constants.StatusCode
Return value of the library call.
"""
value_16 = ViUInt16()
ret = library.viPeek16(session, address, byref(value_16))
return value_16.value, ret
def peek_32(library, session, address):
"""Read an 32-bit value from the specified address.
Corresponds to viPeek32 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
Source address to read the value.
Returns
-------
int
Data read from bus
constants.StatusCode
Return value of the library call.
"""
value_32 = ViUInt32()
ret = library.viPeek32(session, address, byref(value_32))
return value_32.value, ret
def peek_64(library, session, address):
"""Read an 64-bit value from the specified address.
Corresponds to viPeek64 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
Source address to read the value.
Returns
-------
int
Data read from bus
constants.StatusCode
Return value of the library call.
"""
value_64 = ViUInt64()
ret = library.viPeek64(session, address, byref(value_64))
return value_64.value, ret
def poke_8(library, session, address, data):
"""Write an 8-bit value to the specified address.
Corresponds to viPoke8 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
        Destination address to write the value.
data : int
Data to write.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viPoke8(session, address, data)
def poke_16(library, session, address, data):
"""Write an 16-bit value to the specified address.
Corresponds to viPoke16 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
        Destination address to write the value.
data : int
Data to write.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viPoke16(session, address, data)
def poke_32(library, session, address, data):
"""Write an 32-bit value to the specified address.
Corresponds to viPoke32 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
        Destination address to write the value.
data : int
Data to write.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viPoke32(session, address, data)
def poke_64(library, session, address, data):
"""Write an 64-bit value to the specified address.
Corresponds to viPoke64 function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
address : VISAMemoryAddress
        Destination address to write the value.
data : int
Data to write.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viPoke64(session, address, data)
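# Hedged usage sketch (not part of the library): peek_*/poke_* expect an
# address obtained from map_address, not a raw bus offset. `lib` and
# `session` are assumptions for the example.
#
#     address, ret = map_address(lib, session, constants.AddressSpace.a16, 0x0, 0x100)
#     value, ret = peek_16(lib, session, address)
#     ret = poke_16(lib, session, address, value | 0x1)
#     ret = unmap_address(lib, session)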
def read(library, session, count):
"""Reads data from device or interface synchronously.
Corresponds to viRead function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
count : int
Number of bytes to be read.
Returns
-------
bytes
        Data read
constants.StatusCode
Return value of the library call.
"""
buffer = create_string_buffer(count)
return_count = ViUInt32()
ret = library.viRead(session, buffer, count, byref(return_count))
return buffer.raw[: return_count.value], ret
def read_stb(library, session):
"""Reads a status byte of the service request.
Corresponds to viReadSTB function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
Returns
-------
int
Service request status byte
constants.StatusCode
Return value of the library call.
"""
status = ViUInt16()
ret = library.viReadSTB(session, byref(status))
return status.value, ret
def read_to_file(library, session, filename, count):
"""Read data synchronously, and store the transferred data in a file.
Corresponds to viReadToFile function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
filename : str
Name of file to which data will be written.
count : int
Number of bytes to be read.
Returns
-------
int
Number of bytes actually transferred
constants.StatusCode
Return value of the library call.
"""
return_count = ViUInt32()
    ret = library.viReadToFile(session, filename, count, byref(return_count))
    return return_count.value, ret
def set_attribute(library, session, attribute, attribute_state):
"""Set the state of an attribute.
Corresponds to viSetAttribute function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
attribute : constants.ResourceAttribute
Attribute for which the state is to be modified.
attribute_state : Any
The state of the attribute to be set for the specified object.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viSetAttribute(session, attribute, attribute_state)
def set_buffer(library, session, mask, size):
"""Set the size for the formatted I/O and/or low-level I/O communication buffer(s).
Corresponds to viSetBuf function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mask : constants.BufferType
Specifies the type of buffer.
size : int
The size to be set for the specified buffer(s).
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viSetBuf(session, mask, size)
def status_description(library, session, status):
"""Return a user-readable description of the status code passed to the operation.
Corresponds to viStatusDesc function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
status : constants.StatusCode
Status code to interpret.
Returns
-------
str
User-readable string interpretation of the status code.
constants.StatusCode
Return value of the library call.
"""
description = create_string_buffer(256)
ret = library.viStatusDesc(session, status, description)
return buffer_to_text(description), ret
def terminate(library, session, degree, job_id):
"""Request a VISA session to terminate normal execution of an operation.
Corresponds to viTerminate function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
degree : None
Not used in this version of the VISA specification.
job_id : VISAJobId
Specifies an operation identifier. If a user passes None as the
job_id value to viTerminate(), a VISA implementation should abort
any calls in the current process executing on the specified vi.
Any call that is terminated this way should return VI_ERROR_ABORT.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viTerminate(session, degree, job_id)
def uninstall_handler(library, session, event_type, handler, user_handle=None):
"""Uninstall handlers for events.
Corresponds to viUninstallHandler function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
event_type : constants.EventType
Logical event identifier.
handler : VISAHandler
Handler to be uninstalled by a client application.
    user_handle : Any
A value specified by an application that can be used for
identifying handlers uniquely in a session for an event.
The modified value of the user_handle as returned by install_handler
should be used instead of the original value.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
with set_user_handle_type(library, user_handle):
if user_handle is not None:
user_handle = byref(user_handle)
return library.viUninstallHandler(session, event_type, handler, user_handle)
def unlock(library, session):
"""Relinquish a lock for the specified resource.
Corresponds to viUnlock function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viUnlock(session)
def unmap_address(library, session):
"""Unmap memory space previously mapped by map_address().
Corresponds to viUnmapAddress function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viUnmapAddress(session)
def unmap_trigger(library, session, trigger_source, trigger_destination):
"""Undo a previous map between a trigger source line and a destination line.
Corresponds to viUnmapTrigger function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
trigger_source : constants.InputTriggerLine
Source line used in previous map.
trigger_destination : constants.OutputTriggerLine
Destination line used in previous map.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return library.viUnmapTrigger(session, trigger_source, trigger_destination)
def usb_control_in(
library,
session,
request_type_bitmap_field,
request_id,
request_value,
index,
length=0,
):
"""Perform a USB control pipe transfer from the device.
Corresponds to viUsbControlIn function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
length : int, optional
wLength parameter of the setup stage of a USB control transfer.
This value also specifies the size of the data buffer to receive
the data from the optional data stage of the control transfer.
Returns
-------
bytes
The data buffer that receives the data from the optional data stage
of the control transfer
constants.StatusCode
Return value of the library call.
"""
buffer = create_string_buffer(length)
return_count = ViUInt16()
ret = library.viUsbControlIn(
session,
request_type_bitmap_field,
request_id,
request_value,
index,
length,
buffer,
byref(return_count),
)
return buffer.raw[: return_count.value], ret
def usb_control_out(
library,
session,
request_type_bitmap_field,
request_id,
request_value,
index,
data="",
):
"""Perform a USB control pipe transfer to the device.
Corresponds to viUsbControlOut function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
request_type_bitmap_field : int
bmRequestType parameter of the setup stage of a USB control transfer.
request_id : int
bRequest parameter of the setup stage of a USB control transfer.
request_value : int
wValue parameter of the setup stage of a USB control transfer.
index : int
wIndex parameter of the setup stage of a USB control transfer.
This is usually the index of the interface or endpoint.
data : bytes, optional
The data buffer that sends the data in the optional data stage of
the control transfer.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
length = len(data)
return library.viUsbControlOut(
session,
request_type_bitmap_field,
request_id,
request_value,
index,
length,
data,
)
def vxi_command_query(library, session, mode, command):
"""Send the device a miscellaneous command or query and/or retrieves the response to a previous query.
Corresponds to viVxiCommandQuery function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
mode : constants.VXICommands
Specifies whether to issue a command and/or retrieve a response.
command : int
The miscellaneous command to send.
Returns
-------
int
The response retrieved from the device
constants.StatusCode
Return value of the library call.
"""
response = ViUInt32()
ret = library.viVxiCommandQuery(session, mode, command, byref(response))
return response.value, ret
def wait_on_event(library, session, in_event_type, timeout):
"""Wait for an occurrence of the specified event for a given session.
Corresponds to viWaitOnEvent function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
in_event_type : constants.EventType
Logical identifier of the event(s) to wait for.
timeout : int
Absolute time period in time units that the resource shall wait for
a specified event to occur before returning the time elapsed error.
The time unit is in milliseconds.
Returns
-------
constants.EventType
Logical identifier of the event actually received
VISAEventContext
A handle specifying the unique occurrence of an event
constants.StatusCode
Return value of the library call.
"""
out_event_type = ViEventType()
out_context = ViEvent()
ret = library.viWaitOnEvent(
session, in_event_type, timeout, byref(out_event_type), byref(out_context)
)
return out_event_type.value, out_context, ret
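# Hedged usage sketch (not part of the library): waiting only succeeds for
# event types previously enabled on the session (via the corresponding
# enable-event call, not shown here). The timeout is in milliseconds.
#
#     event_type, context, ret = wait_on_event(
#         lib, session, constants.EventType.service_request, 10000
#     )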
def write(library, session, data):
"""Write data to device or interface synchronously.
Corresponds to viWrite function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
int
Number of bytes actually transferred
constants.StatusCode
Return value of the library call.
"""
return_count = ViUInt32()
# [ViSession, ViBuf, ViUInt32, ViPUInt32]
ret = library.viWrite(session, data, len(data), byref(return_count))
return return_count.value, ret
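# Hedged usage sketch (not part of the library): a simple query is a write
# followed by a read; both return a (count-or-data, status) pair. The command
# string is a made-up example.
#
#     count, ret = write(lib, session, b"*IDN?\n")
#     response, ret = read(lib, session, 4096)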
def write_asynchronously(library, session, data):
"""Write data to device or interface asynchronously.
Corresponds to viWriteAsync function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
data : bytes
Data to be written.
Returns
-------
VISAJobID
Job ID of this asynchronous write operation
constants.StatusCode
Return value of the library call.
"""
job_id = ViJobId()
# [ViSession, ViBuf, ViUInt32, ViPJobId]
ret = library.viWriteAsync(session, data, len(data), byref(job_id))
return job_id, ret
def write_from_file(library, session, filename, count):
"""Take data from a file and write it out synchronously.
Corresponds to viWriteFromFile function of the VISA library.
Parameters
----------
library : ctypes.WinDLL or ctypes.CDLL
ctypes wrapped library.
session : VISASession
Unique logical identifier to a session.
filename : str
Name of file from which data will be read.
count : int
Number of bytes to be written.
Returns
-------
int
Number of bytes actually transferred
constants.StatusCode
Return value of the library call.
"""
return_count = ViUInt32()
    ret = library.viWriteFromFile(session, filename, count, byref(return_count))
    return return_count.value, ret
```
#### File: pyvisa/ctwrapper/types.py
```python
import ctypes as _ctypes
from .cthelper import FUNCTYPE
# Part One: Type Assignments for VISA and Instrument Drivers, see spec table
# 3.1.1.
#
# Remark: The pointer and probably also the array variants are of no
# significance in Python because there is no native call-by-reference.
# However, as long as I'm not fully sure about this, they won't hurt.
def _type_pair(ctypes_type):
return ctypes_type, _ctypes.POINTER(ctypes_type)
def _type_triplet(ctypes_type):
return _type_pair(ctypes_type) + (_ctypes.POINTER(ctypes_type),)
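# Illustrative note (not an additional definition): each triplet below
# expands to (value type, pointer type, array type); with the definition
# above the array type is simply another POINTER to the value type, e.g.
#
#     _type_triplet(_ctypes.c_uint32)
#     # -> (c_uint32, POINTER(c_uint32), POINTER(c_uint32))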
ViUInt64, ViPUInt64, ViAUInt64 = _type_triplet(_ctypes.c_uint64)
ViInt64, ViPInt64, ViAInt64 = _type_triplet(_ctypes.c_int64)
ViUInt32, ViPUInt32, ViAUInt32 = _type_triplet(_ctypes.c_uint32)
ViInt32, ViPInt32, ViAInt32 = _type_triplet(_ctypes.c_int32)
ViUInt16, ViPUInt16, ViAUInt16 = _type_triplet(_ctypes.c_ushort)
ViInt16, ViPInt16, ViAInt16 = _type_triplet(_ctypes.c_short)
ViUInt8, ViPUInt8, ViAUInt8 = _type_triplet(_ctypes.c_ubyte)
ViInt8, ViPInt8, ViAInt8 = _type_triplet(_ctypes.c_byte)
ViAddr, ViPAddr, ViAAddr = _type_triplet(_ctypes.c_void_p)
ViChar, ViPChar, ViAChar = _type_triplet(_ctypes.c_char)
ViByte, ViPByte, ViAByte = _type_triplet(_ctypes.c_ubyte)
ViBoolean, ViPBoolean, ViABoolean = _type_triplet(ViUInt16)
ViReal32, ViPReal32, ViAReal32 = _type_triplet(_ctypes.c_float)
ViReal64, ViPReal64, ViAReal64 = _type_triplet(_ctypes.c_double)
class ViString(object):
@classmethod
def from_param(cls, obj):
if isinstance(obj, str):
return bytes(obj, "ascii")
return obj
class ViAString(object):
@classmethod
def from_param(cls, obj):
return _ctypes.POINTER(obj)
ViPString = ViString
# This follows visa.h definition, but involves a lot of manual conversion.
# ViBuf, ViPBuf, ViABuf = ViPByte, ViPByte, _ctypes.POINTER(ViPByte)
ViBuf, ViPBuf, ViABuf = ViPString, ViPString, ViAString
def buffer_to_text(buf) -> str:
return buf.value.decode("ascii")
ViRsrc = ViString
ViPRsrc = ViString
ViARsrc = ViAString
ViKeyId, ViPKeyId = ViString, ViPString
ViStatus, ViPStatus, ViAStatus = _type_triplet(ViInt32)
ViVersion, ViPVersion, ViAVersion = _type_triplet(ViUInt32)
_ViObject, ViPObject, ViAObject = _type_triplet(ViUInt32)
_ViSession, ViPSession, ViASession = _type_triplet(ViUInt32)
class ViObject(_ViObject): # type: ignore
@classmethod
def from_param(cls, obj):
if obj is None:
raise ValueError("Session cannot be None. The resource might be closed.")
return _ViObject.from_param(obj)
ViSession = ViObject
ViAttr = ViUInt32
ViConstString = _ctypes.POINTER(ViChar)
# Part Two: Type Assignments for VISA only, see spec table 3.1.2. The
# difference to the above is of no significance in Python, so I use it here
# only for easier synchronisation with the spec.
ViAccessMode, ViPAccessMode = _type_pair(ViUInt32)
ViBusAddress, ViPBusAddress = _type_pair(ViUInt32)
ViBusAddress64, ViPBusAddress64 = _type_pair(ViUInt64)
ViBusSize = ViUInt32
ViAttrState, ViPAttrState = _type_pair(ViUInt32)
# The following is weird, taken from news:<EMAIL>
ViVAList = _ctypes.POINTER(_ctypes.c_char)
ViEventType, ViPEventType, ViAEventType = _type_triplet(ViUInt32)
ViPAttr = _ctypes.POINTER(ViAttr)
ViAAttr = ViPAttr
ViEventFilter = ViUInt32
ViFindList, ViPFindList = _type_pair(ViObject)
ViEvent, ViPEvent = _type_pair(ViObject)
ViJobId, ViPJobId = _type_pair(ViUInt32)
# Class of callback functions for event handling, first type is result type
ViHndlr = FUNCTYPE(ViStatus, ViSession, ViEventType, ViEvent, ViAddr)
```
#### File: pyvisa/resources/registerbased.py
```python
from typing import Iterable, List
from .. import constants
from .resource import Resource
class RegisterBasedResource(Resource):
"""Base class for resources that use register based communication."""
def read_memory(
self,
space: constants.AddressSpace,
offset: int,
width: constants.DataWidth,
extended: bool = False,
) -> int:
"""Read a value from the specified memory space and offset.
Parameters
----------
space : constants.AddressSpace
Specifies the address space from which to read.
offset : int
Offset (in bytes) of the address or register from which to read.
width : Union[Literal[8, 16, 32, 64], constants.DataWidth]
Number of bits to read (8, 16, 32 or 64).
extended : bool, optional
Use 64 bits offset independent of the platform.
Returns
-------
data : int
Data read from memory
Raises
------
ValueError
Raised if an invalid width is specified.
"""
return self.visalib.read_memory(self.session, space, offset, width, extended)[0]
def write_memory(
self,
space: constants.AddressSpace,
offset: int,
data: int,
width: constants.DataWidth,
extended: bool = False,
) -> constants.StatusCode:
"""Write a value to the specified memory space and offset.
Parameters
----------
space : constants.AddressSpace
Specifies the address space.
offset : int
Offset (in bytes) of the address or register from which to read.
data : int
Data to write to bus.
width : Union[Literal[8, 16, 32, 64], constants.DataWidth]
Number of bits to read.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
Raises
------
ValueError
Raised if an invalid width is specified.
"""
return self.visalib.write_memory(
self.session, space, offset, data, width, extended
)
def move_in(
self,
space: constants.AddressSpace,
offset: int,
length: int,
width: constants.DataWidth,
extended: bool = False,
) -> List[int]:
"""Move a block of data to local memory from the given address space and offset.
Corresponds to viMoveIn* functions of the VISA library.
Parameters
----------
space : constants.AddressSpace
Address space from which to move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
width : Union[Literal[8, 16, 32, 64], constants.DataWidth]
Number of bits to read per element.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
data : List[int]
Data read from the bus
status_code : constants.StatusCode
Return value of the library call.
Raises
------
ValueError
Raised if an invalid width is specified.
"""
return self.visalib.move_in(
self.session, space, offset, length, width, extended
)[0]
def move_out(
self,
space: constants.AddressSpace,
offset: int,
length: int,
data: Iterable[int],
width: constants.DataWidth,
extended: bool = False,
) -> constants.StatusCode:
"""Move a block of data from local memory to the given address space and offset.
Corresponds to viMoveOut* functions of the VISA library.
Parameters
----------
space : constants.AddressSpace
Address space into which move the data.
offset : int
Offset (in bytes) of the address or register from which to read.
length : int
Number of elements to transfer, where the data width of
the elements to transfer is identical to the source data width.
data : Iterable[int]
Data to write to bus.
width : Union[Literal[8, 16, 32, 64], constants.DataWidth]
        Number of bits per element.
extended : bool, optional
Use 64 bits offset independent of the platform, by default False.
Returns
-------
constants.StatusCode
Return value of the library call.
Raises
------
ValueError
Raised if an invalid width is specified.
"""
return self.visalib.move_out(
self.session, space, offset, length, data, width, extended
)
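# Hedged usage sketch (not part of the library): with an opened
# register-based resource `inst` (an assumption for the example), the
# methods above wrap the viIn*/viOut*/viMove* calls:
#
#     value = inst.read_memory(constants.AddressSpace.a16, 0x40, 16)
#     inst.write_memory(constants.AddressSpace.a16, 0x40, value | 0x1, 16)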
```
#### File: pyvisa/resources/resource.py
```python
import contextlib
import time
import warnings
from functools import update_wrapper
from typing import (
Any,
Callable,
ContextManager,
Iterator,
Optional,
Set,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import ClassVar, Literal
from .. import attributes, constants, errors, highlevel, logger, rname, typing, util
from ..attributes import Attribute
from ..events import Event
from ..typing import VISAEventContext, VISAHandler, VISASession
class WaitResponse:
"""Class used in return of wait_on_event.
    It properly closes the context upon deletion.
A call with event_type of 0 (normally used when timed_out is True) will store
None as the event and event type, otherwise it records the proper Event.
"""
#: Reference to the event object that was waited for.
event: Event
#: Status code returned by the VISA library
ret: constants.StatusCode
    #: Did a timeout occur
timed_out: bool
def __init__(
self,
event_type: constants.EventType,
context: Optional[VISAEventContext],
ret: constants.StatusCode,
visalib: highlevel.VisaLibraryBase,
timed_out: bool = False,
):
self.event = Event(visalib, event_type, context)
self._event_type = constants.EventType(event_type)
self._context = context
self.ret = ret
self._visalib = visalib
self.timed_out = timed_out
@property
def event_type(self) -> Optional[constants.EventType]:
warnings.warn(
"event_type is deprecated and will be removed in 1.12. "
"Use the event object instead.",
FutureWarning,
)
return self._event_type
@property
def context(self) -> Optional[VISAEventContext]:
warnings.warn(
"context is deprecated and will be removed in 1.12. "
"Use the event object instead to access the event attributes.",
FutureWarning,
)
return self._context
def __del__(self) -> None:
if self.event._context is not None:
try:
self._visalib.close(self.event._context)
self.event.close()
except errors.VisaIOError:
pass
T = TypeVar("T", bound="Resource")
class Resource(object):
"""Base class for resources.
Do not instantiate directly, use
:meth:`pyvisa.highlevel.ResourceManager.open_resource`.
"""
#: Reference to the resource manager used by this resource
resource_manager: highlevel.ResourceManager
#: Reference to the VISA library instance used by the resource
visalib: highlevel.VisaLibraryBase
#: VISA attribute descriptor classes that can be used to introspect the
#: supported attributes and the possible values. The "often used" ones
#: are generally directly available on the resource.
visa_attributes_classes: ClassVar[Set[Type[attributes.Attribute]]]
@classmethod
def register(
cls, interface_type: constants.InterfaceType, resource_class: str
) -> Callable[[Type[T]], Type[T]]:
"""Create a decorator to register a class.
The class is associated to an interface type, resource class pair.
Parameters
----------
interface_type : constants.InterfaceType
Interface type for which to register a wrapper class.
resource_class : str
Resource class for which to register a wrapper class.
Returns
-------
Callable[[Type[T]], Type[T]]
Decorator registering the class. Raises TypeError if some VISA
attributes are missing on the registered class.
"""
def _internal(python_class):
highlevel.ResourceManager.register_resource_class(
interface_type, resource_class, python_class
)
return python_class
return _internal
def __init__(
self, resource_manager: highlevel.ResourceManager, resource_name: str
) -> None:
self._resource_manager = resource_manager
self.visalib = self._resource_manager.visalib
        # We store the resource name and preferentially use the private attr
        # over the public descriptor internally because the public descriptor
        # requires a live instance of the VISA library, which means it is
        # much slower but also can cause issues in error reporting when
        # accessing the repr
self._resource_name: str
try:
# Attempt to normalize the resource name. Can fail for aliases
self._resource_name = str(rname.ResourceName.from_string(resource_name))
except rname.InvalidResourceName:
self._resource_name = resource_name
self._logging_extra = {
"library_path": self.visalib.library_path,
"resource_manager.session": self._resource_manager.session,
"resource_name": self._resource_name,
"session": None,
}
#: Session handle.
self._session: Optional[VISASession] = None
@property
def session(self) -> VISASession:
"""Resource session handle.
Raises
------
errors.InvalidSession
Raised if session is closed.
"""
if self._session is None:
raise errors.InvalidSession()
return self._session
@session.setter
def session(self, value: Optional[VISASession]) -> None:
self._session = value
def __del__(self) -> None:
if self._session is not None:
self.close()
def __str__(self) -> str:
return "%s at %s" % (self.__class__.__name__, self._resource_name)
def __repr__(self) -> str:
return "<%r(%r)>" % (self.__class__.__name__, self._resource_name)
def __enter__(self) -> "Resource":
return self
def __exit__(self, *args) -> None:
self.close()
@property
def last_status(self) -> constants.StatusCode:
"""Last status code for this session."""
return self.visalib.get_last_status_in_session(self.session)
@property
def resource_info(self) -> highlevel.ResourceInfo:
"""Get the extended information of this resource."""
return self.visalib.parse_resource_extended(
self._resource_manager.session, self._resource_name
)[0]
# --- VISA attributes --------------------------------------------------------------
    #: VISA attributes require the resource to be opened in order to be accessed.
#: Please have a look at the attributes definition for more details
#: Interface type of the given session.
interface_type: Attribute[
constants.InterfaceType
] = attributes.AttrVI_ATTR_INTF_TYPE()
#: Board number for the given interface.
interface_number: Attribute[int] = attributes.AttrVI_ATTR_INTF_NUM()
#: Resource class (for example, "INSTR") as defined by the canonical resource name.
resource_class: Attribute[str] = attributes.AttrVI_ATTR_RSRC_CLASS()
#: Unique identifier for a resource compliant with the address structure.
resource_name: Attribute[str] = attributes.AttrVI_ATTR_RSRC_NAME()
#: Resource version that identifies the revisions or implementations of a resource.
implementation_version: Attribute[int] = attributes.AttrVI_ATTR_RSRC_IMPL_VERSION()
#: Current locking state of the resource.
lock_state: Attribute[
constants.AccessModes
] = attributes.AttrVI_ATTR_RSRC_LOCK_STATE()
#: Version of the VISA specification to which the implementation is compliant.
spec_version: Attribute[int] = attributes.AttrVI_ATTR_RSRC_SPEC_VERSION()
#: Manufacturer name of the vendor that implemented the VISA library.
resource_manufacturer_name: Attribute[str] = attributes.AttrVI_ATTR_RSRC_MANF_NAME()
#: Timeout in milliseconds for all resource I/O operations.
timeout: Attribute[float] = attributes.AttrVI_ATTR_TMO_VALUE()
def ignore_warning(
self, *warnings_constants: constants.StatusCode
) -> ContextManager:
"""Ignoring warnings context manager for the current resource.
Parameters
----------
warnings_constants : constants.StatusCode
Constants identifying the warnings to ignore.
"""
return self.visalib.ignore_warning(self.session, *warnings_constants)
def open(
self,
access_mode: constants.AccessModes = constants.AccessModes.no_lock,
open_timeout: int = 5000,
) -> None:
"""Opens a session to the specified resource.
Parameters
----------
access_mode : constants.AccessModes, optional
Specifies the mode by which the resource is to be accessed.
Defaults to constants.AccessModes.no_lock.
open_timeout : int, optional
If the ``access_mode`` parameter requests a lock, then this parameter
specifies the absolute time period (in milliseconds) that the
resource waits to get unlocked before this operation returns an error.
Defaults to 5000.
"""
logger.debug("%s - opening ...", self._resource_name, extra=self._logging_extra)
with self._resource_manager.ignore_warning(
constants.StatusCode.success_device_not_present
):
self.session, status = self._resource_manager.open_bare_resource(
self._resource_name, access_mode, open_timeout
)
if status == constants.StatusCode.success_device_not_present:
# The device was not ready when we opened the session.
# Now it gets five seconds more to become ready.
# Every 0.1 seconds we probe it with viClear.
start_time = time.time()
sleep_time = 0.1
try_time = 5
while time.time() - start_time < try_time:
time.sleep(sleep_time)
try:
self.clear()
break
except errors.VisaIOError as error:
if error.error_code != constants.StatusCode.error_no_listeners:
raise
self._logging_extra["session"] = self.session
logger.debug(
"%s - is open with session %s",
self._resource_name,
self.session,
extra=self._logging_extra,
)
def before_close(self) -> None:
"""Called just before closing an instrument."""
self.__switch_events_off()
def close(self) -> None:
"""Closes the VISA session and marks the handle as invalid."""
try:
logger.debug("%s - closing", self._resource_name, extra=self._logging_extra)
self.before_close()
self.visalib.close(self.session)
logger.debug(
"%s - is closed", self._resource_name, extra=self._logging_extra
)
# Mypy is confused by the idea that we can set a value we cannot get
self.session = None # type: ignore
except errors.InvalidSession:
pass
def __switch_events_off(self) -> None:
"""Switch off and discrads all events."""
self.disable_event(
constants.EventType.all_enabled, constants.EventMechanism.all
)
self.discard_events(
constants.EventType.all_enabled, constants.EventMechanism.all
)
self.visalib.uninstall_all_visa_handlers(self.session)
def get_visa_attribute(self, name: constants.ResourceAttribute) -> Any:
"""Retrieves the state of an attribute in this resource.
One should prefer the dedicated descriptor for often used attributes
since those perform checks and automatic conversion on the value.
Parameters
----------
name : constants.ResourceAttribute
Resource attribute for which the state query is made.
Returns
-------
Any
The state of the queried attribute for a specified resource.
"""
return self.visalib.get_attribute(self.session, name)[0]
def set_visa_attribute(
self, name: constants.ResourceAttribute, state: Any
) -> constants.StatusCode:
"""Set the state of an attribute.
One should prefer the dedicated descriptor for often used attributes
since those perform checks and automatic conversion on the value.
Parameters
----------
name : constants.ResourceAttribute
Attribute for which the state is to be modified.
state : Any
The state of the attribute to be set for the specified object.
Returns
-------
constants.StatusCode
Return value of the library call.
"""
return self.visalib.set_attribute(self.session, name, state)
def clear(self) -> None:
"""Clear this resource."""
self.visalib.clear(self.session)
def install_handler(
self, event_type: constants.EventType, handler: VISAHandler, user_handle=None
) -> Any:
"""Install handlers for event callbacks in this resource.
Parameters
----------
event_type : constants.EventType
Logical event identifier.
handler : VISAHandler
Handler function to be installed by a client application.
user_handle :
A value specified by an application that can be used for identifying
            handlers uniquely for an event type. Depending on the backend there
            may be restrictions on the possible values. Look at the backend
`install_visa_handler` for more details.
Returns
-------
Any
            User handle in a format amenable to the backend. This is the
            representation of the handle that should be used when uninstalling
            a handler.
"""
return self.visalib.install_visa_handler(
self.session, event_type, handler, user_handle
)
def wrap_handler(
self, callable: Callable[["Resource", Event, Any], None]
) -> VISAHandler:
"""Wrap an event handler to provide the signature expected by VISA.
The handler is expected to have the following signature:
handler(resource: Resource, event: Event, user_handle: Any) -> None.
The wrapped handler should be used only to handle events on the resource
used to wrap the handler.
"""
def event_handler(
session: VISASession,
event_type: constants.EventType,
event_context: typing.VISAEventContext,
user_handle: Any,
) -> None:
if session != self.session:
raise RuntimeError(
"When wrapping a handler, the resource used to wrap the handler"
"must be the same on which the handler will be installed."
f"Wrapping session: {self.session}, event on session: {session}"
)
event = Event(self.visalib, event_type, event_context)
try:
return callable(self, event, user_handle)
finally:
event.close()
update_wrapper(event_handler, callable)
return event_handler
def uninstall_handler(
self, event_type: constants.EventType, handler: VISAHandler, user_handle=None
) -> None:
"""Uninstalls handlers for events in this resource.
Parameters
----------
event_type : constants.EventType
Logical event identifier.
handler : VISAHandler
Handler function to be uninstalled by a client application.
user_handle : Any
The user handle returned by install_handler.
"""
self.visalib.uninstall_visa_handler(
self.session, event_type, handler, user_handle
)
def disable_event(
self, event_type: constants.EventType, mechanism: constants.EventMechanism
) -> None:
"""Disable notification for an event type(s) via the specified mechanism(s).
Parameters
----------
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be disabled.
"""
self.visalib.disable_event(self.session, event_type, mechanism)
def discard_events(
self, event_type: constants.EventType, mechanism: constants.EventMechanism
) -> None:
"""Discards event occurrences for an event type and mechanism in this resource.
Parameters
----------
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be disabled.
"""
self.visalib.discard_events(self.session, event_type, mechanism)
def enable_event(
self,
event_type: constants.EventType,
mechanism: constants.EventMechanism,
context: None = None,
) -> None:
"""Enable event occurrences for specified event types and mechanisms in this resource.
Parameters
----------
event_type : constants.EventType
Logical event identifier.
mechanism : constants.EventMechanism
Specifies event handling mechanisms to be enabled
context : None
Not currently used, leave as None.
"""
self.visalib.enable_event(self.session, event_type, mechanism, context)
def wait_on_event(
self,
in_event_type: constants.EventType,
timeout: int,
capture_timeout: bool = False,
) -> WaitResponse:
"""Waits for an occurrence of the specified event in this resource.
        Parameters
        ----------
        in_event_type : constants.EventType
Logical identifier of the event(s) to wait for.
timeout : int
Absolute time period in time units that the resource shall wait for
a specified event to occur before returning the time elapsed error.
The time unit is in milliseconds. None means waiting forever if
necessary.
capture_timeout : bool, optional
When True will not produce a VisaIOError(VI_ERROR_TMO) but instead
return a WaitResponse with timed_out=True.
Returns
-------
WaitResponse
Object that contains event_type, context and ret value.
"""
try:
event_type, context, ret = self.visalib.wait_on_event(
self.session, in_event_type, timeout
)
except errors.VisaIOError as exc:
if capture_timeout and exc.error_code == constants.StatusCode.error_timeout:
return WaitResponse(
in_event_type,
None,
constants.StatusCode.error_timeout,
self.visalib,
timed_out=True,
)
raise
return WaitResponse(event_type, context, ret, self.visalib)
def lock(
self,
timeout: Union[float, Literal["default"]] = "default",
requested_key: Optional[str] = None,
) -> str:
"""Establish a shared lock to the resource.
Parameters
----------
timeout : Union[float, Literal["default"]], optional
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
Defaults to "default" which means use self.timeout.
requested_key : Optional[str], optional
Access key used by another session with which you want your session
to share a lock or None to generate a new shared access key.
Returns
-------
str
A new shared access key if requested_key is None, otherwise, same
value as the requested_key
"""
tout = cast(float, self.timeout if timeout == "default" else timeout)
clean_timeout = util.cleanup_timeout(tout)
return self.visalib.lock(
self.session, constants.Lock.shared, clean_timeout, requested_key
)[0]
def lock_excl(self, timeout: Union[float, Literal["default"]] = "default") -> None:
"""Establish an exclusive lock to the resource.
Parameters
----------
timeout : Union[float, Literal["default"]], optional
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
Defaults to "default" which means use self.timeout.
"""
tout = cast(float, self.timeout if timeout == "default" else timeout)
clean_timeout = util.cleanup_timeout(tout)
self.visalib.lock(self.session, constants.Lock.exclusive, clean_timeout, None)
def unlock(self) -> None:
"""Relinquishes a lock for the specified resource."""
self.visalib.unlock(self.session)
@contextlib.contextmanager
def lock_context(
self,
timeout: Union[float, Literal["default"]] = "default",
requested_key: Optional[str] = "exclusive",
) -> Iterator[Optional[str]]:
"""A context that locks
Parameters
----------
timeout : Union[float, Literal["default"]], optional
Absolute time period (in milliseconds) that a resource waits to get
unlocked by the locking session before returning an error.
Defaults to "default" which means use self.timeout.
requested_key : Optional[str], optional
When using default of 'exclusive' the lock is an exclusive lock.
Otherwise it is the access key for the shared lock or None to
generate a new shared access key.
Yields
------
Optional[str]
The access_key if applicable.
"""
if requested_key == "exclusive":
self.lock_excl(timeout)
access_key = None
else:
access_key = self.lock(timeout, requested_key)
try:
yield access_key
finally:
self.unlock()
Resource.register(constants.InterfaceType.unknown, "")(Resource)
```
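A minimal usage sketch for the `Resource` API above. The address is hypothetical; any installed VISA backend that can resolve it works (with pyvisa-sim you would create the manager as `pyvisa.ResourceManager("@sim")`).
```python
import pyvisa

rm = pyvisa.ResourceManager()
# Hypothetical address; replace with one your backend can resolve.
instr = rm.open_resource("TCPIP0::127.0.0.1::INSTR")
try:
    instr.timeout = 1000  # ms, backed by the VI_ATTR_TMO_VALUE descriptor
    # Hold an exclusive lock only for the duration of the block.
    with instr.lock_context(requested_key="exclusive"):
        print(instr.resource_info.resource_name)
finally:
    instr.close()
```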
#### File: testsuite/keysight_assisted_tests/messagebased_resource_utils.py
```python
import ctypes
import gc
import logging
import time
import pytest
from pyvisa import constants, errors
from pyvisa.constants import EventType, ResourceAttribute
from pyvisa.resources import Resource
from .resource_utils import (
EventAwareResourceTestCaseMixin,
LockableResourceTestCaseMixin,
ResourceTestCase,
)
try:
import numpy as np # type: ignore
except ImportError:
np = None
class EventHandler:
"""Event handler."""
def __init__(self) -> None:
self.event_success = False
self.srq_success = False
self.io_completed = False
self.handle = None
self.session = None
def handle_event(self, session, event_type, event, handle=None):
"""Event handler
        Ctypes handlers are expected to return an integer.
"""
self.session = session
self.handle = handle
if event_type == EventType.service_request:
self.event_success = True
self.srq_success = True
return 0
if event_type == EventType.io_completion:
self.event_success = True
self.io_completed = True
return 0
else:
self.event_success = True
return 0
def simplified_handler(self, resource, event, handle=None):
"""Simplified handler that can be wrapped."""
self.session = resource.session
self.handle = handle
event_type = event.event_type
if event_type == EventType.service_request:
self.event_success = True
self.srq_success = True
return None
elif event_type == EventType.io_completion:
self.event_success = True
self.io_completed = True
return None
else:
self.event_success = True
return None
class MessagebasedResourceTestCase(ResourceTestCase):
"""Base test case for all message based resources."""
#: Type of resource being tested in this test case.
#: See RESOURCE_ADDRESSES in the __init__.py file of this package for
#: acceptable values
RESOURCE_TYPE = ""
    # Any test involving communication requires first writing the data to the
    # instrument and then requesting it to send the data back
def setup_method(self):
"""Create a resource using the address matching the type."""
super().setup_method()
self.instr.write_termination = "\n"
self.instr.read_termination = "\n"
self.instr.timeout = 100
def compare_user_handle(self, h1, h2):
"""Function comparing to user handle as passed to a callback.
We need such an indirection because we cannot safely always return
a Python object and most ctypes object do not compare equal.
"""
if isinstance(h1, ctypes.Structure):
return h1 == h2
elif hasattr(h1, "value"):
return h1.value == h2.value
else: # assume an array
return all((i == j for i, j in zip(h1, h2)))
def test_encoding(self):
"""Tets setting the string encoding."""
assert self.instr.encoding == "ascii"
self.instr.encoding = "utf-8"
with pytest.raises(LookupError):
self.instr.encoding = "test"
def test_termchars(self):
"""Test modifying the termchars."""
# Write termination
self.instr.write_termination = "\r\n"
assert self.instr.write_termination == "\r\n"
self.instr.read_termination = "\r\0"
assert self.instr.read_termination == "\r\0"
assert self.instr.get_visa_attribute(ResourceAttribute.termchar) == ord("\0")
assert self.instr.get_visa_attribute(ResourceAttribute.termchar_enabled)
# Disable read termination
self.instr.read_termination = None
assert self.instr.get_visa_attribute(ResourceAttribute.termchar) == ord("\n")
assert not self.instr.get_visa_attribute(ResourceAttribute.termchar_enabled)
# Ban repeated term chars
with pytest.raises(ValueError):
self.instr.read_termination = "\n\n"
def test_write_raw_read_bytes(self):
"""Test writing raw data and reading a specific number of bytes."""
# Reading all bytes at once
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
count = self.instr.write_raw(b"SEND\n")
assert count == 5
self.instr.flush(constants.VI_READ_BUF)
msg = self.instr.read_bytes(5, chunk_size=2)
assert msg == b"test\n"
# Reading one byte at a time
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
self.instr.write_raw(b"SEND\n")
for ch in b"test\n":
assert self.instr.read_bytes(1) == ch.to_bytes(1, "little")
# Breaking on termchar
self.instr.read_termination = "\r"
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"te\rst\r\n")
self.instr.write_raw(b"SEND\n")
assert self.instr.read_bytes(100, break_on_termchar=True) == b"te\r"
assert self.instr.read_bytes(100, break_on_termchar=True) == b"st\r"
assert self.instr.read_bytes(1) == b"\n"
# Breaking on end of message
self.instr.read_termination = "\n"
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
self.instr.write_raw(b"SEND\n")
assert self.instr.read_bytes(100, break_on_termchar=True) == b"test\n"
def test_handling_exception_in_read_bytes(self, caplog):
"""Test handling exception in read_bytes (monkeypatching)"""
def false_read(session, size):
raise errors.VisaIOError(constants.VI_ERROR_ABORT)
read = self.instr.visalib.read
self.instr.visalib.read = false_read
with caplog.at_level(logging.DEBUG):
try:
self.instr.read_bytes(1)
except errors.VisaIOError:
pass
finally:
self.instr.visalib.read = read
assert "- exception while reading:" in caplog.records[1].message
def test_write_raw_read_raw(self):
"""Test writing raw data and reading an answer."""
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
self.instr.write_raw(b"SEND\n")
assert self.instr.read_raw(size=2) == b"test\n"
def test_clear(self):
"""Test clearing the incoming buffer."""
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
self.instr.write_raw(b"SEND\n")
self.instr.clear()
self.instr.timeout = 10
with pytest.raises(errors.VisaIOError):
self.instr.read_raw()
def test_write_read(self):
"""Test writing and reading."""
self.instr.write_termination = "\n"
self.instr.read_termination = "\r\n"
self.instr.write("RECEIVE")
with pytest.warns(UserWarning):
self.instr.write("test\r\n")
count = self.instr.write("SEND")
assert count == 5
assert self.instr.read() == "test"
# Missing termination chars
self.instr.read_termination = "\r\n"
self.instr.write("RECEIVE")
self.instr.write("test")
self.instr.write("SEND")
with pytest.warns(Warning):
assert self.instr.read() == "test\n"
# Dynamic termination
self.instr.write_termination = "\r"
self.instr.write("RECEIVE\n", termination=False)
self.instr.write("test\r", termination="\n")
self.instr.write("SEND", termination="\n")
assert self.instr.read(termination="\r") == "test"
# Test query
self.instr.write_termination = "\n"
self.instr.write("RECEIVE")
self.instr.write("test\r")
tic = time.time()
assert self.instr.query("SEND", delay=0.5) == "test"
assert time.time() - tic > 0.49
# Test handling repeated term char
self.instr.read_termination = "\n"
for char in ("\r", None):
self.instr.write_termination = "\n" if char else "\r"
self.instr.write("RECEIVE", termination="\n")
with pytest.warns(Warning):
self.instr.write("test\r", termination=char)
self.instr.write("", termination="\n")
self.instr.write("SEND", termination="\n")
assert self.instr.read() == "test\r\r"
# TODO not sure how to test encoding
def test_handling_exception_in_read_raw(self, caplog):
"""Test handling exception in read_bytes (monkeypatching)"""
def false_read(session, size):
raise errors.VisaIOError(constants.VI_ERROR_ABORT)
read = self.instr.visalib.read
self.instr.visalib.read = false_read
with caplog.at_level(logging.DEBUG):
try:
self.instr.read()
except errors.VisaIOError:
pass
finally:
self.instr.visalib.read = read
assert caplog.records
def test_write_ascii_values(self):
"""Test writing ascii values."""
# Standard separator
values = [1, 2, 3, 4, 5]
self.instr.write("RECEIVE")
count = self.instr.write_ascii_values("", values, "d")
assert count == 10
self.instr.write("SEND")
assert self.instr.read() == "1,2,3,4,5"
# Non standard separator and termination
self.instr.write_termination = "\r"
self.instr.write("RECEIVE", termination="\n")
self.instr.write_ascii_values("", values, "d", separator=";", termination=False)
self.instr.write("", termination="\n")
self.instr.write("SEND", termination="\n")
assert self.instr.read() == "1;2;3;4;5"
# Test handling repeated term char
for char in ("\r", None):
self.instr.write_termination = "\n" if char else "\r"
self.instr.write("RECEIVE", termination="\n")
with pytest.warns(Warning):
values = [1, 2, 3, 4, 5]
self.instr.write_ascii_values(
"\r", values, "s", separator=";", termination=char
)
self.instr.write("", termination="\n")
self.instr.write("SEND", termination="\n")
assert self.instr.read() == "\r1;2;3;4;5\r"
@pytest.mark.parametrize(
"hfmt, prefix", zip(("ieee", "hp", "empty"), (b"#212", b"#A\x0c\x00", b""))
)
def test_write_binary_values(self, hfmt, prefix):
"""Test writing binary data."""
values = [1, 2, 3, 4, 5, 6]
self.instr.write_termination = "\n"
self.instr.write("RECEIVE")
count = self.instr.write_binary_values("", values, "h", header_fmt=hfmt)
        # Each integer encoded as h uses 2 bytes
assert count == len(prefix) + 12 + 1
self.instr.write("SEND")
msg = self.instr.read_bytes(13 + len(prefix))
assert msg == prefix + b"\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\n"
if hfmt == "hp":
fl_prefix = prefix[0:2] + prefix[-2::][::-1]
else:
fl_prefix = prefix
self.instr.write_termination = "\r"
self.instr.write("RECEIVE", termination="\n")
self.instr.write_binary_values(
"", values, "h", is_big_endian=True, termination=False, header_fmt=hfmt
)
self.instr.write("", termination="\n")
self.instr.write("SEND", termination="\n")
assert (
self.instr.read_bytes(13 + len(prefix))
== fl_prefix + b"\x00\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\n"
)
# Test handling repeated term char
for char in ("\r", None):
self.instr.write_termination = "\n" if char else "\r"
self.instr.write("RECEIVE", termination="\n")
with pytest.warns(Warning):
self.instr.write_binary_values(
"\r", values, "h", header_fmt=hfmt, termination=char
)
self.instr.write("", termination="\n")
self.instr.write("SEND", termination="\n")
msg = self.instr.read()
assert (
msg
== "\r"
+ prefix.decode("ascii")
+ "\x01\x00\x02\x00\x03\x00\x04\x00\x05\x00\x06\x00\r"
)
# Wrong header format
with pytest.raises(ValueError):
self.instr.write_binary_values("", values, "h", header_fmt="zxz")
def test_read_ascii_values(self):
"""Test reading ascii values."""
# Standard separator
self.instr.write("RECEIVE")
self.instr.write("1,2,3,4,5")
self.instr.write("SEND")
values = self.instr.read_ascii_values()
assert type(values[0]) is float
assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
# Non standard separator and termination
self.instr.write("RECEIVE")
self.instr.write("1;2;3;4;5")
tic = time.time()
values = self.instr.query_ascii_values(
"SEND", converter="d", separator=";", delay=0.5
)
assert time.time() - tic > 0.5
assert type(values[0]) is int
assert values == [1, 2, 3, 4, 5]
# Numpy container
if np:
self.instr.write("RECEIVE")
self.instr.write("1,2,3,4,5")
self.instr.write("SEND")
values = self.instr.read_ascii_values(container=np.array)
expected = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
assert values.dtype is expected.dtype
np.testing.assert_array_equal(values, expected)
@pytest.mark.parametrize("hfmt", ("ieee", "hp"))
def test_read_binary_values(self, hfmt):
"""Test reading binary data."""
# TODO test handling binary decoding issue (troublesome)
self.instr.read_termination = "\r"
        # 3328 as a little-endian binary short is b"\x00\r", which lets us
        # interrupt the transmission midway to test some corner cases
data = [1, 2, 3328, 3, 4, 5, 6, 7]
self.instr.write("RECEIVE")
self.instr.write_binary_values(
"", data, "h", header_fmt=hfmt, termination="\r\n"
)
self.instr.write("SEND")
new = self.instr.read_binary_values(
datatype="h",
is_big_endian=False,
header_fmt=hfmt,
expect_termination=True,
chunk_size=8,
)
self.instr.read_bytes(1)
assert data == new
self.instr.write("RECEIVE")
self.instr.write_binary_values(
"", data, "h", header_fmt=hfmt, is_big_endian=True
)
new = self.instr.query_binary_values(
"SEND",
datatype="h",
header_fmt=hfmt,
is_big_endian=True,
expect_termination=False,
chunk_size=8,
container=np.array if np else list,
)
self.instr.read_bytes(1)
if np:
np.testing.assert_array_equal(new, np.array(data, dtype=np.int16))
else:
assert data == new
def test_read_query_binary_values_invalid_header(self):
"""Test we properly handle an invalid header."""
data = [1, 2, 3328, 3, 4, 5, 6, 7]
self.instr.write("RECEIVE")
self.instr.write_binary_values(
"", data, "h", header_fmt="ieee", is_big_endian=True
)
self.instr.write("SEND")
with pytest.raises(ValueError):
self.instr.read_binary_values(
datatype="h",
is_big_endian=False,
header_fmt="invalid",
expect_termination=True,
chunk_size=8,
)
self.instr.write("RECEIVE")
self.instr.write_binary_values(
"", data, "h", header_fmt="ieee", is_big_endian=True
)
with pytest.raises(ValueError):
self.instr.query_binary_values(
"*IDN",
datatype="h",
is_big_endian=False,
header_fmt="invalid",
expect_termination=True,
chunk_size=8,
)
# Not sure how to test this
@pytest.mark.skip
def test_handling_malformed_binary(self):
""""""
pass
@pytest.mark.parametrize(
"hfmt, header", zip(("ieee", "hp", "empty"), ("#10", "#A\x00\x00", ""))
)
def test_read_binary_values_unreported_length(self, hfmt, header):
"""Test reading binary data."""
self.instr.read_termination = "\r"
        # 3328 as a little-endian binary short is b"\x00\r", which lets us
        # interrupt the transmission midway to test some corner cases
data = [1, 2, 3328, 3, 4, 5]
self.instr.write("RECEIVE")
self.instr.write(
header + "\x01\x00\x02\x00\x00\r\x03\x00\x04\x00\x05\x00",
termination="\r\n",
)
self.instr.write("SEND")
new = self.instr.read_binary_values(
datatype="h",
is_big_endian=False,
header_fmt=hfmt,
expect_termination=True,
chunk_size=6,
data_points=6,
)
self.instr.read_bytes(1)
assert data == new
self.instr.write("RECEIVE")
self.instr.write(
header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
termination="\r\n",
)
new = self.instr.query_binary_values(
"SEND",
datatype="h",
header_fmt=hfmt,
is_big_endian=True,
expect_termination=False,
chunk_size=6,
container=np.array if np else list,
data_points=6,
)
self.instr.read_bytes(1)
if np:
np.testing.assert_array_equal(new, np.array(data, dtype=np.int16))
else:
assert data == new
# Check we do error on unreported/unspecified length
self.instr.write("RECEIVE")
self.instr.write(
header + "\x01\x00\x02\x00\x00\r\x03\x00\x04\x00\x05\x00",
termination="\r\n",
)
self.instr.write("SEND")
with pytest.raises(ValueError):
self.instr.read_binary_values(
datatype="h",
is_big_endian=False,
header_fmt=hfmt,
expect_termination=True,
chunk_size=6,
)
def test_delay_in_query_ascii(self):
"""Test handling of the delay argument in query_ascii_values."""
# Test using the instrument wide delay
self.instr.query_delay = 1.0
self.instr.write("RECEIVE")
self.instr.write("1,2,3,4,5")
tic = time.perf_counter()
values = self.instr.query_ascii_values("SEND")
assert time.perf_counter() - tic > 0.99
assert type(values[0]) is float
assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
# Test specifying the delay
self.instr.query_delay = 0.0
self.instr.write("RECEIVE")
self.instr.write("1,2,3,4,5")
tic = time.perf_counter()
values = self.instr.query_ascii_values("SEND", delay=1.0)
assert time.perf_counter() - tic > 0.99
assert type(values[0]) is float
assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
# Test specifying a 0 delay
self.instr.query_delay = 1.0
self.instr.write("RECEIVE")
self.instr.write("1,2,3,4,5")
tic = time.perf_counter()
values = self.instr.query_ascii_values("SEND", delay=0.0)
assert time.perf_counter() - tic < 0.99
assert type(values[0]) is float
assert values == [1.0, 2.0, 3.0, 4.0, 5.0]
def test_instrument_wide_delay_in_query_binary(self):
"""Test handling delay in query_ascii_values."""
header = "#10"
data = [1, 2, 3328, 3, 4, 5]
# Test using the instrument wide delay
self.instr.query_delay = 1.0
self.instr.write("RECEIVE")
self.instr.write(
header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
termination="\r\n",
)
tic = time.perf_counter()
new = self.instr.query_binary_values(
"SEND",
datatype="h",
header_fmt="ieee",
is_big_endian=True,
expect_termination=False,
chunk_size=6,
data_points=6,
)
assert time.perf_counter() - tic > 0.99
assert data == new
def test_delay_args_in_query_binary(self):
"""Test handling of the delay argument in query_ascii_values."""
header = "#10"
data = [1, 2, 3328, 3, 4, 5]
self.instr.query_delay = 0.0
self.instr.write("RECEIVE")
self.instr.write(
header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
termination="\r\n",
)
tic = time.perf_counter()
new = self.instr.query_binary_values(
"SEND",
datatype="h",
header_fmt="ieee",
is_big_endian=True,
expect_termination=False,
chunk_size=6,
data_points=6,
delay=1.0,
)
assert time.perf_counter() - tic > 0.99
assert data == new
def test_no_delay_args_in_query_binary(self):
"""Test handling of the delay argument in query_ascii_values."""
header = "#10"
data = [1, 2, 3328, 3, 4, 5]
self.instr.query_delay = 1.0
self.instr.write("RECEIVE")
self.instr.write(
header + "\x00\x01\x00\x02\r\x00\x00\x03\x00\x04\x00\x05",
termination="\r\n",
)
tic = time.perf_counter()
new = self.instr.query_binary_values(
"SEND",
datatype="h",
header_fmt="ieee",
is_big_endian=True,
expect_termination=False,
chunk_size=6,
data_points=6,
delay=0.0,
)
assert time.perf_counter() - tic < 1.0
assert data == new
def test_stb(self):
"""Test reading the status byte."""
assert 0 <= self.instr.stb <= 256
assert 0 <= self.instr.read_stb() <= 256
class EventAwareMessagebasedResourceTestCaseMixin(EventAwareResourceTestCaseMixin):
"""Mixin for message based resources supporting events."""
def test_manually_called_handlers(self):
"""Test calling manually even handler."""
class FalseResource(Resource):
session = None
visalib = None
_session = None
def __init__(self):
pass
fres = FalseResource()
fres2 = FalseResource()
fres2.session = 1
handler = EventHandler()
false_wrapped_handler = fres.wrap_handler(handler.simplified_handler)
false_wrapped_handler(None, EventType.clear, 1, 1)
assert handler.event_success
with pytest.raises(RuntimeError):
false_wrapped_handler(1, EventType.clear, 1, 1)
def test_handling_invalid_handler(self):
"""Test handling an error related to a wrong handler type."""
with pytest.raises(errors.VisaTypeError):
event_type = EventType.exception
self.instr.install_handler(event_type, 1, object())
def test_uninstalling_missing_visa_handler(self):
"""Test uninstalling a visa handler that was not registered."""
handler1 = EventHandler()
handler2 = EventHandler()
event_type = EventType.exception
self.instr.install_handler(event_type, handler1.handle_event)
with pytest.raises(errors.UnknownHandler):
self.instr.uninstall_handler(event_type, handler2.handle_event)
self.instr.uninstall_handler(event_type, handler1.handle_event)
with pytest.raises(errors.UnknownHandler):
self.instr.uninstall_handler(event_type, handler2.handle_event)
def test_handler_clean_up_on_resource_del(self):
"""Test that handlers are properly cleaned when a resource is deleted."""
handler = EventHandler()
event_type = EventType.exception
self.instr.install_handler(event_type, handler.handle_event)
self.instr = None
gc.collect()
assert not self.rm.visalib.handlers
def test_uninstall_all_handlers(self):
"""Test uninstall all handlers from all sessions."""
handler = EventHandler()
event_type = EventType.exception
self.instr.install_handler(event_type, handler.handle_event)
self.rm.visalib.uninstall_all_visa_handlers(None)
assert not self.rm.visalib.handlers
def test_manual_async_read(self):
"""Test handling IOCompletion event which has extra attributes."""
# Prepare message
self.instr.write_raw(b"RECEIVE\n")
self.instr.write_raw(b"test\n")
self.instr.write_raw(b"SEND\n")
# Enable event handling
event_type = EventType.io_completion
event_mech = constants.EventMechanism.queue
wait_time = 2000 # set time that program waits to receive event
self.instr.enable_event(event_type, event_mech, None)
try:
visalib = self.instr.visalib
buffer, job_id, status_code = visalib.read_asynchronously(
self.instr.session, 10
)
assert buffer is visalib.get_buffer_from_id(job_id)
response = self.instr.wait_on_event(event_type, wait_time)
finally:
self.instr.disable_event(event_type, event_mech)
assert response.event.status == constants.StatusCode.success
assert bytes(buffer) == bytes(response.event.buffer)
assert bytes(response.event.data) == b"test\n"
assert response.event.return_count == 5
assert response.event.operation_name == "viReadAsync"
def test_getting_unknown_buffer(self):
"""Test getting a buffer with a wrong ID."""
assert self.instr.visalib.get_buffer_from_id(1) is None
def test_wait_on_event_timeout(self):
"""Test waiting on a VISA event."""
event_type = EventType.service_request
event_mech = constants.EventMechanism.queue
# Emit a clear to avoid dealing with previous requests
self.instr.clear()
self.instr.enable_event(event_type, event_mech, None)
try:
response = self.instr.wait_on_event(event_type, 10, capture_timeout=True)
finally:
self.instr.disable_event(event_type, event_mech)
assert response.timed_out
assert response.event.event_type == event_type
with pytest.raises(errors.VisaIOError):
self.instr.enable_event(event_type, event_mech, None)
try:
response = self.instr.wait_on_event(event_type, 10)
finally:
self.instr.disable_event(event_type, event_mech)
def test_wait_on_event(self):
"""Test waiting on a VISA event."""
event_type = EventType.service_request
event_mech = constants.EventMechanism.queue
wait_time = 2000 # set time that program waits to receive event
self.instr.enable_event(event_type, event_mech, None)
self.instr.write("RCVSLOWSRQ")
self.instr.write("1")
self.instr.write("SENDSLOWSRQ")
try:
response = self.instr.wait_on_event(event_type, wait_time)
finally:
self.instr.disable_event(event_type, event_mech)
assert not response.timed_out
assert response.event.event_type == EventType.service_request
assert self.instr.read() == "1"
with pytest.warns(FutureWarning):
response.event_type
with pytest.warns(FutureWarning):
response.context
def test_managing_visa_handler(self):
"""Test using visa handlers."""
def _test(handle):
handler = EventHandler()
event_type = EventType.service_request
event_mech = constants.EventMechanism.handler
user_handle = self.instr.install_handler(
event_type, handler.handle_event, user_handle=handle
)
self.instr.enable_event(event_type, event_mech, None)
self.instr.write("RCVSLOWSRQ")
self.instr.write("1")
self.instr.write("SENDSLOWSRQ")
try:
t1 = time.time()
while not handler.event_success:
if (time.time() - t1) > 2:
break
time.sleep(0.1)
finally:
self.instr.disable_event(event_type, event_mech)
self.instr.uninstall_handler(
event_type, handler.handle_event, user_handle
)
assert handler.session == self.instr.session
assert self.compare_user_handle(handler.handle, user_handle)
assert handler.srq_success
assert self.instr.read() == "1"
self.instr.clear()
class Point(ctypes.Structure):
_fields_ = [("x", ctypes.c_int), ("y", ctypes.c_int)]
def __eq__(self, other):
if type(self) is not type(other):
return False
return self.x == other.x and self.y == other.y
for handle in (1, 1.0, "1", [1], [1.0], Point(1, 2)):
print(handle)
_test(handle)
def test_wrapping_handler(self):
"""Test wrapping a handler using a Resource."""
handler = EventHandler()
event_type = EventType.service_request
event_mech = constants.EventMechanism.handler
wrapped_handler = self.instr.wrap_handler(handler.simplified_handler)
user_handle = self.instr.install_handler(event_type, wrapped_handler, 1)
self.instr.enable_event(event_type, event_mech, None)
self.instr.write("RCVSLOWSRQ")
self.instr.write("1")
self.instr.write("SENDSLOWSRQ")
try:
t1 = time.time()
while not handler.event_success:
if (time.time() - t1) > 2:
break
time.sleep(0.1)
finally:
self.instr.disable_event(event_type, event_mech)
self.instr.uninstall_handler(event_type, wrapped_handler, user_handle)
assert self.instr.session == handler.session
assert self.compare_user_handle(handler.handle, user_handle)
assert handler.srq_success
assert self.instr.read() == "1"
def test_bare_handler(self):
"""Test using a bare handler passing raw backend values."""
from pyvisa import ctwrapper
if not isinstance(self.instr.visalib, ctwrapper.IVIVisaLibrary):
return
ctwrapper.WRAP_HANDLER = False
try:
handler = EventHandler()
event_type = EventType.service_request
event_mech = constants.EventMechanism.handler
user_handle = self.instr.install_handler(
event_type, handler.handle_event, 1
)
self.instr.enable_event(event_type, event_mech, None)
self.instr.write("RCVSLOWSRQ")
self.instr.write("1")
self.instr.write("SENDSLOWSRQ")
try:
t1 = time.time()
while not handler.event_success:
if (time.time() - t1) > 2:
break
time.sleep(0.1)
finally:
self.instr.disable_event(event_type, event_mech)
self.instr.uninstall_handler(
event_type, handler.handle_event, user_handle
)
assert self.instr.session == handler.session.value
assert self.compare_user_handle(handler.handle.contents, user_handle)
assert handler.srq_success
assert self.instr.read() == "1"
finally:
ctwrapper.WRAP_HANDLER = True
class LockableMessagedBasedResourceTestCaseMixin(LockableResourceTestCaseMixin):
"""Mixing for message based resources supporting locking."""
def test_shared_locking(self):
"""Test locking/unlocking a resource."""
instr2 = self.rm.open_resource(str(self.rname))
instr3 = self.rm.open_resource(str(self.rname))
key = self.instr.lock()
instr2.lock(requested_key=key)
assert self.instr.query("*IDN?")
assert instr2.query("*IDN?")
with pytest.raises(errors.VisaIOError):
instr3.query("*IDN?")
# Share the lock for a limited time
with instr3.lock_context(requested_key=key) as key2:
assert instr3.query("*IDN?")
assert key == key2
# Stop sharing the lock
instr2.unlock()
with pytest.raises(errors.VisaIOError):
instr2.query("*IDN?")
with pytest.raises(errors.VisaIOError):
instr3.query("*IDN?")
self.instr.unlock()
assert instr3.query("*IDN?")
def test_exclusive_locking(self):
"""Test locking/unlocking a resource."""
instr2 = self.rm.open_resource(str(self.rname))
self.instr.lock_excl()
with pytest.raises(errors.VisaIOError):
instr2.query("*IDN?")
self.instr.unlock()
assert instr2.query("*IDN?")
# Share the lock for a limited time
with self.instr.lock_context(requested_key="exclusive") as key:
assert key is None
with pytest.raises(errors.VisaIOError):
instr2.query("*IDN?")
```
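For context, a sketch of the service-request handler flow these tests exercise, assuming a message-based instrument at a hypothetical address that raises SRQs:
```python
import time

import pyvisa
from pyvisa import constants
from pyvisa.constants import EventType

rm = pyvisa.ResourceManager()
instr = rm.open_resource("GPIB0::12::INSTR")  # hypothetical address

def on_srq(resource, event, user_handle):
    # wrap_handler delivers the Resource and Event objects directly.
    print("SRQ on", resource.resource_name, "event", event.event_type)

wrapped = instr.wrap_handler(on_srq)
handle = instr.install_handler(EventType.service_request, wrapped, 0)
instr.enable_event(EventType.service_request, constants.EventMechanism.handler, None)
try:
    time.sleep(1.0)  # give the instrument time to raise an SRQ
finally:
    instr.disable_event(EventType.service_request, constants.EventMechanism.handler)
    instr.uninstall_handler(EventType.service_request, wrapped, handle)
    instr.close()
```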
#### File: testsuite/keysight_assisted_tests/test_shell.py
```python
import os
import time
from contextlib import redirect_stdout
from io import StringIO
from subprocess import PIPE, Popen
from threading import Event, Lock, Thread
from pyvisa import constants, errors
from pyvisa.resources import Resource
from pyvisa.rname import to_canonical_name
from pyvisa.shell import VisaShell
from .. import BaseTestCase
from . import ALIASES, RESOURCE_ADDRESSES, require_virtual_instr
class SubprocessOutputPoller:
"""Continuously check the stdout of a subprocess."""
def __init__(self, process):
super().__init__()
self.process = process
self._lines = []
self._lines_lock = Lock()
self._last_seen = time.monotonic()
self.data_ready = Event()
self._polling_thread = Thread(target=self.poll_stdout)
self._ready_thread = Thread(target=self.check_ready)
# Start background threads
self._polling_thread.start()
self._ready_thread.start()
def poll_stdout(self):
"""Continously read stdout and update the lines.
When no new data arrive after 1s consider that the data are ready.
"""
for line in iter(self.process.stdout.readline, b""):
with self._lines_lock:
self._lines.append(line.rstrip())
self._last_seen = time.monotonic()
def check_ready(self):
"""Check if we got complete data."""
while True:
time.sleep(0.05)
if self._lines and time.monotonic() - self._last_seen > 0.5:
self.data_ready.set()
if not self._polling_thread.is_alive():
break
def get_lines(self):
"""Get the collected lines."""
with self._lines_lock:
lines = self._lines
self._lines = []
self.data_ready.clear()
return lines
def shutdown(self):
"""Wait for threads to die after the process is done."""
self._polling_thread.join()
self._ready_thread.join()
@require_virtual_instr
class TestVisaShell(BaseTestCase):
"""Test the VISA shell."""
def setup_method(self):
"""Start the shell in a subprocess."""
os.environ["COVERAGE_PROCESS_START"] = ".coveragerc"
self.shell = Popen(["pyvisa-shell"], stdin=PIPE, stdout=PIPE)
self.reader = SubprocessOutputPoller(self.shell)
self.reader.data_ready.wait(1)
self.reader.get_lines()
def open_resource(self):
lines = self.communicate(f"open {list(RESOURCE_ADDRESSES.values())[0]}")
assert b"has been opened." in lines[0]
def communicate(self, msg):
"""Write a message on stdin and collect the answer."""
self.shell.stdin.write(msg.encode("ascii") + b"\n")
self.shell.stdin.flush()
self.reader.data_ready.wait(1)
return self.reader.get_lines()
def teardown_method(self):
if self.shell:
self.shell.stdin.write(b"exit\n")
self.shell.stdin.flush()
self.shell.stdin.close()
self.shell.terminate()
self.shell.wait(0.1)
self.reader.shutdown()
def test_complete_open(self):
"""Test providing auto-completion for open."""
shell = VisaShell()
completions = shell.complete_open("TCPIP", 0, 0, 0)
assert to_canonical_name(RESOURCE_ADDRESSES["TCPIP::INSTR"]) in completions
# Test getting an alias from the completion
completions = shell.complete_open("tcp", 0, 0, 0)
assert "tcpip" in completions
def test_list(self):
"""Test listing the connected resources."""
lines = self.communicate("list")
msg = []
for i, rsc in enumerate(RESOURCE_ADDRESSES.values()):
if not rsc.endswith("INSTR"):
continue
msg.append(f"({i:2d}) {to_canonical_name(rsc)}")
if rsc in ALIASES:
msg.append(f" alias: {ALIASES[rsc]}")
print(lines, msg)
for m in msg:
assert any(m.encode("ascii") in line for line in lines)
# TODO fix argument handling to allow filtering
def test_list_handle_error(self):
"""Test handling an error in listing resources."""
shell = VisaShell()
shell.resource_manager = None
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_list("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_open_no_args(self):
"""Test opening without any argument."""
lines = self.communicate("open")
assert b"A resource name must be specified." in lines[0]
def test_open_by_number(self):
"""Test opening based on the index of the resource."""
lines = self.communicate("open 0")
assert b'Not a valid resource number. Use the command "list".' in lines[0]
lines = self.communicate("list")
lines = self.communicate("open 0")
rsc = list(RESOURCE_ADDRESSES.values())[0]
assert f"{to_canonical_name(rsc)} has been opened.".encode("ascii") in lines[0]
lines = self.communicate("open 0")
assert (
b"You can only open one resource at a time. "
b"Please close the current one first."
) in lines[0]
def test_open_by_address(self):
"""Test opening based on the resource address."""
rsc = list(RESOURCE_ADDRESSES.values())[0]
lines = self.communicate(f"open {rsc}")
assert f"{rsc} has been opened.".encode("ascii") in lines[0]
def test_open_handle_exception(self):
"""Test handling an exception during opening."""
lines = self.communicate('open ""')
assert b"VI_ERROR_INV_RSRC_NAME" in lines[0]
def test_handle_double_open(self):
"""Test handling before closing resource."""
rsc = list(RESOURCE_ADDRESSES.values())[0]
lines = self.communicate(f"open {rsc}")
lines = self.communicate(f"open {rsc}")
assert (
b"You can only open one resource at a time. "
b"Please close the current one first."
) in lines[0]
def test_command_on_closed_resource(self):
"""Test all the commands that cannot be run without opening a resource."""
for cmd in ("close", "write", "read", "query", "termchar", "timeout", "attr"):
lines = self.communicate(cmd)
assert b'There are no resources in use. Use the command "open".' in lines[0]
def test_close(self):
"""Test closing a resource."""
rsc = list(RESOURCE_ADDRESSES.values())[0]
lines = self.communicate(f"open {rsc}")
assert b"has been opened." in lines[0]
lines = self.communicate("close")
assert b"The resource has been closed." in lines[0]
lines = self.communicate(f"open {rsc}")
assert b"has been opened." in lines[0]
def test_close_handle_error(self):
"""Test handling an error while closing."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_close("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_query(self):
"""querying a value from the instrument."""
self.open_resource()
lines = self.communicate("query *IDN?")
assert b"Response:" in lines[0]
def test_query_handle_error(self):
"""Test handling an error in query."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_query("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_read_write(self):
"""Test writing/reading values from the resource."""
self.open_resource()
lines = self.communicate("write *IDN?")
lines = self.communicate("read")
assert b"Keysight " in lines[0]
def test_read_handle_error(self):
"""Test handling an error in read."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_read("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_write_handle_error(self):
"""Test handling an error in write."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_write("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_timeout_get(self):
"""Test accessing the timeout."""
self.open_resource()
lines = self.communicate("timeout")
assert b"Timeout: " in lines[0]
def test_timeout_get_handle_error(self):
"""Test handling an error in getting teh timeout."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_timeout("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_timeout_set(self):
"""Test setting the timeout."""
self.open_resource()
lines = self.communicate("timeout 1000")
assert b"Done" in lines[0]
lines = self.communicate("timeout")
assert b"Timeout: 1000ms" in lines[0]
def test_timeout_set_handle_error(self):
"""Test handling an error in setting the timeout"""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_timeout("1000")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_print_attr_list(self):
"""Test printing attribute list."""
class FalseResource:
@classmethod
def get_visa_attribute(cls, id):
if id == constants.VI_ATTR_TMO_VALUE:
raise errors.VisaIOError(constants.VI_ERROR_NSUP_ATTR)
elif id == constants.VI_ATTR_INTF_NUM:
raise Exception("Long text: aaaaaaaaaaaaaaaaaaaa")
else:
raise Exception("Test")
FalseResource.visa_attributes_classes = Resource.visa_attributes_classes
shell = VisaShell()
shell.current = FalseResource
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.print_attribute_list()
output = temp_stdout.getvalue()
assert "Long text:..." in output
def test_attr_no_args(self):
"""Test getting the list of attributes"""
self.open_resource()
lines = self.communicate("attr")
assert b"VISA name" in lines[1]
def test_attr_too_many_args(self):
"""Test handling wrong args to attr."""
self.open_resource()
lines = self.communicate("attr 1 2 3")
assert (
b"Invalid syntax, use `attr <name>` to get;"
b" or `attr <name> <value>` to set" in lines[0]
)
def test_issue_in_getting_attr(self):
"""Test handling exception in getting an attribute."""
shell = VisaShell()
shell.do_open(list(RESOURCE_ADDRESSES.values())[0])
def broken_get_visa_attribute(self, name=""):
raise Exception("Exception")
# Issue on VI_
old = Resource.get_visa_attribute
Resource.get_visa_attribute = broken_get_visa_attribute
try:
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
try:
shell.do_attr("VI_ATTR_TERMCHAR")
finally:
Resource.get_visa_attribute = old
output = temp_stdout.getvalue()
assert "Exception" in output
finally:
Resource.get_visa_attribute = old
# Issue on aliased attr
old = type(shell.current).allow_dma
type(shell.current).allow_dma = property(broken_get_visa_attribute)
try:
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_attr("allow_dma")
output = temp_stdout.getvalue()
assert "Exception" in output
finally:
type(shell.current).allow_dma = old
def test_attr_get_set_by_VI_non_boolean(self):
"""Test getting/setting an attr using the VI_ name (int value)"""
self.open_resource()
msg = "attr VI_ATTR_TERMCHAR {}".format(ord("\r"))
lines = self.communicate(msg)
assert b"Done" in lines[0]
lines = self.communicate("attr VI_ATTR_TERMCHAR")
assert str(ord("\r")) in lines[0].decode("ascii")
def test_attr_get_set_by_VI_boolean(self):
"""Test getting/setting an attr using the VI_ name (bool value)"""
self.open_resource()
for v in (False, True):
msg = f"attr VI_ATTR_TERMCHAR_EN {v}"
lines = self.communicate(msg)
assert b"Done" in lines[0]
lines = self.communicate("attr VI_ATTR_TERMCHAR_EN")
assert str(int(v)).encode("ascii") in lines[0]
def test_attr_get_by_VI_handle_error(self):
"""Test accessing an attr by an unknown VI name."""
self.open_resource()
lines = self.communicate("attr VI_test")
assert b"no attribute" in lines[0]
def test_attr_get_by_name(self):
"""Test accessing an attr by Python name."""
self.open_resource()
lines = self.communicate("attr allow_dma")
assert b"True" in lines[0] or b"False" in lines[0]
def test_attr_get_by_name_handle_error(self):
"""Test accessing an attr by an unknown Python name."""
self.open_resource()
lines = self.communicate("attr test")
assert b"no attribute" in lines[0]
def test_attr_set_by_VI_handle_error_unknown_attr(self):
"""Test handling issue in setting VI attr which does not exist."""
self.open_resource()
lines = self.communicate("attr VI_test test")
assert b"no attribute" in lines[0]
def test_attr_set_by_VI_handle_error_non_boolean(self):
"""Test handling issue in setting VI attr. (non boolean value)"""
self.open_resource()
msg = "attr VI_ATTR_TERMCHAR_EN Test"
lines = self.communicate(msg)
assert b"Error" in lines[0]
    def test_attr_set_by_VI_handle_error_non_integer(self):
"""Test handling issue in setting VI attr. (non integer value)"""
self.open_resource()
msg = "attr VI_ATTR_TERMCHAR Test"
lines = self.communicate(msg)
assert b"Error" in lines[0]
def test_attr_set_by_VI_handle_error_wrong_value(self):
"""Test handling issue in setting VI attr by name. (wrong value)"""
self.open_resource()
msg = "attr VI_ATTR_TERMCHAR -1"
lines = self.communicate(msg)
assert b"VI_ERROR_NSUP_ATTR_STATE" in lines[0]
def test_attr_set_by_name_handle_error(self):
"""Test handling attempt to set attr by name (which is not supported)."""
self.open_resource()
msg = "attr allow_dma Test"
lines = self.communicate(msg)
assert (
b"Setting Resource Attributes by python name is not yet "
b"supported." in lines[0]
)
def test_complete_attr(self):
"""Test providing auto-completion for attrs."""
shell = VisaShell()
shell.do_open(list(RESOURCE_ADDRESSES.values())[0])
completions = shell.complete_attr("VI_ATTR_TERM", 0, 0, 0)
assert "VI_ATTR_TERMCHAR" in completions
assert "VI_ATTR_TERMCHAR_EN" in completions
completions = shell.complete_attr("allow_d", 0, 0, 0)
assert "allow_dma" in completions
def test_termchar_get_handle_error(self):
"""Test handling error when getting the termchars."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_termchar("")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_getting_termchar_absent_mapping(self):
"""Test getting a termchar that does not map to something with a representation."""
shell = VisaShell()
shell.do_open(list(RESOURCE_ADDRESSES.values())[0])
shell.current.read_termination = "X"
shell.current.write_termination = "Z"
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_termchar("")
output = temp_stdout.getvalue()
assert "Termchar read: X write: Z" == output.split("\n")[0]
def test_termchar_get_set_both_identical(self):
"""Test setting both termchars to the same value."""
self.open_resource()
lines = self.communicate("termchar CR")
assert b"Done" in lines[0]
lines = self.communicate("termchar")
assert b"Termchar read: CR write: CR" in lines[0]
def test_termchar_get_set_both_different(self):
"""Test setting both termchars to different values."""
self.open_resource()
lines = self.communicate("termchar CR NUL")
assert b"Done" in lines[0]
lines = self.communicate("termchar")
assert b"Termchar read: CR write: NUL" in lines[0]
def test_termchar_set_too_many_args(self):
"""Test handling to many termchars to termchar."""
self.open_resource()
lines = self.communicate("termchar 1 2 3")
assert b"Invalid syntax" in lines[0]
def test_termchar_set_handle_error_wrong_value(self):
"""Test handling wrong value in setting termchar."""
self.open_resource()
lines = self.communicate("termchar tt")
assert b"use CR, LF, CRLF, NUL or None to set termchar" in lines[0]
def test_termchar_set_handle_error(self):
"""Test handling an error in setting the termchars."""
shell = VisaShell()
shell.current = True
temp_stdout = StringIO()
with redirect_stdout(temp_stdout):
shell.do_termchar("CR")
output = temp_stdout.getvalue()
assert "no attribute" in output
def test_eof(self):
"""Test handling an EOF."""
shell = VisaShell()
assert shell.do_EOF(None)
```
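A standalone sketch of the poller pattern above, assuming `SubprocessOutputPoller` is in scope; the child command is a stand-in for any line-oriented program:
```python
import sys
from subprocess import PIPE, Popen

# A child process that echoes one line back on stdout.
proc = Popen(
    [sys.executable, "-u", "-c", "print(input())"], stdin=PIPE, stdout=PIPE
)
poller = SubprocessOutputPoller(proc)
proc.stdin.write(b"hello\n")
proc.stdin.flush()
proc.stdin.close()
poller.data_ready.wait(2)  # set once output has been quiet for 0.5s
print(poller.get_lines())  # [b'hello']
proc.wait()
poller.shutdown()
```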
#### File: pyvisa/testsuite/test_env_var_handling.py
```python
import os
import sys
from subprocess import PIPE, Popen
from . import BaseTestCase
class TestEnvVarHandling(BaseTestCase):
"""Test reading env vars"""
def test_reading_wrap_handler(self):
with Popen([sys.executable], stdin=PIPE, stdout=PIPE) as p:
stdout, _ = p.communicate(
b"from pyvisa import ctwrapper;print(ctwrapper.WRAP_HANDLER);exit()"
)
assert b"True" == stdout.rstrip()
env = os.environ.copy()
env["PYVISA_WRAP_HANDLER"] = "0"
with Popen([sys.executable], stdin=PIPE, stdout=PIPE, env=env) as p:
stdout, _ = p.communicate(
b"from pyvisa import ctwrapper;print(ctwrapper.WRAP_HANDLER);exit()"
)
assert b"False" == stdout.rstrip()
``` |
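The behavior under test: the environment variable is read when `pyvisa.ctwrapper` is first imported, which is why the test spawns fresh interpreters. A sketch:
```python
import os

# Must be set before pyvisa is imported anywhere in the process.
os.environ["PYVISA_WRAP_HANDLER"] = "0"
from pyvisa import ctwrapper

print(ctwrapper.WRAP_HANDLER)  # False in a fresh interpreter
```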
{
"source": "jpserra/vim-ultest",
"score": 3
} |
#### File: ultest/models/base.py
```python
import json
from dataclasses import asdict, dataclass
from typing import List
@dataclass
class BasePosition:
id: str
name: str
file: str
line: int
col: int
running: int
namespaces: List[str]
type: str
def __str__(self):
props = self.dict()
props["name"] = [int(char) for char in self.name.encode()]
return json.dumps(props)
def dict(self):
return asdict(self)
```
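A quick sketch of the serialization above, assuming `BasePosition` is imported from the module; the field values are hypothetical. Note that `name` is emitted as a list of byte values, presumably so arbitrary characters survive the trip into Vim:
```python
pos = BasePosition(
    id="test_add",
    name="test_add",
    file="tests/test_math.py",
    line=10,
    col=1,
    running=0,
    namespaces=[],
    type="test",
)
print(str(pos))  # "name" becomes [116, 101, 115, 116, 95, 97, 100, 100]
```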
#### File: ultest/models/result.py
```python
import json
from dataclasses import asdict, dataclass
@dataclass
class Result:
id: str
file: str
code: int
output: str
def __str__(self):
props = self.dict()
return json.dumps(props)
def dict(self):
return asdict(self)
```
#### File: tests/mocks/__init__.py
```python
import os
from typing import List
def get_output(runner: str) -> List[str]:
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, "test_outputs", runner)
with open(filename) as output:
return output.readlines()
def get_test_file(name: str) -> str:
dirname = os.path.dirname(__file__)
return os.path.join(dirname, "test_files", name)
``` |
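A usage sketch for the helpers above; the runner and file names are hypothetical and must exist under `test_outputs/` and `test_files/` respectively:
```python
output_lines = get_output("pytest")         # recorded runner output, as lines
fixture_path = get_test_file("test_a.py")   # absolute path to the fixture
```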
{
"source": "jpsety/test",
"score": 3
} |
#### File: test/circuitgraph/io.py
```python
import re
import os
from glob import glob
from pyeda.parsing import boolexpr
from circuitgraph import Circuit
class SequentialElement:
"""Defines a representation of a sequential element for reading/writing
sequential circuits."""
def __init__(self, name, seq_type, io, code_def):
"""
Parameters
----------
name: str
Name of the element (the module name)
seq_type: str
The type of sequential element, either 'ff' or 'lat'
io: dict of str:str
The mapping the 'd', 'q', 'clk', and potentially 'r', 's'
ports to the names of the ports on the module
code_def:
The code defining the module, used for writing the circuit
to verilog
"""
self.name = name
self.seq_type = seq_type
self.io = io
self.code_def = code_def
default_seq_types = [
SequentialElement(
name="fflopd",
seq_type="ff",
io={"d": "D", "q": "Q", "clk": "CK"},
code_def="module fflopd(CK, D, Q);\n"
" input CK, D;\n"
" output Q;\n"
" wire CK, D;\n"
" wire Q;\n"
" wire next_state;\n"
" reg qi;\n"
" assign #1 Q = qi;\n"
" assign next_state = D;\n"
" always\n"
" @(posedge CK)\n"
" qi <= next_state;\n"
" initial\n"
" qi <= 1'b0;\n"
"endmodule",
),
SequentialElement(
name="latchdrs",
seq_type="lat",
io={"d": "D", "q": "Q", "clk": "ENA", "r": "R", "s": "S"},
code_def="",
),
]
cadence_seq_types = [
# Note that this is a modified version of CDN_flop with only a synchronous
# reset that always resets to 0
SequentialElement(
name="CDN_flop",
seq_type="ff",
io={"d": "d", "q": "q", "clk": "clk", "r": "srl"},
code_def="module CDN_flop(clk, d, srl, q);\n"
" input clk, d, srl;\n"
" output q;\n"
" wire clk, d, srl;\n"
" wire q;\n"
" reg qi;\n"
" assign #1 q = qi;\n"
" always\n"
" @(posedge clk)\n"
" if (srl)\n"
" qi <= 1'b0;\n"
" else if (sena)\n"
" qi <= d;\n"
" end\n"
" initial\n"
" qi <= 1'b0;\n"
"endmodule",
)
]
def from_file(path, name=None, seq_types=None):
"""
Creates a new `Circuit` from a verilog file.
Parameters
----------
path: str
the path to the file to read from.
name: str
the name of the module to read if different from the filename.
    seq_types: list of SequentialElement
the types of sequential elements in the file.
Returns
-------
Circuit
the parsed circuit.
"""
ext = path.split(".")[-1]
if name is None:
name = path.split("/")[-1].replace(f".{ext}", "")
with open(path, "r") as f:
netlist = f.read()
if ext == "v":
return verilog_to_circuit(netlist, name, seq_types)
elif ext == "bench":
return bench_to_circuit(netlist, name)
else:
raise ValueError(f"extension {ext} not supported")
def from_lib(circuit, name=None):
"""
Creates a new `Circuit` from a netlist in the `../netlists`
folder
Parameters
----------
circuit: the name of the netlist.
name: the module name, if different from the netlist name.
Returns
-------
Circuit
the parsed circuit.
"""
path = glob(f"{os.path.dirname(__file__)}/../netlists/{circuit}.*")[0]
return from_file(path, name)
def bench_to_circuit(bench, name):
"""
Creates a new Circuit from a bench string.
Parameters
----------
bench: str
bench code.
name: str
the module name.
Returns
-------
Circuit
the parsed circuit.
"""
# create circuit
c = Circuit(name=name)
# get inputs
in_regex = r"(?:INPUT|input)\s*\((.+?)\)"
for net_str in re.findall(in_regex, bench, re.DOTALL):
nets = net_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
for n in nets:
c.add(n, "input")
# handle gates
regex = r"(\S+)\s*=\s*(NOT|OR|NOR|AND|NAND|XOR|XNOR|not|or|nor|and|nand|not|xor|xnor)\((.+?)\)"
for net, gate, input_str in re.findall(regex, bench):
# parse all nets
inputs = (
input_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
)
c.add(net, gate.lower(), fanin=inputs)
# get outputs
in_regex = r"(?:OUTPUT|output)\s*\((.+?)\)"
for net_str in re.findall(in_regex, bench, re.DOTALL):
nets = net_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
for n in nets:
c.set_output(n)
return c
def verilog_to_circuit(verilog, name, seq_types=None):
"""
Creates a new Circuit from a verilog string.
Parameters
----------
verilog: str
verilog code.
name: str
the module name.
    seq_types: list of SequentialElement
the sequential element types.
Returns
-------
Circuit
the parsed circuit.
"""
if seq_types is None:
seq_types = default_seq_types
# extract module
regex = rf"module\s+{name}\s*\(.*?\);(.*?)endmodule"
m = re.search(regex, verilog, re.DOTALL)
module = m.group(1)
# create circuit
c = Circuit(name=name)
# get inputs
in_regex = r"input\s(.+?);"
for net_str in re.findall(in_regex, module, re.DOTALL):
nets = net_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
for n in nets:
c.add(n, "input")
# handle gates
regex = r"(or|nor|and|nand|not|xor|xnor)\s+\S+\s*\((.+?)\);"
for gate, net_str in re.findall(regex, module, re.DOTALL):
# parse all nets
nets = net_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
c.add(nets[0], gate, fanin=nets[1:])
# handle seq
for st in seq_types:
# find matching insts
regex = rf"{st.name}\s+[^\s(]+\s*\((.+?)\);"
for io in re.findall(regex, module, re.DOTALL):
# find matching pins
pins = {}
            for typ, pin_name in st.io.items():
                regex = rf".{pin_name}\s*\((.+?)\)"
n = re.findall(regex, io, re.DOTALL)[0]
pins[typ] = n
if pins.get("d") == None:
print(pins)
c.add(
pins.get("q", None),
st.seq_type,
fanin=pins.get("d", None),
clk=pins.get("clk", None),
r=pins.get("r", None),
s=pins.get("s", None),
)
# handle assign statements (with help from pyeda)
assign_regex = r"assign\s+(.+?)\s*=\s*(.+?);"
for dest, expr in re.findall(assign_regex, module, re.DOTALL):
        c.add(dest, 'buf', fanin=parse_ast(boolexpr.parse(expr), c))
# get outputs
out_regex = r"output\s(.+?);"
for net_str in re.findall(out_regex, module, re.DOTALL):
nets = net_str.replace(" ", "").replace("\n", "").replace("\t", "").split(",")
        for n in nets:
            c.set_output(n)
# assign constant types
for n in c:
if "type" not in c.graph.nodes[n]:
if n == "1'b0":
c.add(n, "0")
elif n == "1'b1":
c.add(n, "1")
else:
raise ValueError(f"node {n} does not have a type")
return c
def parse_ast(ast, g):
if ast[0] == "var":
return ast[1][0]
else:
fanin = [parse_ast(a, g) for a in ast[1:]]
name = f"{ast[0]}_{'_'.join(fanin)}"
g.add(name, ast[0], fanin=fanin)
return name
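# Worked example (a sketch; assumes pyeda's prefix-tuple AST, where a leaf
# looks roughly like ('var', ('a',), ...)): for "a & b", parse_ast recurses
# into both operands, adds a node named "and_a_b" of type "and" with fanin
# ["a", "b"], and returns that name for use as the assign target's fanin.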
def to_file(c, path, seq_types=None):
"""
Writes a `Circuit` to a verilog file.
Parameters
----------
c: Circut
the circuit
path: str
the path to the file to read from.
    seq_types: list of SequentialElement
the types of sequential elements in the file.
"""
with open(path, "w") as f:
f.write(circuit_to_verilog(c, seq_types))
def circuit_to_verilog(c, seq_types=None):
"""
Generates a str of verilog code from a `CircuitGraph`.
Parameters
----------
c: Circuit
the circuit to turn into verilog.
    seq_types: list of SequentialElement
the sequential element types.
Returns
-------
str
verilog code.
"""
inputs = []
outputs = []
insts = []
wires = []
defs = set()
if seq_types is None:
seq_types = default_seq_types
for n in c.nodes():
if c.type(n) in ["xor", "xnor", "buf", "not", "nor", "or", "and", "nand"]:
fanin = ",".join(p for p in c.fanin(n))
insts.append(f"{c.type(n)} g_{n} ({n},{fanin})")
wires.append(n)
elif c.type(n) in ["0", "1"]:
insts.append(f"assign {n} = 1'b{c.type(n)}")
elif c.type(n) in ["input"]:
inputs.append(n)
wires.append(n)
elif c.type(n) in ["ff", "lat"]:
wires.append(n)
            # get template
            for st in seq_types:
                if st.seq_type == c.type(n):
                    seq = st
                    defs.add(st.code_def)
                    break
            # connect (SequentialElement fields are attributes, not dict keys)
            io = []
            if c.d(n):
                io.append(f".{seq.io['d']}({c.d(n)})")
            if c.r(n):
                io.append(f".{seq.io['r']}({c.r(n)})")
            if c.s(n):
                io.append(f".{seq.io['s']}({c.s(n)})")
            if c.clk(n):
                io.append(f".{seq.io['clk']}({c.clk(n)})")
            io.append(f".{seq.io['q']}({n})")
            insts.append(f"{seq.name} g_{n} ({','.join(io)})")
        else:
            raise ValueError(f"unknown gate type: {c.type(n)}")
if c.output(n):
outputs.append(n)
verilog = f"module {c.name} (" + ",".join(inputs + outputs) + ");\n"
verilog += "".join(f"input {inp};\n" for inp in inputs)
verilog += "".join(f"output {out};\n" for out in outputs)
verilog += "".join(f"wire {wire};\n" for wire in wires)
verilog += "".join(f"{inst};\n" for inst in insts)
verilog += "endmodule\n"
verilog += "\n".join(defs)
return verilog
``` |
{
"source": "jpshankar/random_walk",
"score": 2
} |
#### File: random_walk/tests/test_random_walk.py
```python
import pytest, sys
from randomWalk.random_walk import RandomWalk
from typing import Generator
from randomWalk.steps_generating_exception import StepsGeneratingException
from dataclasses import dataclass
class TestRandomWalk:
def setup_class(cls):
cls.step_count = 5
def setup_method(self, method):
name = method.__name__
if name.endswith('pos_without_add'):
@dataclass(repr=False, eq=False, order=False, unsafe_hash=True)
class PosWithoutAdd:
pos: int
step_function = lambda: PosWithoutAdd(1)
ini_pos = PosWithoutAdd(0)
self.random_walk = RandomWalk(pos=ini_pos, step=step_function)
else:
step_probs = {
-1: 1,
1: 1
}
step = RandomWalk.simple_step_function(step_probs)
self.random_walk = RandomWalk(pos=0, step=step)
def test_steps_as_list(self):
rw_list = self.random_walk.steps_list(5)
assert isinstance(rw_list, list)
assert len(rw_list) == 5
def test_steps_as_generator(self):
rw_gen = self.random_walk.steps_generator(5)
assert isinstance(rw_gen, Generator)
def test_single_step(self):
prev_pos = self.random_walk.pos
self.random_walk.single_step()
assert self.random_walk.pos != prev_pos
assert len(self.random_walk.walk_history) == 2
def test_only_one_generator(self):
self.random_walk.steps_generator(5)
with pytest.raises(StepsGeneratingException):
self.random_walk.steps_generator(5)
def test_new_generator_after_old_one(self):
rw_gen = self.random_walk.steps_generator(self.step_count)
for _ in range(self.step_count):
next(rw_gen)
rw_gen = self.random_walk.steps_generator(self.step_count)
next(rw_gen)
def test_list_during_generator(self):
rw_gen = self.random_walk.steps_generator(self.step_count)
next(rw_gen)
rw_list = self.random_walk.steps_list(5)
assert len(self.random_walk.walk_history) == 7
def test_generator_after_list(self):
rw_gen = self.random_walk.steps_generator(self.step_count)
next(rw_gen)
rw_list = self.random_walk.steps_list(5)
next(rw_gen)
assert len(self.random_walk.walk_history) == 8
def test_step_with_pos_without_add(self):
with pytest.raises(TypeError):
self.random_walk.single_step()
def test_list_with_pos_without_add(self):
with pytest.raises(TypeError):
self.random_walk.steps_list(self.step_count)
def test_generator_with_pos_without_add(self):
with pytest.raises(TypeError):
rw_gen = self.random_walk.steps_generator(self.step_count)
next(rw_gen)
``` |
{
"source": "JPShrad/cs3240-labdemo",
"score": 2
} |
#### File: JPShrad/cs3240-labdemo/hello.py
```python
def greeting(msg):
print(msg)
def main():
greeting('hello')
main()
``` |
{
"source": "jpsim/envoy-mobile",
"score": 2
} |
#### File: envoy-mobile/bazel/kotlin_test.bzl
```python
load("@io_bazel_rules_kotlin//kotlin:kotlin.bzl", "kt_jvm_test")
load("//bazel:kotlin_lib.bzl", "native_lib_name")
def _internal_kt_test(name, srcs, deps = [], data = [], jvm_flags = [], repository = ""):
# This is to work around the issue where we have specific implementation functionality which
# we want to avoid consumers to use but we want to unit test
dep_srcs = []
for dep in deps:
# We'll resolve only the targets in `//library/kotlin/io/envoyproxy/envoymobile`
if dep.startswith(repository + "//library/kotlin/io/envoyproxy/envoymobile"):
dep_srcs.append(dep + "_srcs")
elif dep.startswith(repository + "//library/java/io/envoyproxy/envoymobile"):
dep_srcs.append(dep + "_srcs")
kt_jvm_test(
name = name,
test_class = "io.envoyproxy.envoymobile.bazel.EnvoyMobileTestSuite",
srcs = srcs + dep_srcs,
deps = [
repository + "//bazel:envoy_mobile_test_suite",
"@maven//:org_assertj_assertj_core",
"@maven//:junit_junit",
"@maven//:org_mockito_mockito_inline",
"@maven//:org_mockito_mockito_core",
] + deps,
data = data,
jvm_flags = jvm_flags,
)
# A basic macro to make it easier to declare and run kotlin tests which depend on a JNI lib
# This will create the native .so binary (for linux) and a .jnilib (for OS X) look up
def envoy_mobile_jni_kt_test(name, srcs, native_deps = [], deps = [], library_path = "library/common/jni", repository = ""):
lib_name = native_lib_name(native_deps[0])[3:]
_internal_kt_test(
name,
srcs,
deps,
data = native_deps,
jvm_flags = [
"-Djava.library.path={}".format(library_path),
"-Denvoy_jni_library_name={}".format(lib_name),
],
repository = repository,
)
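# Usage sketch (labels are illustrative, not taken from the real build graph):
# envoy_mobile_jni_kt_test(
#     name = "example_jni_test",
#     srcs = ["ExampleJniTest.kt"],
#     native_deps = ["//library/common/jni:libndk_envoy_jni.so"],
# )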
# A basic macro to make it easier to declare and run kotlin tests
#
# Ergonomic improvements include:
# 1. Avoiding the need to declare the test_class which requires a fully qualified class name (example below)
# 2. Avoiding the need to redeclare common unit testing dependencies like JUnit
# 3. Ability to run more than one test file per target
# 4. Ability to test internal envoy mobile entities
# Usage example:
# load("@envoy_mobile//bazel:kotlin_test.bzl", "envoy_mobile_kt_test)
#
# envoy_mobile_kt_test(
# name = "example_kotlin_test",
# srcs = [
# "ExampleTest.kt",
# ],
# )
def envoy_mobile_kt_test(name, srcs, deps = [], repository = ""):
_internal_kt_test(name, srcs, deps, repository = repository)
# A basic macro to run android based (robolectric) tests with native dependencies
def envoy_mobile_android_test(name, srcs, deps = [], native_deps = [], repository = "", library_path = "library/common/jni"):
lib_name = native_lib_name(native_deps[0])[3:]
native.android_library(
name = name + "_test_lib",
custom_package = "io.envoyproxy.envoymobile.test",
manifest = repository + "//bazel:test_manifest.xml",
visibility = ["//visibility:public"],
data = native_deps,
exports = deps,
)
native.android_local_test(
name = name,
srcs = srcs,
data = native_deps,
deps = deps + [
repository + "//bazel:envoy_mobile_test_suite",
"@maven//:androidx_annotation_annotation",
"@maven//:androidx_test_core",
"@maven//:androidx_test_ext_junit",
"@maven//:androidx_test_runner",
"@maven//:androidx_test_monitor",
"@maven//:androidx_test_rules",
"@maven//:org_robolectric_robolectric",
"@robolectric//bazel:android-all",
"@maven//:org_assertj_assertj_core",
"@maven//:junit_junit",
"@maven//:org_mockito_mockito_inline",
"@maven//:org_mockito_mockito_core",
"@maven//:com_squareup_okhttp3_okhttp",
"@maven//:com_squareup_okhttp3_mockwebserver",
"@maven//:com_squareup_okio_okio",
"@maven//:org_hamcrest_hamcrest",
"@maven//:com_google_truth_truth",
],
manifest = repository + "//bazel:test_manifest.xml",
custom_package = "io.envoyproxy.envoymobile.tests",
test_class = "io.envoyproxy.envoymobile.bazel.EnvoyMobileTestSuite",
jvm_flags = [
"-Djava.library.path={}".format(library_path),
"-Denvoy_jni_library_name={}".format(lib_name),
],
)
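# Usage sketch (labels are illustrative):
# envoy_mobile_android_test(
#     name = "example_android_test",
#     srcs = ["ExampleAndroidTest.kt"],
#     native_deps = ["//library/common/jni:libndk_envoy_jni.so"],
# )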
``` |
{
"source": "jpsiyyadri/gramex",
"score": 2
} |
#### File: gramex/handlers/capturehandler.py
```python
from __future__ import unicode_literals
import re
import os
import six
import json
import time
import shlex
import atexit
import psutil
import requests
import tornado.gen
from orderedattrdict import AttrDict
from threading import Thread, Lock
from subprocess import Popen, PIPE, STDOUT # nosec
from six.moves.urllib.parse import urlencode, urljoin
from tornado.web import HTTPError
from tornado.httpclient import AsyncHTTPClient
from gramex.config import app_log, variables, recursive_encode
from gramex.http import OK, BAD_REQUEST, GATEWAY_TIMEOUT, BAD_GATEWAY, CLIENT_TIMEOUT
from .basehandler import BaseHandler
_PPTX_MIME = 'application/vnd.openxmlformats-officedocument.presentationml.presentation'
# HTTP headers not to forward to chromecapture.js.
# Keep this sync-ed with the same list in chromecapture.js
_IGNORE_HEADERS = {
'host', # The URL will determine the host
'connection', # Let Tornado manage the connection
'upgrade', # .. and the upgrades
    'content-length', # The new request will have a different content-length
    'content-md5', # ... and a different content-md5
}
class Capture(object):
default_port = 9900 # Default port to run CaptureJS at
check_interval = 0.05 # Frequency (seconds) to check if self.started
# Set engine configurations for PhantomJS and Puppeteer
engines = AttrDict(
phantomjs=AttrDict(
cmd='phantomjs --ssl-protocol=any',
script='capture.js',
first_line=b'PhantomJS.*capture\\.js',
name='Capture',
version='1.0'
),
chrome=AttrDict(
cmd='node',
script='chromecapture.js',
first_line=b'node\\.js.*chromecapture\\.js',
name='ChromeCapture',
version='1.1'
),
)
'''
Create a proxy for capture.js. Typical usage::
capture = Capture()
with open('screenshot.png', 'wb') as handle:
handle.write(capture.png('https://gramener.com/'))
with open('screenshot.pdf', 'wb') as handle:
handle.write(capture.pdf('https://gramener.com/'))
The constructor accepts these optional parameters:
:arg int port: port where capture.js is running. Default: 9900
:arg string url: URL:port where PhantomJS is running with capture.js.
Default: ``http://localhost:<port>/``
:arg string cmd: Command to run PhantomJS with capture.js at the specified
port. Default: ``phantomjs $GRAMEXPATH/apps/capture/capture.js --port=<port>``
:arg int timeout: Seconds to wait for PhantomJS to timeout. Default: 10
The constructor runs :meth:`Capture.start` in a new thread, which checks if
capture.js is running at ``url``. If not, it runs ``cmd`` and checks again.
Until capture.js is detected, all capture methods will fail.
'''
def __init__(self, port=None, url=None, engine=None, cmd=None, timeout=10):
# Set default values for port, url and cmd
self.engine = self.engines['phantomjs' if engine is None else engine]
port = self.default_port if port is None else port
if url is None:
url = 'http://localhost:%d/' % port
if cmd is None:
script = os.path.join(variables.GRAMEXPATH, 'apps', 'capture', self.engine.script)
cmd = '%s "%s" --port=%d' % (self.engine.cmd, script, port)
self.url = url
self.first_line_re = re.compile(self.engine.first_line)
self.cmd = cmd
self.timeout = timeout
self.browser = AsyncHTTPClient()
self.lock = Lock()
self.started = False
self.start()
def start(self):
'''
Starts a thread and check if capture is already running at ``url``. If
not, start ``cmd`` and check again. Print logs from ``cmd``.
This method is thread-safe. It may be called as often as required.
:class:`CaptureHandler` calls this method if ``?start`` is passed.
'''
with self.lock:
thread = Thread(target=self._start)
thread.daemon = True
thread.start()
def _start(self):
'''
Check if capture is already running at ``url``. If not, start ``cmd``
and check again. Print logs from ``cmd``.
'''
self.started = False
script = self.engine.script
try:
# Check if capture.js is at the url specified
app_log.info('Pinging %s at %s', script, self.url)
r = requests.get(self.url, timeout=self.timeout)
self._validate_server(r)
self.started = True
except requests.ReadTimeout:
# If capture.js doesn't respond immediately, we haven't started
app_log.error('url: %s timed out', self.url)
except requests.ConnectionError:
# Try starting the process again
app_log.info('Starting %s via %s', script, self.cmd)
self.close()
# self.cmd is taken from the YAML configuration. Safe to run
self.proc = Popen(shlex.split(self.cmd), stdout=PIPE, stderr=STDOUT) # nosec
self.proc.poll()
atexit.register(self.close)
# TODO: what if readline() does not return quickly?
line = self.proc.stdout.readline().strip()
if not self.first_line_re.search(line):
return app_log.error('cmd: %s invalid. Returned "%s"', self.cmd, line)
app_log.info('Pinging %s at %s', script, self.url)
try:
r = requests.get(self.url, timeout=self.timeout)
self._validate_server(r)
pid = self.proc.pid
app_log.info(line.decode('utf-8') + ' live (pid=%s)', pid)
self.started = True
# Keep logging capture.js output until proc is killed by another thread
while hasattr(self, 'proc'):
line = self.proc.stdout.readline().strip()
if len(line) == 0:
app_log.info('%s terminated: pid=%d', script, pid)
self.started = False
break
# Capture won't print anything, unless there's a problem, or if debug is on.
# So log it at warning level not info.
app_log.warning(line.decode('utf-8'))
except Exception:
app_log.exception('Ran %s. But %s not at %s', self.cmd, script, self.url)
except Exception:
app_log.exception('Cannot start Capture')
def close(self):
'''Stop capture.js if it has been started by this object'''
if hasattr(self, 'proc'):
try:
process = psutil.Process(self.proc.pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
except psutil.NoSuchProcess:
app_log.info('%s PID %d already killed', self.engine.script, self.proc.pid)
delattr(self, 'proc')
def _validate_server(self, response):
# Make sure that the response we got is from the right version of capture.js
server = response.headers.get('Server', '')
parts = server.split('/', 2)
script = self.engine.script
if not len(parts) == 2 or parts[0] != self.engine.name or parts[1] < self.engine.version:
raise RuntimeError('Server: %s at %s is not %s' % (server, self.url, script))
@tornado.gen.coroutine
def capture_async(self, headers=None, **kwargs):
'''
Returns a screenshot of the URL. Runs asynchronously in Gramex. Arguments
are same as :py:func:`capture`
'''
# If ?start is provided, start server and wait until timeout
if 'start' in kwargs:
self.start()
end_time = time.time() + self.timeout
while not self.started and time.time() < end_time:
yield tornado.gen.sleep(self.check_interval)
if not self.started:
raise RuntimeError('%s not started. See logs' % self.engine.script)
if six.PY2:
recursive_encode(kwargs)
r = yield self.browser.fetch(
self.url, method='POST', body=urlencode(kwargs, doseq=True), raise_error=False,
connect_timeout=self.timeout, request_timeout=self.timeout, headers=headers)
if r.code == OK:
self._validate_server(r)
raise tornado.gen.Return(r)
def capture(self, url, **kwargs):
'''
Return a screenshot of the URL.
:arg str url: URL to take a screenshot of
:arg str ext: format of output. Can be pdf, png, gif or jpg
:arg str selector: Restrict screenshot to (optional) CSS selector in URL
:arg int delay: milliseconds (or expression) to wait for before taking a screenshot
:arg str format: A3, A4, A5, Legal, Letter or Tabloid. Defaults to A4. For PDF
:arg str layout: A3, A4, A5, Legal, 16x9, 16x10, 4x3. Defaults to 4x3. For PPTX
:arg str orientation: portrait or landscape. Defaults to portrait. For PDF
:arg str header: header for the page. For PDF
:arg str footer: footer for the page. For PDF
:arg int width: screen width. Default: 1200. For PNG/GIF/JPG
:arg int height: screen height. Default: 768. For PNG/GIF/JPG
:arg float scale: zooms the screen by a factor. For PNG/GIF/JPG
:arg int dpi: dots (pixels) per inch. For PPTX
:arg str title: slide title. For PPTX
:arg int debug: sets log level for HTTP requests (2) and responses (1)
:return: a bytestring with the binary contents of the screenshot
:rtype: bytes
:raises RuntimeError: if capture.js is not running or fails
'''
# Ensure that we're connecting to the right version of capture.js
if not self.started:
end_time = time.time() + self.timeout
while not self.started and time.time() < end_time:
time.sleep(self.check_interval)
if not self.started:
raise RuntimeError('%s not started. See logs' % self.engine.script)
kwargs['url'] = url
r = requests.post(self.url, data=kwargs, timeout=self.timeout)
if r.status_code == OK:
self._validate_server(r)
return r.content
else:
raise RuntimeError('%s error: %s' % (self.engine.script, r.content))
def pdf(self, url, **kwargs):
'''An alias for :meth:`Capture.capture` with ``ext='pdf'``.'''
kwargs['ext'] = 'pdf'
return self.capture(url, **kwargs)
def png(self, url, **kwargs):
'''An alias for :meth:`Capture.capture` with ``ext='png'``.'''
kwargs['ext'] = 'png'
return self.capture(url, **kwargs)
def pptx(self, url, **kwargs):
'''An alias for :meth:`Capture.capture` with ``ext='pptx'``.'''
kwargs['ext'] = 'pptx'
return self.capture(url, **kwargs)
def jpg(self, url, **kwargs):
'''An alias for :meth:`Capture.capture` with ``ext='jpg'``.'''
kwargs['ext'] = 'jpg'
return self.capture(url, **kwargs)
def gif(self, url, **kwargs):
'''An alias for :meth:`Capture.capture` with ``ext='gif'``.'''
kwargs['ext'] = 'gif'
return self.capture(url, **kwargs)
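# Minimal usage sketch (the URL and filename here are illustrative):
#     capture = Capture(engine='chrome')
#     with open('screenshot.png', 'wb') as f:
#         f.write(capture.png('https://gramener.com/', width=1200, height=768))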
class CaptureHandler(BaseHandler):
'''
Renders a web page as a PDF or as an image. It accepts the same arguments as
:class:`Capture`.
The page is called with the same args as :meth:`Capture.capture`. It also
accepts a ``?start`` parameter that restarts capture.js if required.
'''
# Each config maps to a Capture() object. cls.captures[config] = Capture()
captures = {}
@classmethod
def setup(cls, port=None, url=None, engine=None, cmd=None, **kwargs):
super(CaptureHandler, cls).setup(**kwargs)
capture_kwargs = {}
for kwarg in ('timeout', ):
if kwarg in kwargs:
capture_kwargs[kwarg] = kwargs.pop(kwarg)
# Create a new Capture only if the config has changed
config = dict(engine=engine, port=port, url=url, cmd=cmd, **capture_kwargs)
config_str = json.dumps(config, separators=[',', ':'], sort_keys=True)
if config_str not in cls.captures:
cls.captures[config_str] = cls.capture = Capture(**config)
else:
cls.capture = cls.captures[config_str]
# TODO: if the old config is no longer used, close it
cls.ext = {
'pdf': dict(mime='application/pdf'),
'png': dict(mime='image/png'),
'jpg': dict(mime='image/jpeg'),
'jpeg': dict(mime='image/jpeg'),
'gif': dict(mime='image/gif'),
'pptx': dict(mime=_PPTX_MIME),
}
@tornado.gen.coroutine
def get(self):
args = self.argparse(
url={'default': self.request.headers.get('Referer', None)},
ext={'choices': self.ext, 'default': 'pdf'},
file={'default': 'screenshot'},
selector={'nargs': '*'},
cookie={},
delay={},
width={'type': int},
height={'type': int},
x={'type': int},
y={'type': int},
scale={'type': float},
dpi={'type': int, 'nargs': '*'},
format={'choices': ['A3', 'A4', 'A5', 'Legal', 'Letter', 'Tabloid'], 'default': 'A4'},
layout={'choices': ['A3', 'A4', 'Letter', '16x9', '16x10', '4x3'], 'default': '4x3'},
orientation={'choices': ['portrait', 'landscape'], 'default': 'portrait'},
title={'nargs': '*'},
title_size={'type': int, 'nargs': '*'},
start={'nargs': '*'},
debug={'nargs': '*'},
)
if args['url'] is None:
raise HTTPError(BAD_REQUEST, reason='%s: CaptureHandler needs ?url=' % self.name)
# If the URL is a relative URL, treat it relative to the called path
args['url'] = urljoin(self.request.full_url(), args['url'])
# Copy all relevant HTTP headers as-is
args['headers'] = {
key: val for key, val in self.request.headers.items()
if key not in _IGNORE_HEADERS
}
if 'cookie' not in args:
cookie = self.request.headers.get('Cookie', None)
if cookie is not None:
args['cookie'] = cookie
info = self.ext[args.ext]
try:
response = yield self.capture.capture_async(**args)
except RuntimeError as e:
# capture.js could not fetch the response
raise HTTPError(BAD_GATEWAY, reason=e.args[0])
if response.code == OK:
self.set_header('Content-Type', info['mime'])
self.set_header('Content-Disposition',
'attachment; filename="{file}.{ext}"'.format(**args))
self.write(response.body)
elif response.code == CLIENT_TIMEOUT:
self.set_status(GATEWAY_TIMEOUT, reason='Capture is busy')
self.set_header('Content-Type', 'application/json')
self.write({'status': 'fail', 'msg': [
'Capture did not respond within timeout: %ds' % self.capture.timeout]})
else:
self.set_status(response.code, reason='capture.js error')
self.set_header('Content-Type', 'application/json')
self.write(response.body)
``` |
{
"source": "jpsk/Swedbank-to-YNAB4-converter",
"score": 3
} |
#### File: jpsk/Swedbank-to-YNAB4-converter/converter.py
```python
import os, sys, csv, time
err_msg = 'Please provide filename or path'
# "rows" rather than "list", so the builtin list() is not shadowed
rows = [['Date', 'Payee', 'Category', 'Memo', 'Outflow', 'Inflow']]
def get_abs_path(arg):
    if arg[0] == '/':
        return arg
    else:
        return os.getcwd() + '/' + arg
try:
    input_path = get_abs_path(sys.argv[1])
except IndexError:
    exit(err_msg)
try:
    with open(input_path, 'r') as source:
        reader = csv.reader(source)
        next(reader)  # skip the header row
        for row in reader:
            date = row[2].split('-')
            date = date[0] + '/' + date[1] + '/' + date[2]
            payee = ''
            memo = row[4]
            if row[7] == 'K':  # 'K' marks a credit, i.e. an inflow
                outflow = ''
                inflow = row[5]
            else:
                outflow = row[5]
                inflow = ''
            rows.append([date, payee, '', memo, outflow, inflow])
except FileNotFoundError:
    exit('File not found')
with open('exported_' + time.strftime("%y_%m_%d_%H%M") + '.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerows(rows)
``` |
{
"source": "jpsmedeiros/distribuicao_de_calor",
"score": 3
} |
#### File: distribuicao_de_calor/src/model.py
```python
import numpy as np
class Model:
def __init__(self, nx, ny, dimensionality):
self.shape = (int(dimensionality[0]/nx), int(dimensionality[1]/ny))
self.nx = nx
self.ny = ny
self.initial_distribution = self.get_initial_distribution()
def get_initial_distribution(self):
matrix = np.zeros(self.shape)
#Calculate Vertical Boundaries
for x in [0, len(matrix)-1]:
for y in range(len(matrix[x])):
matrix[x][y] = self.u(x*self.nx, y*self.ny)
#Calculate Horizontal Boundaries
for y in [0, len(matrix[0])-1]:
for x in range(len(matrix)):
matrix[x][y] = self.u(x*self.nx, y*self.ny)
return matrix
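    # Note: u doubles as the Dirichlet boundary condition; only the outer
    # rows/columns above are set, interior nodes start at zero, and x, y are
    # physical coordinates (grid index times spacing).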
def u(self, x , y):
return 2*x + y**2
```
#### File: distribuicao_de_calor/src/solver.py
```python
import numpy as np
import fatoracao_lu as lu
import math
import heat_map
from scipy.sparse import csr_matrix
class Solver:
def __init__(self, model, delta_t):
self.current_distribution = model.initial_distribution
self.nx = model.nx
self.ny = model.ny
self.delta_t = delta_t
self.shape = model.shape
def solve(self):
        heat_map.clearFiles("images/tmp/")  # clear previous report images
max_difference = 1.0
heat_map.draw(self.current_distribution)
system_to_solve = self.get_system()
while max_difference > 0 and math.log(max_difference, 10) > -7:
linearized_distribution = self.get_array_from_distribution(self.current_distribution)
result = np.array(lu.resolve_lu(system_to_solve, linearized_distribution))
max_difference = self.calculate_max_difference(linearized_distribution, result)
self.current_distribution = result.reshape(self.shape[0], self.shape[1])
heat_map.draw(self.current_distribution)
heat_map.generateGif()
def calculate_max_difference(self, initial, final):
return np.max(np.abs(initial-final))
def get_system(self):
system_dimension = self.shape[0] * self.shape[1]
system_to_solve = []
for i in range(system_dimension):
current_row = [0] * system_dimension
if self.is_boundary(i):
current_row[i] = 1
else:
# i,j term
current_row[i] = 2 * self.delta_t*(self.nx**2 + self.ny**2)/(self.nx**2 * self.ny**2) + 1.0
# i-1,j term
current_row[i - self.shape[0]] = -self.delta_t / self.nx**2
# i+1,j term
current_row[i + self.shape[0]] = -self.delta_t / self.nx**2
# i,j-1 term
current_row[i - 1] = -self.delta_t / self.ny**2
# i,j+1 term
current_row[i + 1] = -self.delta_t / self.ny**2
sparse_row = csr_matrix(current_row)
system_to_solve.append(sparse_row)
return system_to_solve
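    # Discretization note (a sketch of what get_system encodes): for the heat
    # equation u_t = u_xx + u_yy, backward Euler gives, per interior node (i, j):
    #   (1 + 2*dt*(1/nx^2 + 1/ny^2)) * u[i,j]
    #     - (dt/nx^2) * (u[i-1,j] + u[i+1,j])
    #     - (dt/ny^2) * (u[i,j-1] + u[i,j+1]) = u_prev[i,j]
    # Boundary rows are identity rows, which pins the Dirichlet boundary values.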
def get_array_from_distribution(self, matrix):
return matrix.reshape((self.shape[0]*self.shape[1]))
def is_boundary(self, i):
x_size = self.shape[0]
y_size = self.shape[1]
#Case i is in first line
if i // x_size == 0:
return True
#Case i in the first column
if i % x_size == 0:
return True
#Case i is in the last column
if (i+1) % x_size == 0:
return True
#Case i is in the last line
if i // x_size == y_size-1:
return True
return False
``` |
{
"source": "jpsteinb/svgpathtools",
"score": 3
} |
#### File: svgpathtools/svgpathtools/parser.py
```python
from __future__ import division, absolute_import, print_function
from .misctools import real_numbers_in, to_decimals
from numbers import Real
import re
# Internal dependencies
from .path import Path, Subpath, Line, QuadraticBezier, CubicBezier, Arc
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile(r"([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
def _tokenize_path_string(pathdef):
for x in COMMAND_RE.split(pathdef):
if x in COMMANDS:
yield x
for token in FLOAT_RE.findall(x):
yield float(token)
def _unpack_tokens(pathdef):
for token in pathdef:
if isinstance(token, str):
            if token not in COMMANDS:
                raise ValueError(f"unrecognized string token {token!r} in svgpathtools.parser._unpack_tokens")
yield token
continue
x, y = real_numbers_in(token)
yield x
if y is not None:
yield y
def generate_path(*args, decimals=None):
if len(args) == 0:
raise ValueError("empty args in generate_path")
if len(args) == 1 and isinstance(args[0], list):
tokens = args[0]
else:
tokens = args
def stringifier(thing):
if isinstance(thing, str):
return thing
assert isinstance(thing, Real)
return to_decimals(thing, decimals)
return " ".join(stringifier(c) for c in _unpack_tokens(tokens))
# The following function returns a Subpath when it can, and otherwise, if
# accept_paths is True, a Path. Does not accept an empty token list / spec.
def parse_subpath(*args, accept_paths=False):
# In the SVG specs, initial movetos are absolute, even if
# specified as 'm'. This is the default behavior here as well.
# But if you pass in a current_pos variable, the initial moveto
# will be relative to that current_pos. This is useful.
if len(args) == 0:
raise ValueError("empty args in parse_subpath")
if len(args) == 1 and isinstance(args[0], str):
elements = list(_tokenize_path_string(args[0]))
elif len(args) == 1 and isinstance(args[0], list):
elements = list(_unpack_tokens(args[0]))
else:
elements = list(_unpack_tokens(args))
    if not all(isinstance(x, str) or isinstance(x, Real) for x in elements):
        raise ValueError(f"non-string, non-numeric token among parsed path elements: {elements}")
if len(elements) == 0:
# raise ValueError("Empty token list in parse_subpath.")
return Subpath()
    if isinstance(elements[0], Real):
elements.insert(0, 'M')
# Reverse for easy use of .pop()
elements.reverse()
path = Path()
subpath = Subpath()
subpath_start = None
command = None
current_pos = 0 # if path starts with an 'm'...
def append_to_path(subpath):
if len(path) > 0 and not accept_paths:
raise ValueError("parse_subpath given multi-subpath path")
path.append(subpath)
def pop_float():
nonlocal elements
el = elements.pop()
        if isinstance(el, str):
            raise ValueError(f"string token {el!r} found in tokens when float expected")
assert isinstance(el, Real)
return el
while elements:
if elements[-1] in COMMANDS:
# New command.
last_command = command # Used by S and T
command = elements.pop()
absolute = command in UPPERCASE
command = command.upper()
else:
# If this element starts with numbers, it is an implicit command
# and we don't change the command. Check that it's allowed:
if command is None:
raise ValueError("Missing command after 'Z' in parse_subpath.")
if command == 'M':
# Moveto command.
if len(subpath) > 0:
append_to_path(subpath)
subpath = Subpath()
x = pop_float()
y = pop_float()
pos = x + y * 1j
if absolute:
current_pos = pos
else:
current_pos += pos
# when M is called, reset subpath_start
# This behavior of Z is defined in svg spec:
# http://www.w3.org/TR/SVG/paths.html#PathDataClosePathCommand
subpath_start = current_pos
# Implicit moveto commands are treated as lineto commands.
# So we set command to lineto here, in case there are
# further implicit commands after this moveto.
command = 'L'
elif command == 'Z':
# Close path
if len(subpath) > 0:
subpath.set_Z(forceful=True)
assert subpath.Z
append_to_path(subpath)
subpath = Subpath()
assert subpath_start is not None
current_pos = subpath_start
command = None
elif command == 'L':
x = pop_float()
y = pop_float()
pos = x + y * 1j
if not absolute:
pos += current_pos
subpath.append(Line(current_pos, pos))
current_pos = pos
elif command == 'H':
x = pop_float()
pos = x + current_pos.imag * 1j
if not absolute:
pos += current_pos.real
subpath.append(Line(current_pos, pos))
current_pos = pos
elif command == 'V':
y = pop_float()
pos = current_pos.real + y * 1j
if not absolute:
pos += current_pos.imag * 1j
subpath.append(Line(current_pos, pos))
current_pos = pos
elif command == 'C':
control1 = pop_float() + pop_float() * 1j
control2 = pop_float() + pop_float() * 1j
end = pop_float() + pop_float() * 1j
if not absolute:
control1 += current_pos
control2 += current_pos
end += current_pos
subpath.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'S':
# Smooth curve. First control point is the "reflection" of
# the second control point in the previous path.
            # tuple membership also handles last_command still being None
            if last_command not in ('C', 'S'):
# If there is no previous command or if the previous command
# was not an C, c, S or s, assume the first control point is
# coincident with the current point.
control1 = current_pos
else:
# The first control point is assumed to be the reflection of
# the second control point on the previous command relative
# to the current point.
control1 = current_pos + current_pos - subpath[-1].control2
control2 = pop_float() + pop_float() * 1j
end = pop_float() + pop_float() * 1j
if not absolute:
control2 += current_pos
end += current_pos
subpath.append(CubicBezier(current_pos, control1, control2, end))
current_pos = end
elif command == 'Q':
control = pop_float() + pop_float() * 1j
end = pop_float() + pop_float() * 1j
if not absolute:
control += current_pos
end += current_pos
subpath.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'T':
# Smooth curve. Control point is the "reflection" of
# the second control point in the previous path.
            # tuple membership also handles last_command still being None
            if last_command not in ('Q', 'T'):
# If there is no previous command or if the previous command
# was not an Q, q, T or t, assume the first control point is
# coincident with the current point.
control = current_pos
else:
# The control point is assumed to be the reflection of
# the control point on the previous command relative
# to the current point.
control = current_pos + current_pos - subpath[-1].control
end = pop_float() + pop_float() * 1j
if not absolute:
end += current_pos
subpath.append(QuadraticBezier(current_pos, control, end))
current_pos = end
elif command == 'A':
radius = pop_float() + pop_float() * 1j
rotation = pop_float()
arc = pop_float()
sweep = pop_float()
end = pop_float() + pop_float() * 1j
if not absolute:
end += current_pos
subpath.append(Arc(current_pos, radius, rotation, arc, sweep, end))
current_pos = end
if len(subpath) > 0:
append_to_path(subpath)
subpath = Subpath()
if not accept_paths:
assert len(path) <= 1
if len(path) <= 1:
if len(path) > 0:
assert len(subpath) == 0
return path[-1]
assert len(subpath) == 0
return subpath
return path
def parse_path(*args, accept_paths=None):
"""
Parses a path from a single string or from a list of tokens. The 'accept_paths'
option is accepted here for uniformity with 'parse_subpath', but it has no
effect in this function.
All of the following are valid usages, and will parse to the same path:
parse_path("M 0 0 10 10")
parse_path(['M', 0, 0, 10, 10])
parse_path('M', 0, 0, 10, 10)
parse_path("M 0+0j 10+10j")
parse_path(['M', 0+0j, 10+10j])
parse_path('M', 0+0j, 10+10j)
parse_path('M', 0, 0, 10+10j)
(Etc.)
"""
s = parse_subpath(*args, accept_paths=True)
if isinstance(s, Subpath):
s = Path(s)
return s
``` |
{
"source": "jpsthecelt/asgpstest",
"score": 2
} |
#### File: jpsthecelt/asgpstest/as_GPS.py
```python
try:
import uasyncio as asyncio
except ImportError:
import asyncio
try:
from micropython import const
except ImportError:
const = lambda x : x
from math import modf
from collections import deque
import aioserial
# Angle formats
DD = const(1)
DMS = const(2)
DM = const(3)
KML = const(4)
# Speed units
KPH = const(10)
MPH = const(11)
KNOT = const(12)
# Date formats
MDY = const(20)
DMY = const(21)
LONG = const(22)
# Sentence types
RMC = const(1)
GLL = const(2)
VTG = const(4)
GGA = const(8)
GSA = const(16)
GSV = const(32)
# Messages carrying data
POSITION = const(RMC | GLL | GGA)
ALTITUDE = const(GGA)
DATE = const(RMC)
COURSE = const(RMC | VTG)
class AS_GPS(object):
# Can omit time consuming checks: CRC 6ms Bad char and line length 9ms
FULL_CHECK = True
_SENTENCE_LIMIT = 76 # Max sentence length (based on GGA sentence)
_NO_FIX = 1
# Return day of week from date. Pyboard RTC format: 1-7 for Monday through Sunday.
# https://stackoverflow.com/questions/9847213/how-do-i-get-the-day-of-week-given-a-date-in-python?noredirect=1&lq=1
# Adapted for Python 3 and Pyboard RTC format.
@staticmethod
def _week_day(year, month, day, offset = [0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334]):
aux = year - 1700 - (1 if month <= 2 else 0)
# day_of_week for 1700/1/1 = 5, Friday
day_of_week = 5
# partial sum of days betweem current date and 1700/1/1
day_of_week += (aux + (1 if month <= 2 else 0)) * 365
# leap year correction
day_of_week += aux // 4 - aux // 100 + (aux + 100) // 400
# sum monthly and day offsets
day_of_week += offset[month - 1] + (day - 1)
day_of_week %= 7
day_of_week = day_of_week if day_of_week else 7
return day_of_week
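    # Spot check (illustrative): _week_day(2020, 1, 1) evaluates to 3,
    # matching Wednesday in the 1=Monday ... 7=Sunday convention above.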
# 8-bit xor of characters between "$" and "*". Takes 6ms on Pyboard!
@staticmethod
def _crc_check(res, ascii_crc):
try:
crc = int(ascii_crc, 16)
except ValueError:
return False
x = 1
crc_xor = 0
while res[x] != '*':
crc_xor ^= ord(res[x])
x += 1
return crc_xor == crc
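    # Usage sketch: for a received line like "$GPGGA,...*5C" (hex digits
    # illustrative), the caller passes the whole sentence plus the two
    # characters after '*'; the XOR of everything between '$' and '*' must
    # equal that value or the sentence is dropped.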
def __init__(self, portName, local_offset=0, fix_cb=lambda *_ : None, cb_mask=RMC, fix_cb_args=()):
        try:
            # If None testing: update is called with simulated data
            if portName is None:
                self._sreader = None
            else:
                self._sreader = aioserial.AioSerial(port=portName)
                # Discard a few lines so parsing starts on a sentence boundary
                for _ in range(5):
                    raw = self._sreader.readline().decode('utf8', errors='ignore')
                    print("Synchronizing...: {}".format(raw), end='', flush=True)
        except Exception:
            self._sreader = None
self._fix_cb = fix_cb
self.cb_mask = cb_mask
self._fix_cb_args = fix_cb_args
self._lastRxMsg = deque(maxlen=1)
self.battery = False # Assume no backup battery
# CPython compatibility. Import utime or time for fix time handling.
try:
import utime
self._get_time = utime.ticks_ms
self._time_diff = utime.ticks_diff
self._localtime = utime.localtime
self._mktime = utime.mktime
except ImportError:
# Otherwise default to time module for non-embedded implementations
# Should still support millisecond resolution.
import time
self._get_time = time.time
self._time_diff = lambda start, end: 1000 * (start - end)
self._localtime = time.localtime
self._mktime = time.mktime
# Key: currently supported NMEA sentences. Value: parse method.
self.supported_sentences = {'GPRMC': self._gprmc, 'GLRMC': self._gprmc,
'GPGGA': self._gpgga, 'GLGGA': self._gpgga,
'GPVTG': self._gpvtg, 'GLVTG': self._gpvtg,
'GPGSA': self._gpgsa, 'GLGSA': self._gpgsa,
'GPGSV': self._gpgsv, 'GLGSV': self._gpgsv,
'GPGLL': self._gpgll, 'GLGLL': self._gpgll,
'GNGGA': self._gpgga, 'GNRMC': self._gprmc,
'GNVTG': self._gpvtg,
}
#####################
# Object Status Flags
self._fix_time = None
#####################
# Sentence Statistics
self.crc_fails = 0
self.clean_sentences = 0
self.parsed_sentences = 0
self.unsupported_sentences = 0
#####################
# Data From Sentences
# Time. http://www.gpsinformation.org/dale/nmea.htm indicates seconds
# is an integer. However hardware returns a float, but the fractional
# part is always zero. So treat seconds value as an integer. For
# precise timing use PPS signal and as_tGPS library.
self.local_offset = local_offset # hrs
self.epoch_time = 0 # Integer secs since epoch (Y2K under MicroPython)
# Add ms if supplied by device. Only used by timing drivers.
self.msecs = 0
# Position/Motion
self._latitude = [0, 0.0, 'N'] # (°, mins, N/S)
self._longitude = [0, 0.0, 'W'] # (°, mins, E/W)
self._speed = 0.0 # Knot
self.course = 0.0 # ° clockwise from N
self.altitude = 0.0 # Metres
self.geoid_height = 0.0 # Metres
self.magvar = 0.0 # Magnetic variation (°, -ve == west)
# State variables
self._last_sv_sentence = 0 # for GSV parsing
self._total_sv_sentences = 0
self._satellite_data = dict() # for get_satellite_data()
self._update_ms = 1000 # Update rate for timing drivers. Default 1 sec.
# GPS Info
self.satellites_in_view = 0
self.satellites_in_use = 0
self.satellites_used = []
self.hdop = 0.0
self.pdop = 0.0
self.vdop = 0.0
# Received status
self._valid = 0 # Bitfield of received sentences
        if self._sreader is not None:  # Running with UART data
            print('\nGot device -- Creating run loop')
            loop = asyncio.get_event_loop()
            loop.create_task(self._run(loop))
##########################################
# Data Stream Handler Functions
##########################################
async def _run(self, loop):
while True:
# res = await self._sreader.readline()
# res = res.decode('utf8')
try:
res = (await self._sreader.readline_async()).decode('utf8', errors='ignore')
except UnicodeError: # Garbage: can happen e.g. on baudrate change
continue
loop.create_task(self._update(res))
await asyncio.sleep(0) # Ensure task runs and res is copied
# Update takes a line of text
async def _update(self, line):
line = line.rstrip() # Copy line
if self.FULL_CHECK: # 9ms on Pyboard
try:
next(c for c in line if ord(c) < 10 or ord(c) > 126)
return # Bad character received
except StopIteration:
pass # All good
await asyncio.sleep(0)
if len(line) > self._SENTENCE_LIMIT or not '*' in line:
return # Too long or malformed
a = line.split(',')
segs = a[:-1] + a[-1].split('*')
await asyncio.sleep(0)
if self.FULL_CHECK: # 6ms on Pyboard
if not self._crc_check(line, segs[-1]):
self.crc_fails += 1 # Update statistics
return
await asyncio.sleep(0)
self.clean_sentences += 1 # Sentence is good but unparsed.
segs[0] = segs[0][1:] # discard $
segs = segs[:-1] # and checksum
if segs[0] in self.supported_sentences:
try:
s_type = self.supported_sentences[segs[0]](segs) # Parse
except ValueError:
s_type = False
await asyncio.sleep(0)
if isinstance(s_type, int) and (s_type & self.cb_mask):
# Successfully parsed, data was valid and mask matches sentence type
self._fix_cb(self, s_type, *self._fix_cb_args) # Run the callback
if s_type: # Successfully parsed
if self.reparse(segs): # Subclass hook
self.parsed_sentences += 1
return segs[0] # For test programs
else:
if self.parse(segs): # Subclass hook
self.parsed_sentences += 1
self.unsupported_sentences += 1
return segs[0] # For test programs
# Optional hooks for subclass
def parse(self, segs): # Parse unsupported sentences
return True
def reparse(self, segs): # Re-parse supported sentences
return True
########################################
# Fix and Time Functions
########################################
# Caller traps ValueError
def _fix(self, gps_segments, idx_lat, idx_long):
# Latitude
l_string = gps_segments[idx_lat]
lat_degs = int(l_string[0:2])
lat_mins = float(l_string[2:])
lat_hemi = gps_segments[idx_lat + 1]
# Longitude
l_string = gps_segments[idx_long]
lon_degs = int(l_string[0:3])
lon_mins = float(l_string[3:])
lon_hemi = gps_segments[idx_long + 1]
if lat_hemi not in 'NS'or lon_hemi not in 'EW':
raise ValueError
self._latitude[0] = lat_degs # In-place to avoid allocation
self._latitude[1] = lat_mins
self._latitude[2] = lat_hemi
self._longitude[0] = lon_degs
self._longitude[1] = lon_mins
self._longitude[2] = lon_hemi
self._fix_time = self._get_time()
def _dtset(self, _): # For subclass
pass
# A local offset may exist so check for date rollover. Local offsets can
# include fractions of an hour but not seconds (AFAIK).
# Caller traps ValueError
def _set_date_time(self, utc_string, date_string):
if not date_string or not utc_string:
raise ValueError
hrs = int(utc_string[0:2]) # h
mins = int(utc_string[2:4]) # mins
# Secs from MTK3339 chip is a float but others may return only 2 chars
# for integer secs. If a float keep epoch as integer seconds and store
# the fractional part as integer ms (ms since midnight fits 32 bits).
fss, fsecs = modf(float(utc_string[4:]))
secs = int(fsecs)
self.msecs = int(fss * 1000)
d = int(date_string[0:2]) # day
m = int(date_string[2:4]) # month
y = int(date_string[4:6]) + 2000 # year
wday = self._week_day(y, m, d)
t = int(self._mktime((y, m, d, hrs, mins, int(secs), wday - 1, 0, 0)))
self.epoch_time = t # This is the fundamental datetime reference.
self._dtset(wday) # Subclass may override
########################################
# Sentence Parsers
########################################
# For all parsers:
# Initially the ._valid bit for the sentence type is cleared.
# On error a ValueError is raised: trapped by the caller.
# On successful parsing the ._valid bit is set.
# The ._valid mechanism enables the data_received coro to determine what
# sentence types have been received.
# Chip sends rubbish RMC messages before first PPS pulse, but these have
# data valid set to 'V' (void)
def _gprmc(self, gps_segments): # Parse RMC sentence
self._valid &= ~RMC
# Check Receiver Data Valid Flag ('A' active)
if not self.battery:
if gps_segments[2] != 'A':
raise ValueError
# UTC Timestamp and date. Can raise ValueError.
self._set_date_time(gps_segments[1], gps_segments[9])
# Check Receiver Data Valid Flag ('A' active)
if gps_segments[2] != 'A':
raise ValueError
# Data from Receiver is Valid/Has Fix. Longitude / Latitude
# Can raise ValueError.
self._fix(gps_segments, 3, 5)
# Speed
spd_knt = float(gps_segments[7])
# Course
course = float(gps_segments[8])
# Add Magnetic Variation if firmware supplies it
if gps_segments[10]:
mv = float(gps_segments[10])
            if gps_segments[11] not in ('E', 'W'):
raise ValueError
self.magvar = mv if gps_segments[11] == 'E' else -mv
# Update Object Data
self._speed = spd_knt
self.course = course
self._valid |= RMC
return RMC
def _gpgll(self, gps_segments): # Parse GLL sentence
self._valid &= ~GLL
# Check Receiver Data Valid Flag
if gps_segments[6] != 'A': # Invalid. Don't update data
raise ValueError
# Data from Receiver is Valid/Has Fix. Longitude / Latitude
self._fix(gps_segments, 1, 3)
# Update Last Fix Time
self._valid |= GLL
return GLL
# Chip sends VTG messages with meaningless data before getting a fix.
def _gpvtg(self, gps_segments): # Parse VTG sentence
self._valid &= ~VTG
course = float(gps_segments[1])
spd_knt = float(gps_segments[5])
self._speed = spd_knt
self.course = course
self._valid |= VTG
return VTG
def _gpgga(self, gps_segments): # Parse GGA sentence
self._valid &= ~GGA
# Number of Satellites in Use
satellites_in_use = int(gps_segments[7])
# Horizontal Dilution of Precision
hdop = float(gps_segments[8])
# Get Fix Status
fix_stat = int(gps_segments[6])
# Process Location and Altitude if Fix is GOOD
if fix_stat:
# Longitude / Latitude
self._fix(gps_segments, 2, 4)
# Altitude / Height Above Geoid
altitude = float(gps_segments[9])
geoid_height = float(gps_segments[11])
# Update Object Data
self.altitude = altitude
self.geoid_height = geoid_height
self._valid |= GGA
# Update Object Data
self.satellites_in_use = satellites_in_use
self.hdop = hdop
return GGA
def _gpgsa(self, gps_segments): # Parse GSA sentence
self._valid &= ~GSA
# Fix Type (None,2D or 3D)
fix_type = int(gps_segments[2])
# Read All (up to 12) Available PRN Satellite Numbers
sats_used = []
for sats in range(12):
sat_number_str = gps_segments[3 + sats]
if sat_number_str:
sat_number = int(sat_number_str)
sats_used.append(sat_number)
else:
break
# PDOP,HDOP,VDOP
pdop = float(gps_segments[15])
hdop = float(gps_segments[16])
vdop = float(gps_segments[17])
# If Fix is GOOD, update fix timestamp
if fix_type <= self._NO_FIX: # Deviation from Michael McCoy's logic. Is this right?
raise ValueError
self.satellites_used = sats_used
self.hdop = hdop
self.vdop = vdop
self.pdop = pdop
self._valid |= GSA
return GSA
def _gpgsv(self, gps_segments):
# Parse Satellites in View (GSV) sentence. Updates no. of SV sentences,
# the no. of the last SV sentence parsed, and data on each satellite
# present in the sentence.
self._valid &= ~GSV
num_sv_sentences = int(gps_segments[1])
current_sv_sentence = int(gps_segments[2])
sats_in_view = int(gps_segments[3])
# Create a blank dict to store all the satellite data from this sentence in:
# satellite PRN is key, tuple containing telemetry is value
satellite_dict = dict()
# Calculate Number of Satelites to pull data for and thus how many segment positions to read
if num_sv_sentences == current_sv_sentence:
sat_segment_limit = ((sats_in_view % 4) * 4) + 4 # Last sentence may have 1-4 satellites
else:
sat_segment_limit = 20 # Non-last sentences have 4 satellites and thus read up to position 20
# Try to recover data for up to 4 satellites in sentence
for sats in range(4, sat_segment_limit, 4):
# If a PRN is present, grab satellite data
if gps_segments[sats]:
try:
sat_id = int(gps_segments[sats])
except IndexError:
raise ValueError # Abandon
try: # elevation can be null (no value) when not tracking
elevation = int(gps_segments[sats+1])
except (ValueError,IndexError):
elevation = None
try: # azimuth can be null (no value) when not tracking
azimuth = int(gps_segments[sats+2])
except (ValueError,IndexError):
azimuth = None
try: # SNR can be null (no value) when not tracking
snr = int(gps_segments[sats+3])
except (ValueError,IndexError):
snr = None
# If no PRN is found, then the sentence has no more satellites to read
else:
break
# Add Satellite Data to Sentence Dict
satellite_dict[sat_id] = (elevation, azimuth, snr)
# Update Object Data
self._total_sv_sentences = num_sv_sentences
self._last_sv_sentence = current_sv_sentence
self.satellites_in_view = sats_in_view
# For a new set of sentences, we either clear out the existing sat data or
# update it as additional SV sentences are parsed
if current_sv_sentence == 1:
self._satellite_data = satellite_dict
else:
self._satellite_data.update(satellite_dict)
# Flag that a msg has been received. Does not mean a full set of data is ready.
self._valid |= GSV
return GSV
#########################################
# User Interface Methods
#########################################
# Data Validity. On startup data may be invalid. During an outage it will be absent.
async def data_received(self, position=False, course=False, date=False,
altitude=False):
self._valid = 0 # Assume no messages at start
result = False
while not result:
result = True
await asyncio.sleep(1) # Successfully parsed messages set ._valid bits
if position and not self._valid & POSITION:
result = False
if date and not self._valid & DATE:
result = False
# After a hard reset the chip sends course messages even though no fix
# was received. Ignore this garbage until a fix is received.
if course:
if self._valid & COURSE:
if not self._valid & POSITION:
result = False
else:
result = False
if altitude and not self._valid & ALTITUDE:
result = False
def latitude(self, coord_format=DD):
# Format Latitude Data Correctly
if coord_format == DD:
decimal_degrees = self._latitude[0] + (self._latitude[1] / 60)
return [decimal_degrees, self._latitude[2]]
elif coord_format == DMS:
mins = int(self._latitude[1])
seconds = round((self._latitude[1] - mins) * 60)
return [self._latitude[0], mins, seconds, self._latitude[2]]
elif coord_format == DM:
return self._latitude
raise ValueError('Unknown latitude format.')
def longitude(self, coord_format=DD):
# Format Longitude Data Correctly
if coord_format == DD:
decimal_degrees = self._longitude[0] + (self._longitude[1] / 60)
return [decimal_degrees, self._longitude[2]]
elif coord_format == DMS:
mins = int(self._longitude[1])
seconds = round((self._longitude[1] - mins) * 60)
return [self._longitude[0], mins, seconds, self._longitude[2]]
elif coord_format == DM:
return self._longitude
raise ValueError('Unknown longitude format.')
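    # Example (sketch): with a fix of 40° 26.767' N, latitude(DD) returns
    # roughly [40.4461, 'N'] and latitude(DMS) returns [40, 26, 46, 'N'].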
def speed(self, units=KNOT):
if units == KNOT:
return self._speed
if units == KPH:
return self._speed * 1.852
if units == MPH:
return self._speed * 1.151
raise ValueError('Unknown speed units.')
async def get_satellite_data(self):
self._total_sv_sentences = 0
while self._total_sv_sentences == 0:
await asyncio.sleep(0)
while self._total_sv_sentences > self._last_sv_sentence:
await asyncio.sleep(0)
return self._satellite_data
def time_since_fix(self): # ms since last valid fix
if self._fix_time is None:
return -1 # No fix yet found
return self._time_diff(self._get_time(), self._fix_time)
def compass_direction(self): # Return cardinal point as string.
from as_GPS_utils import compass_direction
return compass_direction(self)
def latitude_string(self, coord_format=DM):
if coord_format == DD:
return '{:3.6f}° {:s}'.format(*self.latitude(DD))
if coord_format == DMS:
return """{:3d}° {:2d}' {:2d}" {:s}""".format(*self.latitude(DMS))
if coord_format == KML:
form_lat = self.latitude(DD)
return '{:4.6f}'.format(form_lat[0] if form_lat[1] == 'N' else -form_lat[0])
return "{:3d}° {:3.4f}' {:s}".format(*self.latitude(coord_format))
def longitude_string(self, coord_format=DM):
if coord_format == DD:
return '{:3.6f}° {:s}'.format(*self.longitude(DD))
if coord_format == DMS:
return """{:3d}° {:2d}' {:2d}" {:s}""".format(*self.longitude(DMS))
if coord_format == KML:
form_long = self.longitude(DD)
return '{:4.6f}'.format(form_long[0] if form_long[1] == 'E' else -form_long[0])
return "{:3d}° {:3.4f}' {:s}".format(*self.longitude(coord_format))
def speed_string(self, unit=KPH):
sform = '{:3.2f} {:s}'
speed = self.speed(unit)
if unit == MPH:
return sform.format(speed, 'mph')
elif unit == KNOT:
return sform.format(speed, 'knots')
return sform.format(speed, 'km/h')
# Return local time (hrs: int, mins: int, secs:float)
@property
def local_time(self):
t = self.epoch_time + int(3600 * self.local_offset)
_, _, _, hrs, mins, secs, *_ = self._localtime(t)
return hrs, mins, secs
@property
def date(self):
t = self.epoch_time + int(3600 * self.local_offset)
y, m, d, *_ = self._localtime(t)
return d, m, y - 2000
@property
def utc(self):
t = self.epoch_time
_, _, _, hrs, mins, secs, *_ = self._localtime(t)
return hrs, mins, secs
def time_string(self, local=True):
hrs, mins, secs = self.local_time if local else self.utc
return '{:02d}:{:02d}:{:02d}'.format(hrs, mins, secs)
def date_string(self, formatting=MDY):
from as_GPS_utils import date_string
return date_string(self, formatting)
``` |
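The `latitude()`/`longitude()` accessors above convert the degrees/decimal-minutes pairs parsed from NMEA into the requested format. A standalone sketch of that arithmetic (the function name and sample values are illustrative, not part of the driver):
```python
# Sketch of the DM -> DD / DMS conversion used by latitude()/longitude().
def convert_dm(degrees, minutes, hemisphere, fmt="DD"):
    if fmt == "DD":  # decimal degrees
        return [degrees + minutes / 60, hemisphere]
    if fmt == "DMS":  # degrees, whole minutes, rounded seconds
        whole_mins = int(minutes)
        seconds = round((minutes - whole_mins) * 60)
        return [degrees, whole_mins, seconds, hemisphere]
    return [degrees, minutes, hemisphere]  # DM, as parsed from the sentence

print(convert_dm(40, 26.767, "N"))         # [40.44611666666667, 'N']
print(convert_dm(40, 26.767, "N", "DMS"))  # [40, 26, 46, 'N']
```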
{
"source": "jpsthecelt/asyncTests",
"score": 2
} |
#### File: jpsthecelt/asyncTests/flask-synchronous.py
```python
from flask import (Flask, render_template,)
app = Flask(__name__)
@app.route('/')
def index():
    return render_template('index.html', message='Hello Flask Synchronous')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
``` |
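A quick smoke test for the route above using Flask's built-in test client; this sketch assumes a `templates/index.html` exists alongside the app, since `render_template` raises otherwise:
```python
# Smoke-test sketch (assumes templates/index.html is present).
with app.test_client() as client:
    response = client.get('/')
    print(response.status_code)  # 200 when the template renders
```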
{
"source": "jpstroop/issue-reporter",
"score": 2
} |
#### File: github_reporter/data/issue_report.py
```python
from cached_property import cached_property
from collections import OrderedDict
from github import Github
from github_reporter import timestamp
from github_reporter.data.issue import Issue
from github_reporter.serializers.html_report_renderer import HTMLReportRenderer
from github_reporter.serializers.report_json_encoder import ReportJSONEncoder
from itertools import groupby
from json import dumps
from os.path import join
class IssueReport:
def __init__(self, gh_token, gh_org, yesterday_iso, today_iso):
# memoized properties:
self._generic_report_path = None
self._html_path = None
self._json_path = None
# the issues
self.yesterday_iso = yesterday_iso
self.today_iso = today_iso
q = f"org:{gh_org} updated:>={self.yesterday_iso}"
print(f"{timestamp()} - Report started")
paged_issues = [i for i in Github(gh_token).search_issues(query=q)]
self.issues = [Issue(i, self.yesterday_iso) for i in paged_issues]
self.grouped_issues = IssueReport.group_issues(self.issues)
self.grouped_issues["__meta__"] = IssueReport.gather_stats(self.issues)
self.grouped_issues["__meta__"]["today"] = self.today_iso
self.grouped_issues["__meta__"]["yesterday"] = self.yesterday_iso
print(f"{timestamp()} - Report ran successfully")
def as_json(self):
json = dumps(
self.grouped_issues,
cls=ReportJSONEncoder,
ensure_ascii=False,
indent=2,
)
print(f"{timestamp()} - JSON serialized successfully")
return json
def as_html(self):
renderer = HTMLReportRenderer()
template = "issue_report_page.html.mako"
html = renderer.render(template, r=self.grouped_issues)
print(f"{timestamp()} - HTML serialized successfully")
return html
@cached_property
def generic_report_path(self):
# returns a str representing the path where we'll ultimately want this
# in the repo, without /index.html or .json, i.e.
# docs/reports/YYYY/MM/DD
ymd = self.today_iso.split("T")[0].split("-")
self._generic_report_path = join("docs", "reports", *ymd)
return self._generic_report_path
@cached_property
def html_path(self):
self._html_path = join(self.generic_report_path, "index.html")
print(f"{timestamp()} - HTML path: {self._html_path}")
return self._html_path
@cached_property
def json_path(self):
self._json_path = f"{self.generic_report_path}.json"
print(f"{timestamp()} - JSON path: {self._json_path}")
return self._json_path
@staticmethod
def group_issues(issues):
print(f"{timestamp()} - Grouping by repo started")
groups = OrderedDict()
issues.sort(key=lambda i: (i.repository_name, i.updated_at))
for repo, issues in groupby(issues, lambda i: i.repository_name):
groups[repo] = list(issues)
        # we need to use the issues multiple times when we serialize,
        # hence the `list(issues)` cast.
return groups
@staticmethod
def gather_stats(issues):
groups = {"issue_count": len(issues)}
issues.sort(key=lambda r: r.action)
for action, issues in groupby(issues, lambda r: r.action):
groups[f"{action}_count"] = len(list(issues))
return groups
```
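`group_issues` works because `itertools.groupby` only groups *consecutive* items, which is why the list is sorted on the same key first. A minimal standalone illustration of that pattern:
```python
# Standalone illustration of the sort-then-groupby pattern (not reporter code).
from itertools import groupby

records = [{"repo": "b"}, {"repo": "a"}, {"repo": "b"}]
records.sort(key=lambda r: r["repo"])  # required: groupby only merges adjacent keys
grouped = {repo: list(items) for repo, items in groupby(records, lambda r: r["repo"])}
print(grouped)  # {'a': [{'repo': 'a'}], 'b': [{'repo': 'b'}, {'repo': 'b'}]}
```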
#### File: issue-reporter/github_reporter/github_committer.py
```python
from github import Github, InputGitTreeElement
from github_reporter import timestamp
from os import sep
from sys import stderr
class GithubCommitter:
def __init__(self, github_token, repo_name):
self.github = Github(github_token)
self.repo = self.github.get_user().get_repo(repo_name)
def commit(self, path_data_pairs, commit_message, branch):
try:
branch_ref = self.repo.get_git_ref(f"heads/{branch}")
branch_sha = branch_ref.object.sha
base_tree = self.repo.get_git_tree(branch_sha)
element_list = self._build_element_list(path_data_pairs)
tree = self.repo.create_git_tree(element_list, base_tree)
parent = self.repo.get_git_commit(branch_sha)
commit = self.repo.create_git_commit(
commit_message, tree, [parent]
)
print(f"{timestamp()} - Commit sha: {commit.sha}")
branch_ref.edit(commit.sha)
except Exception as e:
print(f"{timestamp()} - {e}")
return False
else:
return True
def _build_element_list(self, path_data_pairs):
element_list = []
for path, data in path_data_pairs:
print(f"{timestamp()} - Adding {path} to commit")
element = InputGitTreeElement(path, "100644", "blob", data.read())
element_list.append(element)
return element_list
```
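Hypothetical usage of `GithubCommitter` above; the token, repository name, branch, and file contents below are placeholders, and `commit()` reports success as a boolean rather than raising:
```python
# Hypothetical usage sketch -- requires a valid token and an existing repo/branch.
from io import StringIO

committer = GithubCommitter("<token>", "my-reports-repo")
pairs = (("docs/index.json", StringIO("[]")),)
ok = committer.commit(pairs, "[automated commit] example", "main")
print(ok)  # True on success, False if any step of the git-data dance failed
```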
#### File: issue-reporter/github_reporter/github_reporter.py
```python
from cached_property import cached_property
from contextlib import contextmanager
from datetime import datetime, timedelta
from github_reporter import timestamp
from github_reporter.data.issue_report import IssueReport
from github_reporter.github_committer import GithubCommitter
from io import StringIO
from json import dumps
from os import sep
from os.path import join
from pytz import timezone
from requests import get
import requests_cache as rc
class GithubReporter:
def __init__(self, secrets, config, dump_to_stdout=False):
rc.install_cache("ghr", backend="memory", expire_after=300)
self.dump_to_stdout = dump_to_stdout
self.yesterday_iso, self.today_iso = self.get_dates(config["timezone"])
self.secrets = secrets
self.config = config
# memoized vars
self._index_json_path = None
self._issue_report = None
@cached_property
def issue_report(self):
token = self.secrets["GITHUB_TOKEN"]
org = self.secrets["GITHUB_ORGANIZATION"]
self._issue_report = IssueReport(
token, org, self.yesterday_iso, self.today_iso
)
return self._issue_report
def get_dates(self, tz):
today_dt = datetime.now(timezone(tz)).replace(tzinfo=None)
today = today_dt.isoformat(timespec="seconds")
yesterday = (today_dt - timedelta(days=1)).isoformat(
timespec="seconds"
)
print(f"{timestamp()} - Today: {today}")
print(f"{timestamp()} - Yesterday: {yesterday}")
return (yesterday, today)
@contextmanager
def report_strings(self):
json_string = StringIO(self.issue_report.as_json())
html_string = StringIO(self.issue_report.as_html())
index_string = StringIO(self.updated_index())
yield json_string, html_string, index_string
json_string.close()
html_string.close()
index_string.close()
@cached_property
def index_json_path(self):
self._index_json_path = join("docs", "index.json")
print(f"{timestamp()} - Index JSON path: {self._index_json_path}")
return self._index_json_path
def updated_index(self):
index = self.get_current_index()
date = self.today_iso.split("T")[0]
index = list(filter(lambda e: e["date"] != date, index))
html = (
f"{sep.join(self.issue_report.html_path.split(sep)[1:-1])}{sep}"
) # removes docs/ and index.html
entry = {
"date": date,
"meta": self.issue_report.grouped_issues["__meta__"],
"run_start": self.today_iso,
"html": html,
"json": sep.join(
self.issue_report.json_path.split(sep)[1:]
), # removes docs/
}
index.insert(0, entry)
return dumps(index, indent=2, ensure_ascii=False)
def get_current_index(self):
org = self.config["github_org"]
repo = self.config["github_repo_name"]
url = f"https://{org}.github.io/{repo}/index.json"
with rc.disabled():
print(f"{timestamp()} - Getting {url} for updating")
headers = {
"cache-control": "no-store"
            }  # not sure if this makes a difference
index_json = get(url, headers=headers).json()
return index_json
def run_report(self):
with self.report_strings() as (json_str, html_str, index_str):
if self.dump_to_stdout:
print(json_str.read())
commit_success = True
else:
repo = self.config["github_repo_name"]
committer = GithubCommitter(self.secrets["GITHUB_TOKEN"], repo)
date = self.today_iso.split("T")[0]
message = f"[automated commit] reports for {date}"
print(f"{timestamp()} - Committing {message}")
path_data_pairs = (
(self.issue_report.json_path, json_str),
(self.issue_report.html_path, html_str),
(self.index_json_path, index_str),
)
branch = self.config["branch"]
commit_success = committer.commit(
path_data_pairs, message, branch
)
return commit_success
```
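The path munging in `updated_index()` strips the leading `docs/` segment (and, for HTML, the trailing `index.html`) so index entries are relative to the published site root. A sketch of what it produces for a sample report path:
```python
# What the updated_index() path munging yields for a sample report path.
from os import sep
from os.path import join

html_path = join("docs", "reports", "2020", "04", "01", "index.html")
print(f"{sep.join(html_path.split(sep)[1:-1])}{sep}")  # reports/2020/04/01/
json_path = join("docs", "reports", "2020", "04", "01") + ".json"
print(sep.join(json_path.split(sep)[1:]))              # reports/2020/04/01.json
```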
#### File: jpstroop/issue-reporter/main.py
```python
from github_reporter import timestamp
from github_reporter.app_setup import load_config, load_secrets
from github_reporter.github_reporter import GithubReporter
from sys import stderr
def main(event={}, context={}):
try:
is_google_run = event.get("is_google_run", True)
dump_to_stdout = event.get("dump_to_stdout", False)
secrets = load_secrets()
config = load_config()
if is_google_run:
print(f"{timestamp()} - Google called me")
if dump_to_stdout:
print(f"{timestamp()} - Local run, will dump JSON to stdout")
gh_reporter = GithubReporter(secrets, config, dump_to_stdout)
commit_success = gh_reporter.run_report()
print(f"{timestamp()} - Commit success: {commit_success}")
return True # Cloud Functions must return something
except Exception as e:
print(f"{timestamp()} - {e}", file=stderr)
return False
if __name__ == "__main__":
main(event={"is_google_run": False, "dump_to_stdout": True})
```
#### File: jpstroop/issue-reporter/regenerate_html.py
```python
from datetime import date, timedelta
from github_reporter.html_report_renderer import HTMLReportRenderer
from json import load
from os import sep, walk
from os.path import abspath, dirname, join
# A helper script for regenerating the HTML from our JSON reports when
# the template has changed.
def html_file_path_from_json_path(json_path):
return json_path.replace(".json", f"{sep}index.html")
def html_dir_path_from_json_path(json_path):
return json_path.replace(".json", "")
def html_from_json(renderer, json_path):
with open(json_path, "r") as f:
report = load(f)
html_path = html_file_path_from_json_path(json_path)
template = "issue_report_page.html.mako"
with open(html_path, "w") as f:
print(renderer.render(template, r=report), file=f)
return 0
def accumulate_json(root_path):
def filt(tup):
return all([i.endswith(".json") for i in tup[2]]) and len(tup[2]) > 0
dir_files = [(dp, fn) for dp, _, fn in filter(filt, walk(root_path))]
return [join(dir, file) for dir, files in dir_files for file in files]
def date_from_json_path(json_path):
y, m, d = map(int, json_path.split(".")[0].split(sep)[-3:])
return date(y, m, d)
def docs_dir():
return join(abspath(dirname(__file__)), "docs", "reports")
def json_is_older_than_n_days(json_path, n):
today = date.today()
file_date = date_from_json_path(json_path)
return (today - file_date).days > n
# "public" functions you might want to call in __main__
def rerender_html():
renderer = HTMLReportRenderer()
[html_from_json(renderer, jp) for jp in accumulate_json(docs_dir())]
# TODO: pick this up when we have more data and actually want it
# TODO: don't forget to update index.json
# TODO: don't forget empty month and year dirs-
# Maybe just run something like this:
# https://www.jacobtomlinson.co.uk/posts/2014/
# python-script-recursively-remove-empty-folders/directories/
def remove_old_reports(days):
json_paths = accumulate_json(docs_dir())
to_rm = []
for jp in json_paths:
if json_is_older_than_n_days(jp, days):
html_path = html_file_path_from_json_path(jp)
html_dir = html_dir_path_from_json_path(jp)
to_rm.append((jp, html_path, html_dir))
print(to_rm)
# TODO: sigh...JSONReport class:
# .date, .html, .html_dir, .rerender_html(), .older_than(days)
# .remove()
if __name__ == "__main__":
rerender_html()
```
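`date_from_json_path` relies on reports living at `docs/reports/YYYY/MM/DD.json`; the last three path segments become the date. For example:
```python
# How date_from_json_path extracts a date from a report path.
from datetime import date
from os import sep
from os.path import join

path = join("docs", "reports", "2020", "04", "01") + ".json"
y, m, d = map(int, path.split(".")[0].split(sep)[-3:])
print(date(y, m, d))  # 2020-04-01
```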
#### File: github_reporter/serializers/report_json_encoder_tests.py
```python
from datetime import datetime
from github_reporter.data.event import Event
from github_reporter.serializers.report_json_encoder import ReportJSONEncoder
from json import dumps, loads
from pytest import raises
from unittest.mock import Mock
class ReportJSONEncoderTests():
def test_it_handles_datetimes(self, faker):
date = faker.date_time()
d = { 'date' : date }
encoded = dumps(d, cls=ReportJSONEncoder)
expected = f'{{"date": "{date.isoformat()}"}}'
assert encoded == expected
def test_it_handles_iterables(self):
d = { 'i' : range(5) }
encoded = dumps(d, cls=ReportJSONEncoder)
expected = '{"i": [0, 1, 2, 3, 4]}'
assert encoded == expected
    def test_it_handles_events(self, event_props):
gh_event = Mock(**event_props)
event = Event(gh_event)
encoded = dumps(event, cls=ReportJSONEncoder)
decoded = loads(encoded)
assert decoded['created_at'] == event_props['created_at'].isoformat()
assert decoded['type'] == event_props['event']
assert decoded['actor_name'] == event_props['actor.name']
    def test_raises_typeerror(self):
with raises(TypeError):
dumps(Mock(cannot='serialize'), cls=ReportJSONEncoder)
``` |
{
"source": "jpstroop/lcc-tree",
"score": 2
} |
#### File: jpstroop/lcc-tree/build_data.py
```python
from os.path import abspath
from os.path import dirname
from os.path import join
from pymarc import MARCReader
import glob
import json
import multiprocessing
import tqdm
out_dir = join(dirname(abspath(__file__)), 'data')
path = f'{out_dir}/*.mrc'
def process_file(f):
results = {}
try:
with open(f, 'rb') as fh:
reader = MARCReader(fh)
for record in reader:
if '050' in record:
if 'a' in record['050']:
# might want to change this stuff
# take the first set of codes before a "."
lcc = record['050']['a'].split('.')[0]
# take whatever is first if there is a space
lcc = lcc.split(' ')[0]
if lcc not in results:
results[lcc] = 0
                        results[lcc] += 1
    except Exception:
        print(f"error parsing {f}")
return results
if __name__ == '__main__':
# error parsing /Users/jstroop/workspace/lcc-tree/data/49932.mrc
work = list(glob.glob(path))
the_pool = multiprocessing.Pool(multiprocessing.cpu_count())
all_results = {}
for one_file_results in tqdm.tqdm(the_pool.imap_unordered(process_file, work), total=len(work)):
for k in one_file_results:
if k not in all_results:
                all_results[k] = one_file_results[k]
else:
all_results[k] = all_results[k] + one_file_results[k]
    with open('lcc_count.json', 'w') as out:
        json.dump(all_results, out, indent=2)
the_pool.close()
the_pool.join()
```
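The merge loop in `__main__` is the classic counter merge; `collections.Counter` expresses the same thing more compactly. A sketch, assuming each per-file result is a plain `{code: count}` dict as returned by `process_file`:
```python
# Equivalent merge with collections.Counter (sketch, not part of the script).
from collections import Counter

per_file = [{"QA": 2, "PS": 1}, {"QA": 1, "Z": 4}]
totals = Counter()
for result in per_file:
    totals.update(result)  # adds counts key by key
print(dict(totals))  # {'QA': 3, 'PS': 1, 'Z': 4}
```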
#### File: jpstroop/lcc-tree/get_data.py
```python
from os import makedirs
from os import remove
from os.path import abspath
from os.path import dirname
from os.path import join
from os.path import sep
from requests import get
from gzip import open as gz_open
from shutil import copyfileobj
ALL_RECORDS_URL = "https://bibdata.princeton.edu/dumps/21750.json"
def set_up():
out_dir = join(dirname(abspath(__file__)), 'data')
makedirs(out_dir, exist_ok=True)
return out_dir
def download(url, file_path):
r = get(url, stream=True)
with open(file_path, 'wb') as fd:
for chunk in r.iter_content(chunk_size=1024):
fd.write(chunk)
mrc_path = file_path.replace('.gz', '.mrc')
with gz_open(file_path, 'rb') as f_in:
with open(mrc_path, 'wb') as f_out:
copyfileobj(f_in, f_out)
remove(file_path)
if __name__ == '__main__':
out_dir = set_up()
all_records_json = get(ALL_RECORDS_URL).json()
for entry in all_records_json['files']['bib_records']:
url = entry['dump_file']
download_path = f"{out_dir}{sep}{url.split('/')[-1]}.gz"
print(url)
download(url, download_path)
``` |
{
"source": "jpstroop/loris-redux",
"score": 2
} |
#### File: loris/compliance/abstract_feature_set.py
```python
from loris.compliance.helpers import ComparableMixin
from loris.compliance.helpers import st
class AbstractFeatureSet(ComparableMixin):
# Override these in implementations
LEVEL_2 = ()
LEVEL_1 = ()
    LEVEL_0 = ()  # Only necessary to override if values are listed in the profile
ALL = ()
def __init__(self, config):
self._config = config
self._features = None
@property
def features(self):
# This is a list of features the config actually supports, regardless
# of level. See http://iiif.io/api/image/3.1/compliance/
# This should be passed to the various Parameter constructors.
if self._features is None:
self._features = st(k for k, v in self._config.items() if v["enabled"])
return self._features
    # This is here so that we can change features dynamically during tests. It
# is not used in production
@features.setter
def features(self, features):
self._features = features
def __int__(self):
if all(f in self.features for f in self.LEVEL_2):
return 2
elif all(f in self.features for f in self.LEVEL_1):
return 1
else:
return 0
```
#### File: loris/compliance/helpers.py
```python
def st(it):
# Make a sorted tuple. Sorting makes testing easier.
return tuple(sorted(it))
class ComparableMixin(object):
# Make it possible to do comparisons with an int w/o casting. Classes must
# implement __int__(self)
def __lt__(self, an_int):
return int(self) < an_int
def __le__(self, an_int):
return int(self) <= an_int
def __eq__(self, an_int):
return int(self) == an_int
def __ne__(self, an_int):
return int(self) != an_int
def __gt__(self, an_int):
return int(self) > an_int
def __ge__(self, an_int):
return int(self) >= an_int
```
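Any class that implements `__int__` picks up the full set of int comparisons from `ComparableMixin`; that is what lets the compliance classes be compared directly against literal levels. A minimal illustration (the class below is made up for the example):
```python
# Minimal illustration: __int__ plus the mixin gives int-comparison semantics.
class Level(ComparableMixin):
    def __init__(self, n):
        self.n = n
    def __int__(self):
        return self.n

assert Level(2) == 2
assert Level(1) < 2
assert Level(3) >= 2
```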
#### File: loris/compliance/__init__.py
```python
from loris.compliance.format import FormatCompliance
from loris.compliance.helpers import ComparableMixin
from loris.compliance.helpers import st
from loris.compliance.http import HttpCompliance
from loris.compliance.quality import QualityCompliance
from loris.compliance.region import RegionCompliance
from loris.compliance.rotation import RotationCompliance
from loris.compliance.size import SizeCompliance
from loris.constants import KEYWORD_MAX_AREA
from loris.constants import KEYWORD_MAX_HEIGHT
from loris.constants import KEYWORD_MAX_WIDTH
from loris.constants import QUALITY_COLOR
class Compliance(ComparableMixin):
ALL_LEVEL_1 = st(
HttpCompliance.LEVEL_1
+ QualityCompliance.LEVEL_1
+ RegionCompliance.LEVEL_1
+ RotationCompliance.LEVEL_1
+ SizeCompliance.LEVEL_1
)
ALL_LEVEL_2 = st(
FormatCompliance.LEVEL_2
+ HttpCompliance.LEVEL_2
+ QualityCompliance.LEVEL_2
+ RegionCompliance.LEVEL_2
+ RotationCompliance.LEVEL_2
+ SizeCompliance.LEVEL_2
)
def __init__(self, config):
self.format = FormatCompliance(config["formats"])
self.http = HttpCompliance(config["http"])
self.quality = QualityCompliance(config["quality"])
self.region = RegionCompliance(config["region"])
self.rotation = RotationCompliance(config["rotation"])
self.size = SizeCompliance(config["size"])
self._extra_features = None
self._int = None
self._uri = None
# make it possible to do int(self), and do comparisons
def __int__(self):
if self._int is None:
ints = map(
int, (self.format, self.http, self.quality, self.region, self.rotation, self.size)
)
self._int = min(ints)
return self._int
def __str__(self):
return f"level{int(self)}"
@property
def uri(self):
if self._uri is None:
self._uri = f"http://iiif.io/api/image/3/level{int(self)}.json"
return self._uri
@property
def all_enabled_features(self):
# Note that formats and qualities aren't 'features' and are always
        # listed explicitly in the profile (other than jpg and default)
return st(
self.http.features + self.region.features + self.rotation.features + self.size.features
)
def extra_qualities(self, include_color=True):
qualities = self.quality.features
if not include_color:
qualities = tuple(filter(lambda q: q != QUALITY_COLOR, qualities))
return qualities
@property
def extra_formats(self):
return self.format.features
@property
def extra_features(self):
# Features supported above the calculated compliance level, i.e. the
# difference between all enabled features and the calculated compliance
# level. For listing in profile[1]['supports'].
if self._extra_features is None:
level_features = set(()) # 0
if int(self) == 2:
level_features = set(Compliance.ALL_LEVEL_2)
elif int(self) == 1:
level_features = set(Compliance.ALL_LEVEL_1)
self._extra_features = set(self.all_enabled_features) - level_features
return st(self._extra_features)
```
#### File: loris/compliance/region.py
```python
from loris.compliance.abstract_feature_set import AbstractFeatureSet
from loris.compliance.helpers import st
from loris.constants import FEATURE_REGION_BY_PCT
from loris.constants import FEATURE_REGION_BY_PIXEL
from loris.constants import FEATURE_REGION_SQUARE
class RegionCompliance(AbstractFeatureSet):
LEVEL_1 = st((FEATURE_REGION_BY_PIXEL, FEATURE_REGION_SQUARE))
LEVEL_2 = st(LEVEL_1 + (FEATURE_REGION_BY_PCT,))
ALL = LEVEL_2
def __init__(self, config):
super().__init__(config)
```
#### File: loris/handlers/handler_helpers_mixin.py
```python
from loris.constants import FEATURE_PROFILE_LINK_HEADER
from loris.requests.iiif_request import IIIFRequest
import cherrypy
class HandlerHelpersMixin(object):
@property
def _profile_header_enabled(self):
return FEATURE_PROFILE_LINK_HEADER in IIIFRequest.compliance.http.features
@property
def _profile_header(self):
return f'<{IIIFRequest.compliance.uri}>;rel="profile"'
def _etag_match(self, request):
return cherrypy.request.headers.get("if-none-match") == request.etag
def _error_response(self, loris_exception):
cherrypy.response.headers["Content-Type"] = "application/json"
cherrypy.response.status = loris_exception.http_status_code
return str(loris_exception).encode("utf8")
```
#### File: loris/handlers/image_handler.py
```python
from logging import getLogger
from loris.constants import MEDIA_TYPE_MAPPING
from loris.exceptions import LorisException
from loris.handlers.cors_mixin import CORSMixin
from loris.handlers.handler_helpers_mixin import HandlerHelpersMixin
from loris.requests.image_request import ImageRequest
import cherrypy
logger = getLogger("loris")
class ImageHandler(HandlerHelpersMixin, CORSMixin):
exposed = True
def GET(self, identifier, iiif_params):
cherrypy.response.headers["Allow"] = "GET"
self._set_acao()
del cherrypy.response.headers["Content-Type"]
try:
return self._conditional_response(identifier, iiif_params)
except LorisException as le:
return self._error_response(le)
def _conditional_response(self, identifier, iiif_params):
image_request = ImageRequest(identifier, iiif_params)
if self._etag_match(image_request):
cherrypy.response.status = 304
return None
elif iiif_params != image_request.canonical:
            canonical_uri = f"/{identifier}/{image_request.canonical}"
            cherrypy.response.headers["Location"] = canonical_uri
cherrypy.response.status = 303
return None
else:
transcoder = ImageRequest.transcoders[image_request.file_format]
stream = transcoder.execute(image_request)
media_type = MEDIA_TYPE_MAPPING[image_request.format]
if self._profile_header_enabled:
cherrypy.response.headers["Link"] = self._profile_header
cherrypy.response.headers["content-type"] = media_type
cherrypy.response.headers["etag"] = image_request.etag
return stream.getvalue()
```
#### File: loris/helpers/import_class.py
```python
from importlib import import_module
def import_class(qname):
# Imports a class and returns it (the class, not an instance).
module_name = ".".join(qname.split(".")[:-1])
class_name = qname.split(".")[-1]
module = import_module(module_name)
return getattr(module, class_name)
```
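Usage is straightforward; with a standard-library class as the target it can be exercised anywhere:
```python
# Example: dynamically import collections.OrderedDict by its dotted name.
OrderedDict = import_class("collections.OrderedDict")
print(OrderedDict(a=1))  # OrderedDict([('a', 1)]) (exact repr varies by version)
```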
#### File: loris/info/pillow_extractor.py
```python
from loris.constants import COLOR_QUALITIES
from loris.constants import QUALITY_BITONAL_QUALITIES
from loris.constants import QUALITY_GROUP_GRAY
from loris.info.abstract_extractor import AbstractExtractor
from loris.info.structs.info import Info
from loris.info.structs.size import Size
from loris.info.structs.tile import Tile
from math import ceil
from PIL import Image
MODES_TO_QUALITIES = {
"1": QUALITY_BITONAL_QUALITIES,
"L": QUALITY_GROUP_GRAY,
"LA": QUALITY_GROUP_GRAY,
"P": QUALITY_GROUP_GRAY,
"RGB": COLOR_QUALITIES,
"RGBA": COLOR_QUALITIES,
"CMYK": COLOR_QUALITIES,
"YCbCr": COLOR_QUALITIES,
"I": COLOR_QUALITIES,
"F": COLOR_QUALITIES,
}
COLOR_MODES = ("RGB", "RGBA", "CMYK", "YCbCr", "I", "F")
class PillowExtractor(AbstractExtractor):
def __init__(self, compliance, app_configs):
super().__init__(compliance, app_configs)
sf = app_configs["sizes_and_tiles"]["other_formats"]
self.include_sizes_and_tiles = sf["enabled"]
if self.include_sizes_and_tiles:
self.tile_w = sf["tile_width"]
self.tile_h = sf["tile_height"]
self.include_all_factors = sf["all_scale_factors"]
self.min_dimension = sf["min_dimension"]
def extract(self, path, http_identifier):
info = self.init_info(http_identifier)
pillow_image = Image.open(path)
w, h = pillow_image.size
info.width, info.height = (w, h)
info.extra_qualities = self._make_qualities(pillow_image)
max_size = self.max_size(w, h)
if self.include_sizes_and_tiles:
scale_factors = self._scale_factors(w, h)
info.tiles = self._calc_tiles(w, h, scale_factors)
tile_size = info.tiles[0]
info.sizes = self._calc_sizes(w, h, max_size, tile_size, scale_factors)
else:
info.sizes = [max_size]
return info
def _make_qualities(self, pillow_image):
is_color = PillowExtractor.is_color(pillow_image)
return self.compliance.extra_qualities(is_color)
def _scale_factors(self, image_w, image_h):
short_image_dimenson = min(image_w, image_h)
scales = []
nxt = 1
while ceil(short_image_dimenson / nxt) >= self.min_dimension:
scales.append(nxt)
nxt = scales[-1] * 2
return scales
def _calc_tiles(self, image_w, image_h, scale_factors):
image_long = max(image_w, image_h)
self.tile_long = max(self.tile_w, self.tile_h)
scales = filter(lambda s: (image_long / s) > self.tile_long, scale_factors)
return [Tile(self.tile_w, tuple(scales), self.tile_h)]
def _calc_sizes(self, image_w, image_h, max_size, tile_size, scale_factors):
# Note: We make heavy use of the fact that Size and Tile structs are
# comparable here. See loris.info.structs.size.Size, etc. for details.
# It's cool.
sizes = [max_size]
for s in scale_factors:
this_size = Size(ceil(image_w / s), ceil(image_h / s))
less_than_max = this_size < max_size
less_than_tile = this_size < tile_size
if self.include_all_factors and less_than_max:
sizes.append(this_size)
elif not self.include_all_factors and less_than_tile:
sizes.append(this_size)
return sizes
@staticmethod
def is_color(pillow_image):
return pillow_image.mode in COLOR_MODES
```
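`_scale_factors` builds the power-of-two pyramid by doubling the divisor until the short image dimension would fall below `min_dimension`. Standalone, with an example `min_dimension` of 64 (the real value comes from config):
```python
# Standalone sketch of the _scale_factors pyramid logic.
from math import ceil

def scale_factors(image_w, image_h, min_dimension=64):  # 64 is an example value
    short_dim = min(image_w, image_h)
    scales, nxt = [], 1
    while ceil(short_dim / nxt) >= min_dimension:
        scales.append(nxt)
        nxt = scales[-1] * 2
    return scales

print(scale_factors(6000, 4000))  # [1, 2, 4, 8, 16, 32]
```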
#### File: info/structs/info.py
```python
from collections import OrderedDict
from dataclasses import dataclass
from json import dumps
from loris.compliance import Compliance
from loris.constants import KEYWORD_CONTEXT
from loris.constants import KEYWORD_EXTRA_FEATURES
from loris.constants import KEYWORD_EXTRA_FORMATS
from loris.constants import KEYWORD_EXTRA_QUALITIES
from loris.constants import KEYWORD_HEIGHT
from loris.constants import KEYWORD_ID
from loris.constants import KEYWORD_IMAGE_SERVICE_3
from loris.constants import KEYWORD_MAX_AREA
from loris.constants import KEYWORD_MAX_HEIGHT
from loris.constants import KEYWORD_MAX_WIDTH
from loris.constants import KEYWORD_PROFILE
from loris.constants import KEYWORD_PROTOCOL
from loris.constants import KEYWORD_SIZES
from loris.constants import KEYWORD_TILES
from loris.constants import KEYWORD_TYPE
from loris.constants import KEYWORD_WIDTH
from loris.constants import URI_CONTEXT
from loris.constants import URI_PROTOCOL
from loris.info.structs.size import Size
from loris.info.structs.tile import Tile
from operator import methodcaller
from typing import List
@dataclass
class Info:
compliance: Compliance
http_identifier: str
width: int = None
height: int = None
_long_dim: int = None
_short_dim: int = None
_all_scales: List[int] = None
tiles: List[Tile] = None
sizes: List[Size] = None
max_area: int = None
max_width: int = None
max_height: int = None
extra_formats: List[str] = None
extra_qualities: List[str] = None
extra_features: List[str] = None
def __str__(self):
return dumps(self.to_dict())
def __repr__(self):
return repr(self.to_dict())
@property
def long_dim(self):
if not self._long_dim:
self._long_dim = max(self.width, self.height)
return self._long_dim
@property
def short_dim(self):
if not self._short_dim:
self._short_dim = min(self.width, self.height)
return self._short_dim
@property
def all_scales(self):
# When dealing with Jp2s, scaleFactors are the same as the baked-in
# resolutions. These are easier to deal with than the sizes list when
# making derivatives
if not self._all_scales:
self._all_scales = [s for t in self.tiles for s in t.scale_factors]
return self._all_scales
@staticmethod
def _cleandict(d):
"""
Remove None values from the dict to avoid nulls in serialization.
"""
if not isinstance(d, OrderedDict):
return d
return {k: Info._cleandict(v) for (k, v) in d.items() if v is not None}
@staticmethod
def _sizes_to_list(sizes):
return list(map(methodcaller("to_dict"), sorted(sizes)))
def to_dict(self):
d = OrderedDict()
d[KEYWORD_CONTEXT] = URI_CONTEXT
d[KEYWORD_ID] = self.http_identifier
d[KEYWORD_TYPE] = KEYWORD_IMAGE_SERVICE_3
d[KEYWORD_PROTOCOL] = URI_PROTOCOL
d[KEYWORD_PROFILE] = str(self.compliance)
d[KEYWORD_WIDTH] = self.width
d[KEYWORD_HEIGHT] = self.height
if self.tiles:
d[KEYWORD_TILES] = Info._sizes_to_list(self.tiles)
if self.sizes:
d[KEYWORD_SIZES] = Info._sizes_to_list(self.sizes)
d[KEYWORD_MAX_AREA] = self.max_area
d[KEYWORD_MAX_WIDTH] = self.max_width
d[KEYWORD_MAX_HEIGHT] = self.max_height
d[KEYWORD_EXTRA_FORMATS] = self.extra_formats
d[KEYWORD_EXTRA_QUALITIES] = self.extra_qualities
d[KEYWORD_EXTRA_FEATURES] = self.extra_features
return Info._cleandict(d)
```
#### File: info/structs/size.py
```python
from collections import OrderedDict
from dataclasses import dataclass
from loris.constants import KEYWORD_HEIGHT
from loris.constants import KEYWORD_WIDTH
@dataclass
class Size:
__slots__ = "width", "height"
width: int
height: int
def __lt__(self, other):
return self.width < other.width and self.height < other.height
def __le__(self, other):
return self.width <= other.width and self.height <= other.height
def __eq__(self, other):
return self.width == other.width and self.height == other.height
def __ge__(self, other):
return self.width >= other.width and self.height >= other.height
def __gt__(self, other):
return self.width > other.width and self.height > other.height
def __ne__(self, other):
return (self.width != other.width) or (self.height != other.height)
def __repr__(self):
return f"Size({self.width},{self.height})"
def to_dict(self):
return OrderedDict(((KEYWORD_WIDTH, self.width), (KEYWORD_HEIGHT, self.height)))
```
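Note that these comparisons require *both* dimensions to satisfy the operator, so `Size`s are only partially ordered, which is exactly what the size/tile filtering in the extractors relies on:
```python
# Sizes are partially ordered: neither of these dominates the other.
a = Size(100, 200)
b = Size(150, 150)
print(a < Size(200, 300))  # True -- both dimensions are smaller
print(a < b, a > b)        # False False
print(a != b)              # True
```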
#### File: loris/parameters/api.py
```python
from abc import ABCMeta
from abc import abstractmethod
class AbstractParameter(metaclass=ABCMeta):
@abstractmethod
def __init__(self, uri_slice, enabled_features):
self.uri_slice = uri_slice
self.enabled_features = enabled_features
self._canonical = None
return
@property
@abstractmethod
def canonical(self): # pragma: no cover
return
def __str__(self): # pragma: no cover
return self.canonical
```
#### File: loris/parameters/rotation.py
```python
from loris.constants import FEATURE_ROTATION_ARBITRARY
from loris.constants import FEATURE_ROTATION_BY_90S
from loris.constants import FEATURE_ROTATION_MIRRORING
from loris.exceptions import FeatureNotEnabledException
from loris.exceptions import RequestException
from loris.exceptions import SyntaxException
from loris.parameters.api import AbstractParameter
from re import compile
REGEX = compile(r"^!?\d+(?:\.\d+)?$")
class RotationParameter(AbstractParameter):
def __init__(self, uri_slice, enabled_features):
super().__init__(uri_slice, enabled_features)
if not REGEX.match(uri_slice):
msg = f"Could not parse region request ({uri_slice})"
raise SyntaxException(msg)
self.mirror = self.uri_slice[0] == "!"
self._rotation = None
self._run_checks()
@property
def rotation(self):
# raises SyntaxException
if self._rotation is None:
s = self.uri_slice[1:] if self.mirror else self.uri_slice
self._rotation = float(s)
return self._rotation
@property
def canonical(self):
if self._canonical is None:
if self.mirror:
self._canonical = f"!{self.rotation:g}"
else:
self._canonical = f"{self.rotation:g}"
return self._canonical
def _run_checks(self):
self._check_range()
self._check_mirroring()
self._check_rotation()
def _check_range(self):
if not 0.0 <= self.rotation <= 360.0:
msg = f"Rotation must be between 0 and 360 ({self.rotation})"
raise RequestException(msg)
def _check_mirroring(self):
if self.mirror and FEATURE_ROTATION_MIRRORING not in self.enabled_features:
raise FeatureNotEnabledException(FEATURE_ROTATION_MIRRORING)
def _check_rotation(self):
if self.rotation == 0.0:
return
if self.rotation % 90 == 0.0 and FEATURE_ROTATION_BY_90S not in self.enabled_features:
raise FeatureNotEnabledException(FEATURE_ROTATION_BY_90S)
if self.rotation % 90 != 0.0 and FEATURE_ROTATION_ARBITRARY not in self.enabled_features:
raise FeatureNotEnabledException(FEATURE_ROTATION_ARBITRARY)
```
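The canonical form leans on Python's general-format spec (`:g`) to drop insignificant trailing zeros, e.g.:
```python
# How the ':g' spec normalizes rotation values for the canonical URI.
for s in ("90", "90.0", "22.5", "!45.50"):
    mirror = s.startswith("!")
    value = float(s[1:] if mirror else s)
    print(("!" if mirror else "") + f"{value:g}")
# -> 90, 90, 22.5, !45.5
```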
#### File: loris/parameters/size.py
```python
from decimal import Decimal
from loris.constants import DECIMAL_ONE_HUNDRED
from loris.constants import FEATURE_SIZE_BY_CONFINED_WH
from loris.constants import FEATURE_SIZE_BY_H
from loris.constants import FEATURE_SIZE_BY_PCT
from loris.constants import FEATURE_SIZE_BY_W
from loris.constants import FEATURE_SIZE_BY_WH
from loris.constants import FEATURE_SIZE_UPSCALING
from loris.constants import KEYWORD_MAX
from loris.constants import KEYWORD_MAX_AREA
from loris.constants import KEYWORD_MAX_HEIGHT
from loris.constants import KEYWORD_MAX_WIDTH
from loris.exceptions import FeatureNotEnabledException
from loris.exceptions import RequestException
from loris.exceptions import SyntaxException
from loris.info.structs.size import Size
from loris.parameters.api import AbstractParameter
from math import floor
from re import compile
from re import match
# Note that these regexes do not account for a leading '^'; we just check for
# that in self._deduce_request_type()
W_REGEX = compile(r"^\d+,$")
H_REGEX = compile(r"^,\d+$")
WH_REGEX = compile(r"^\d+,\d+$")
CONFINED_REGEX = compile(r"^!\d+,\d+$")
class SizeParameter(AbstractParameter):
def __init__(self, uri_slice, enabled_features, info, region_param):
super().__init__(uri_slice, enabled_features)
self.info = info
# delegations:
self.region_w = region_param.pixel_w
self.region_h = region_param.pixel_h
# calculations:
self.image_max_width, self.image_max_height = self._calc_image_max_wh()
self.width = None
self.height = None
self.upscaling_requested = False
# memoized properties:
self._request_type = None
# raises SyntaxException, RequestException
self._initialize_properties()
# raises FeatureNotEnabledException, RequestException
self._run_checks()
@property
def request_type(self):
# raises SyntaxException
# raises FeatureNotEnabledException
if self._request_type is None:
self._request_type = self._deduce_request_type()
return self._request_type
@property
def canonical(self):
if self._canonical is None:
if self.request_type is KEYWORD_MAX:
self._canonical = KEYWORD_MAX
else:
self._canonical = f"{self.width},{self.height}"
return self._canonical
def _initialize_properties(self):
# raises SyntaxException, RequestException
if self.request_type is KEYWORD_MAX:
self._init_max_request()
return
if self.request_type is FEATURE_SIZE_BY_W:
self._init_by_w_request()
return
if self.request_type is FEATURE_SIZE_BY_H:
self._init_by_h_request()
return
if self.request_type is FEATURE_SIZE_BY_PCT:
self._init_by_pct_request()
return
if self.request_type is FEATURE_SIZE_BY_CONFINED_WH:
self._init_by_confined_wh_request()
return
if self.request_type is FEATURE_SIZE_BY_WH:
self._init_wh_request()
return
def _run_checks(self):
# raises RequestException
self._check_size_upscaling()
# raises FeatureNotEnabledException
self._check_if_supported()
# raises RequestException
self._check_if_larger_than_max()
self._adjust_if_actually_max()
def _deduce_request_type(self):
slice = self.uri_slice
if slice[0:1] == "^":
self.upscaling_requested = True
slice = slice[1:]
if slice == KEYWORD_MAX:
return KEYWORD_MAX
if match(W_REGEX, slice):
return FEATURE_SIZE_BY_W
if match(H_REGEX, slice):
return FEATURE_SIZE_BY_H
if match(WH_REGEX, slice):
return FEATURE_SIZE_BY_WH
if match(CONFINED_REGEX, slice):
return FEATURE_SIZE_BY_CONFINED_WH
if slice.split(":")[0] == "pct":
return FEATURE_SIZE_BY_PCT
msg = f'Size syntax "{self.uri_slice}" is not valid.'
raise SyntaxException(msg)
def _adjust_if_actually_max(self):
if self.image_max_width == self.width and self.image_max_height == self.height:
self._request_type = KEYWORD_MAX
if self.region_w == self.width and self.region_h == self.height:
self._request_type = KEYWORD_MAX
def _calc_image_max_wh(self):
# remember, region may be the whole image. it doesn't really matter
max_w = self.region_w
max_h = self.region_h
if self.info.max_area:
scale = (self.info.max_area / (max_w * max_h)) ** 0.5
max_w = floor(self.region_w * scale)
max_h = floor(self.region_h * scale)
if self.info.max_width and max_w > self.info.max_width:
scale = self.info.max_width / self.region_w
max_w = floor(self.region_w * scale)
max_h = floor(self.region_h * scale)
        if self.info.max_height and max_h > self.info.max_height:
scale = self.info.max_height / self.region_h
max_w = floor(self.region_w * scale)
max_h = floor(self.region_h * scale)
return (max_w, max_h)
def _init_max_request(self):
if self.region_w < self.image_max_width:
self.width = self.region_w
else:
self.width = self.image_max_width
if self.region_h < self.image_max_height:
self.height = self.region_h
else:
self.height = self.image_max_height
def _init_by_w_request(self):
slice = self._strip_caret_if_upsample()
self.width = int(slice[:-1])
scale = self.width / self.region_w
self.height = round(self.region_h * scale)
def _init_by_h_request(self):
slice = self._strip_caret_if_upsample()
self.height = int(slice[1:])
scale = self.height / self.region_h
self.width = round(self.region_w * scale)
def _init_by_pct_request(self):
slice = self._strip_caret_if_upsample()
try:
scale = SizeParameter._pct_to_decimal(slice.split(":")[1])
except ValueError as ve:
raise SyntaxException(str(ve))
if scale <= 0:
msg = f"Size percentage must be greater than 0 ({self.uri_slice})."
raise RequestException(msg)
w_decimal = self.region_w * scale
h_decimal = self.region_h * scale
# handle teeny, tiny requests.
self.width = 1 if 0 < w_decimal < 1 else int(w_decimal)
self.height = 1 if 0 < h_decimal < 1 else int(h_decimal)
def _init_by_confined_wh_request(self):
slice = self._strip_caret_if_upsample()
request_w, request_h = map(int, slice[1:].split(","))
# TODO: below may need more precision than we get from floats w/
# large images.
scale = min(request_w / self.region_w, request_h / self.region_h)
self.width = int(self.region_w * scale)
self.height = int(self.region_h * scale)
def _init_wh_request(self):
slice = self._strip_caret_if_upsample()
self.width, self.height = map(int, slice.split(","))
def _strip_caret_if_upsample(self):
s = self.uri_slice[1:] if self.upscaling_requested else self.uri_slice
return s
@staticmethod
def _pct_to_decimal(n):
return Decimal(float(n)) / DECIMAL_ONE_HUNDRED
def _check_size_upscaling(self):
upscaling_configured = FEATURE_SIZE_UPSCALING in self.enabled_features
larger = self.width > self.region_w or self.height > self.region_h
if self.upscaling_requested and not upscaling_configured:
raise FeatureNotEnabledException(FEATURE_SIZE_UPSCALING)
if larger and not self.upscaling_requested:
msg = (
f"Image would be upsampled (region is {self.region_w}×"
f"{self.region_h}, image is {self.width}×{self.height}), but "
"upsampling synax ('^') was not used."
)
raise RequestException(msg)
def _check_if_supported(self):
# raises FeatureNotEnabledException
if self.request_type is KEYWORD_MAX:
return
try:
if self.request_type not in self.enabled_features:
raise FeatureNotEnabledException(self.request_type)
except FeatureNotEnabledException as fe:
if fe.feature is FEATURE_SIZE_BY_W and self._allowed_level0_size_request():
return
else:
raise
def _check_if_larger_than_max(self):
area = self.width * self.height
if self.info.max_area and area > self.info.max_area:
msg = (
f"Request area ({area}) is greater "
f"than max area allowed ({self.info.max_area})"
)
raise RequestException(msg)
if self.info.max_width and self.width > self.info.max_width:
msg = (
f"Request width ({self.width}) is greater than"
f"max width allowed ({self.info.max_width})"
)
raise RequestException(msg)
if self.info.max_height and self.height > self.info.max_height:
msg = (
f"Request height ({self.height}) is greater than"
f"max height allowed ({self.info.max_height})"
)
raise RequestException(msg)
def _allowed_level0_size_request(self):
if self.info.tiles:
tile_width = self.info.tiles[0].width
tile_height = self.info.tiles[0].height
right_col_width = self.info.width % tile_width
bottom_row_height = self.info.height % tile_height
tile_requirements = (
self.width in (tile_width, right_col_width),
self.height in (tile_height, bottom_row_height),
)
size = Size(self.width, self.height)
return all(tile_requirements) or size in self.info.sizes
else:
return False
```
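The confined (`!w,h`) branch picks the single scale that fits the region inside the requested box, matching the 133×200 expectation in the tests later in this repo. The arithmetic, standalone:
```python
# The "!w,h" (confined) math from _init_by_confined_wh_request, standalone.
region_w, region_h = 2000, 3000
request_w, request_h = 200, 200
scale = min(request_w / region_w, request_h / region_h)
print(int(region_w * scale), int(region_h * scale))  # 133 200
```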
#### File: loris/requests/meta_request.py
```python
class MetaRequest(type):
_compliance = None
_info_cache = None
_extractors = None
_app_configs = None
_transcoders = None
_resolvers = None
def _get_compliance(self):
return self._compliance
def _set_compliance(self, compliance):
self._compliance = compliance
compliance = property(_get_compliance, _set_compliance)
def _get_info_cache(self):
return self._info_cache
def _set_info_cache(self, info_cache):
self._info_cache = info_cache
info_cache = property(_get_info_cache, _set_info_cache)
def _get_extractors(self):
return self._extractors
def _set_extractors(self, extractors):
self._extractors = extractors
extractors = property(_get_extractors, _set_extractors)
def _get_app_configs(self):
return self._app_configs
def _set_app_configs(self, app_configs):
self._app_configs = app_configs
app_configs = property(_get_app_configs, _set_app_configs)
def _get_transcoders(self):
return self._transcoders
def _set_transcoders(self, transcoders):
self._transcoders = transcoders
transcoders = property(_get_transcoders, _set_transcoders)
def _get_resolvers(self):
return self._resolvers
def _set_resolvers(self, resolvers):
self._resolvers = resolvers
resolvers = property(_get_resolvers, _set_resolvers)
```
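Because these properties live on the metaclass, they behave as settable class-level attributes of any class built with `MetaRequest`; assigning to the class dispatches to the setters. A minimal illustration (the class name here is made up):
```python
# Illustration: metaclass properties act as settable class attributes.
class Configured(metaclass=MetaRequest):
    pass

print(Configured.compliance)      # None until configured
Configured.compliance = "level2"  # dispatches to MetaRequest._set_compliance
print(Configured.compliance)      # level2
```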
#### File: loris/transcoders/openjpeg_jp2_transcoder.py
```python
from logging import getLogger
from loris.constants import KEYWORD_FULL
from loris.transcoders.abstract_jp2_transcoder import AbstractJp2Transcoder
from loris.transcoders.api import AbstractTranscoder
from os.path import abspath
from os.path import dirname
from os.path import join
from platform import processor
from platform import system
OPJ_BIN = "opj_decompress"
logger = getLogger("loris")
class OpenJpegJp2Transcoder(AbstractJp2Transcoder, AbstractTranscoder):
def __init__(self, config):
AbstractTranscoder.__init__(self, config)
AbstractJp2Transcoder.__init__(self, config)
self.lib_dir, self.bin = OpenJpegJp2Transcoder._find_openjpeg()
self.env = {"LD_LIBRARY_PATH": self.lib_dir, "PATH": self.bin}
def _build_command(self, image_request, fifo_path):
i_param = f"-i {image_request.file_path}"
o_param = f"-o {fifo_path}"
d_param = OpenJpegJp2Transcoder.decode_area_from_image_request(image_request)
r_param = OpenJpegJp2Transcoder.reduce_from_image_request(image_request)
return f"{self.bin} {i_param} {o_param} {d_param} {r_param}"
@staticmethod
def decode_area_from_image_request(image_request):
# analogous to kdu_expand -{region} but works w/ pixels
if image_request.region_request_type is KEYWORD_FULL:
return ""
else:
            x0 = image_request.region_pixel_x
            y0 = image_request.region_pixel_y
            x1 = x0 + image_request.region_pixel_w
            y1 = y0 + image_request.region_pixel_h
            return f"-d {x0},{y0},{x1},{y1}"
@staticmethod
def reduce_from_image_request(image_request):
arg = OpenJpegJp2Transcoder.reduce_arg_from_image_request(image_request)
return f"-r {arg}"
@staticmethod
def _find_openjpeg():
sys = system().lower()
proc = processor() # is this enough?
package_dir = dirname(dirname(abspath(__file__)))
opj_dir = join(package_dir, "openjpeg", sys, proc)
if sys in ("linux", "darwin"):
return (opj_dir, join(opj_dir, OPJ_BIN))
else:
msg = f"OpenJpeg binaries not included for for {sys}/{proc}"
raise RuntimeError(msg)
```
#### File: loris/compliance/quality_tests.py
```python
from loris.compliance.quality import QualityCompliance
class TestQualityCompliance(object):
def test_how_to_get_2(self):
cfg = {"color": {"enabled": True}, "gray": {"enabled": True}, "bitonal": {"enabled": True}}
assert QualityCompliance(cfg) == 2
def test_1_with_nothing_enabled(self):
cfg = {
"color": {"enabled": False},
"gray": {"enabled": False},
"bitonal": {"enabled": False},
}
assert QualityCompliance(cfg) == 1
```
#### File: loris/compliance/size_tests.py
```python
from loris.compliance.size import SizeCompliance
class TestSizeCompliance(object):
def test_2_plus(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": True},
"sizeByConfinedWh": {"enabled": True},
"sizeByWh": {"enabled": True},
"sizeUpscaling": {"enabled": True},
}
assert SizeCompliance(cfg) == 2
def test_1_wo_sizeByWh(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": True},
"sizeByWh": {"enabled": False},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 1
def test_1_wo_sizeByConfinedWh(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": False},
"sizeByWh": {"enabled": True},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 1
def test_how_to_get_1(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": False},
"sizeByWh": {"enabled": False},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 1
def test_0_wo_sizeByH(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": False},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": False},
"sizeByWh": {"enabled": False},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 0
def test_0_wo_sizeByW(self):
cfg = {
"sizeByW": {"enabled": False},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": False},
"sizeByWh": {"enabled": False},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 0
def test_how_to_get_2(self):
cfg = {
"sizeByW": {"enabled": True},
"sizeByH": {"enabled": True},
"sizeByPct": {"enabled": True},
"sizeByConfinedWh": {"enabled": True},
"sizeByWh": {"enabled": True},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 2
def test_how_to_get_0(self):
cfg = {
"sizeByW": {"enabled": False},
"sizeByH": {"enabled": False},
"sizeByPct": {"enabled": False},
"sizeByConfinedWh": {"enabled": False},
"sizeByWh": {"enabled": False},
"sizeUpscaling": {"enabled": False},
}
assert SizeCompliance(cfg) == 0
```
#### File: loris/handlers/favicon_handler_tests.py
```python
from tests.loris.handlers.base_handler_test import BaseHandlerTest
class TestFaviconHandler(BaseHandlerTest):
# See http://docs.cherrypy.org/en/latest/advanced.html#testing-your-application
def test_favicon(self):
response = self.get("/favicon.ico")
assert response.status_code == 200
assert response.headers["Allow"] == "GET"
assert response.headers["Content-Type"] == "image/x-icon"
assert response.headers["Cache-Control"] == "max-age=31536000, public"
assert response.headers["Content-Length"] == "156176"
```
#### File: loris/parameters/api_tests.py
```python
from loris.parameters.api import AbstractParameter
from unittest.mock import Mock
import pytest
class ProperImpl(AbstractParameter):
def __init__(self, uri_slice, enabled_features):
super(ProperImpl, self).__init__(uri_slice, enabled_features)
@property
def canonical(self):
return "canonical version"
class TestAbstractParameter(object):
def test_canonical_required(self):
class WithoutCanonical(AbstractParameter):
def __init__(self, uri_slice, enabled_features):
super(WithoutCanonical, self).__init__(uri_slice, enabled_features)
with pytest.raises(TypeError) as type_error:
w = WithoutCanonical("abc", (), Mock())
assert "Can't instantiate abstract class" in str(type_error.value)
def test_init_required(self):
class WithoutInit(AbstractParameter):
@property
def canonical(self):
return "canonical version"
with pytest.raises(TypeError) as type_error:
w = WithoutInit("abc", (), Mock())
assert "Can't instantiate abstract class" in str(type_error.value)
def test_init_sig_required(self):
class WrongInitSig(AbstractParameter):
def __init__(self):
super(WrongInitSig, self).__init__()
@property
def canonical(self):
return "canonical version"
with pytest.raises(TypeError) as type_error:
WrongInitSig()
assert "__init__() missing 2 required positional" in str(type_error.value)
def test_proper_impl(self):
ProperImpl("foo", ())
def test_stuff_is_defined(self):
p = ProperImpl("foo", ())
assert p.uri_slice == "foo"
assert p.enabled_features == ()
```
#### File: loris/parameters/quality_tests.py
```python
from loris.exceptions import FeatureNotEnabledException
from loris.exceptions import SyntaxException
from loris.parameters.quality import QualityParameter
from unittest.mock import Mock
import pytest
class TestQualityParameter(object):
def mock_info(self, qualities_available):
return Mock(extra_qualities=qualities_available)
def test_canonical_color_with_color(self):
uri_slice = "color"
enabled_features = ("color", "bitonal", "gray")
info = self.mock_info(("color", "bitonal", "gray"))
qp = QualityParameter(uri_slice, enabled_features, info)
assert qp.canonical == "default"
def test_canonical_gray_with_color(self):
uri_slice = "gray"
enabled_features = ("color", "bitonal", "gray")
info = self.mock_info(("color", "bitonal", "gray"))
qp = QualityParameter(uri_slice, enabled_features, info)
assert qp.canonical == "gray"
def test_canonical_gray_with_gray(self):
uri_slice = "gray"
enabled_features = ("color", "bitonal", "gray")
info = self.mock_info(("bitonal", "gray"))
qp = QualityParameter(uri_slice, enabled_features, info)
assert qp.canonical == "default"
def test_gray_raises_if_not_enabled(self):
uri_slice = "gray"
enabled_features = ("color", "bitonal")
info = self.mock_info(("bitonal", "gray"))
with pytest.raises(FeatureNotEnabledException) as fe:
QualityParameter(uri_slice, enabled_features, info)
assert "not support the 'gray'" in fe.value.message
def test_unrecognizable_raises(self):
uri_slice = "foo"
enabled_features = ("color", "bitonal", "gray")
info = self.mock_info(("color", "bitonal", "gray"))
with pytest.raises(SyntaxException) as se:
QualityParameter(uri_slice, enabled_features, info)
assert 'Value "foo" for quality is not recognized' == se.value.message
```
#### File: loris/parameters/size_tests.py
```python
from loris.constants import KEYWORD_MAX
from loris.exceptions import FeatureNotEnabledException
from loris.exceptions import RequestException
from loris.exceptions import SyntaxException
from loris.info.structs.size import Size
from loris.info.structs.tile import Tile
from loris.parameters.size import SizeParameter
from unittest.mock import Mock
import pytest
class TestSizeParameter(object):
def mock_info(self, width, height, **kwargs):
long_dim = max(width, height)
short_dim = min(width, height)
sizes = kwargs.get("sizes", [Size(width, height)])
tiles = kwargs.get("tiles")
max_area = kwargs.get("max_area")
max_width = kwargs.get("max_width")
max_height = kwargs.get("max_height")
kwargs = {
"width": width,
"height": height,
"long_dim": long_dim,
"short_dim": short_dim,
"sizes": sizes,
"tiles": tiles,
"max_area": max_area,
"max_width": max_width,
"max_height": max_height,
}
return Mock(**kwargs)
def mock_region(self, region_width, region_height):
return Mock(pixel_w=region_width, pixel_h=region_height)
def test__deduce_request_type_raises_syntax_exception(self):
uri_slice = "wtf"
info_data = self.mock_info(8000, 6001)
features = ()
region_param = self.mock_region(400, 300)
with pytest.raises(SyntaxException) as se:
_ = SizeParameter(uri_slice, features, info_data, region_param).request_type
assert 'Size syntax "wtf" is not valid.' == se.value.message
def test__init_max_size_ok(self):
uri_slice = "max"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(3456, 1234)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 3456
assert sp.height == 1234
assert sp.canonical is KEYWORD_MAX # gets adjusted
def test__init_max_over_size_w(self):
uri_slice = "max"
features = ()
info_data = self.mock_info(8000, 6001, max_width=4000)
region_param = self.mock_region(5000, 2000) # 1000 wider than allowed
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 4000
assert sp.height == 1600
assert sp.canonical is KEYWORD_MAX
def test__init_max_over_size_h(self):
uri_slice = "max"
features = ()
info_data = self.mock_info(8000, 6000, max_height=4000)
region_param = self.mock_region(5000, 4500) # 500 higher than allowed
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 4444
assert sp.height == 4000
assert sp.canonical is KEYWORD_MAX
def test__init_max_over_size_area(self):
uri_slice = "max"
features = ()
max_area = 24000000
        info_data = self.mock_info(8000, 6000, max_area=max_area)
region_param = self.mock_region(5000, 7000)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 4140
assert sp.height == 5796
assert (sp.height * sp.width) < max_area
assert sp.canonical is KEYWORD_MAX
def test__init_sizeByW(self):
uri_slice = "1024,"
features = "sizeByW"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2048, 2048)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1024
assert sp.height == 1024
assert sp.canonical == "1024,1024"
def test__check_if_supported_sizeByW_raises(self):
uri_slice = "1024,"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2048, 2048)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByW'" in fe.value.message
def test_max_as_sizeByW_adjusts_request_type(self):
uri_slice = "1024,"
features = "sizeByW"
info_data = self.mock_info(8000, 6000, max_area=24000000)
region_param = self.mock_region(1024, 1024)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1024
assert sp.height == 1024
assert sp.request_type is KEYWORD_MAX
assert sp.canonical == KEYWORD_MAX
def test_full_as_sizeByW_still_raises(self):
uri_slice = "1024,"
features = ()
info_data = self.mock_info(8000, 6000)
region_param = self.mock_region(1024, 1024)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByW'" in fe.value.message
def test__init_sizeByH(self):
uri_slice = ",1024"
features = "sizeByH"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2048, 3072)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 683
assert sp.height == 1024
assert sp.canonical == "683,1024"
@pytest.mark.skip(reason="test not written")
def test_max_as_sizeByH_adjusts_request_type(self):
raise NotImplementedError
@pytest.mark.skip(reason="test not written")
def test_max_as_sizeByH_still_raises(self):
raise NotImplementedError
def test__check_if_supported_sizeByH_raises(self):
uri_slice = ",1024"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2048, 3072)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByH'" in fe.value.message
def test__init_sizeByPct(self):
uri_slice = "pct:20"
features = "sizeByPct"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 400
assert sp.height == 600
assert sp.canonical == "400,600"
def test__check_if_supported_sizeByPct_raises(self):
uri_slice = "pct:20"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByPct'" in fe.value.message
def test_pct_request_round_lt_0_to_1(self):
uri_slice = "pct:0.01"
features = "sizeByPct"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1
assert sp.height == 1
assert sp.canonical == "1,1"
def test_pct_0_raises(self):
uri_slice = "pct:0"
features = "sizeByPct"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
with pytest.raises(RequestException) as se:
SizeParameter(uri_slice, features, info_data, region_param)
assert "Size percentage must be greater than 0 (pct:0)." == se.value.message
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByPct_adjusts_request_type(self):
raise NotImplementedError
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByPct_still_raises(self):
raise NotImplementedError
def test__init_sizeByConfinedWh_portrait(self):
uri_slice = "!200,200"
features = "sizeByConfinedWh"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 133
assert sp.height == 200
assert sp.canonical == "133,200"
def test__init_sizeByConfinedWh_landscape(self):
uri_slice = "!300,300"
features = "sizeByConfinedWh"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 1200)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 300
assert sp.height == 180
assert sp.canonical == "300,180"
def test__check_if_supported_sizeByConfinedWh_raises(self):
uri_slice = "!200,200"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(2000, 3000)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByConfinedWh'" in fe.value.message
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByConfinedWh_adjusts_request_type(self):
raise NotImplementedError
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByConfinedWh_still_raises(self):
raise NotImplementedError
def test__init_sizeByWh(self):
uri_slice = "400,300"
features = "sizeByWh"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(8000, 6001)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 400
assert sp.height == 300
assert sp.canonical == "400,300"
def test__check_if_supported_sizeByWh_raises(self):
uri_slice = "400,300"
features = ()
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(8000, 6001)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByWh'" in fe.value.message
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByWh_adjusts_request_type(self):
raise NotImplementedError
@pytest.mark.skip(reason="test not written")
def test_full_as_sizeByWh_still_raises(self):
raise NotImplementedError
def test_upsampling_without_caret_raises(self): # upscaling is only allowed with the "^" prefix
uri_slice = "400,300"
features = ("sizeByWh", "sizeUpscaling")
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
with pytest.raises(RequestException) as re:
SizeParameter(uri_slice, features, info_data, region_param)
assert "Image would be upsampled" in re.value.message
def test_sizeUpscaling_raises(self):
uri_slice = "^400,300"
features = "sizeByWh"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeUpscaling' feature" in fe.value.message
def test_upscaling_ok_if_sizeUpscaling_enabled(self):
uri_slice = "^400,300"
features = ("sizeByWh", "sizeUpscaling")
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.upscaling_requested
assert sp.width == 400
assert sp.height == 300
assert sp.region_w == 399
assert sp.region_h == 299
def test_upscaling_ok_w_syntax(self):
uri_slice = "^400,"
features = ("sizeByW", "sizeUpscaling")
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
SizeParameter(uri_slice, features, info_data, region_param)
def test_upscaling_ok_h_syntax(self):
uri_slice = "^,400"
features = ("sizeByH", "sizeUpscaling")
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
SizeParameter(uri_slice, features, info_data, region_param)
def test_upscaling_ok_pct_syntax(self):
uri_slice = "^pct:101"
features = ("sizeByPct", "sizeUpscaling")
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
SizeParameter(uri_slice, features, info_data, region_param)
def test_upscaling_ok_max_syntax(self):
uri_slice = "^max"
features = "sizeUpscaling"
info_data = self.mock_info(8000, 6001)
region_param = self.mock_region(399, 299)
SizeParameter(uri_slice, features, info_data, region_param)
def test_width_larger_than_max_raises(self):
uri_slice = "5000,"
features = ("sizeByW", "sizeUpscaling")
info_data = self.mock_info(8000, 6001, max_width=4000)
region_param = self.mock_region(4000, 3000)
with pytest.raises(RequestException) as re:
SizeParameter(uri_slice, features, info_data, region_param)
assert "Image would be upsampled" in re.value.message
def test_height_larger_than_max_raises(self):
uri_slice = ",1024"
features = "sizeByH"
info_data = self.mock_info(8000, 6001, max_height=1000)
region_param = self.mock_region(2048, 2048)
with pytest.raises(RequestException) as re:
SizeParameter(uri_slice, features, info_data, region_param)
assert "height (1024) is greater" in re.value.message
def test_area_larger_than_max_raises(self):
uri_slice = "5000,"
features = ("sizeByW", "sizeUpscaling")
info_data = self.mock_info(8000, 6001, max_area=16000000)
region_param = self.mock_region(5000, 6000)
with pytest.raises(RequestException) as re:
SizeParameter(uri_slice, features, info_data, region_param)
assert "area (30000000) is greater" in re.value.message
def test_can_get_tiles_without_sizeByW_if_allowed(self):
uri_slice = "1024,"
features = ()
tiles = [Tile(1024, [1, 2, 4, 8, 16])]
info_data = self.mock_info(8000, 6000, tiles=tiles)
region_param = self.mock_region(1024, 1024)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1024
assert sp.height == 1024
def test_can_get_right_edge_tiles_without_sizeByW_if_allowed(self):
uri_slice = "30,"
features = ()
tiles = [Tile(1024, [1, 2, 4, 8, 16])]
info_data = self.mock_info(2078, 6000, tiles=tiles)
region_param = self.mock_region(60, 2048)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 30
assert sp.height == 1024
def test_can_get_bottom_row_tiles_without_sizeByW_if_allowed(self):
uri_slice = "1024,"
features = ()
tiles = [Tile(1024, [1, 2, 4, 8, 16])]
info_data = self.mock_info(8000, 6000, tiles=tiles)
region_param = self.mock_region(4096, 3520)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1024
assert sp.height == 880
def test_normal_sizeByW_raises(self):
uri_slice = "1025," # off by one; could be arbirary
features = ()
tiles = [Tile(1024, [1, 2, 4, 8, 16])]
info_data = self.mock_info(8000, 6000, tiles=tiles)
region_param = self.mock_region(4096, 3520)
with pytest.raises(FeatureNotEnabledException) as fe:
SizeParameter(uri_slice, features, info_data, region_param)
assert "not support the 'sizeByW'" in fe.value.message
def test_can_get_small_sizes_without_sizeByW_if_allowed(self):
uri_slice = "1000,"
features = ()
tiles = [Tile(1024, [1, 2, 4, 8, 16])]
sizes = [Size(1000, 750), Size(500, 375), Size(250, 187)]
info_data = self.mock_info(8000, 6000, sizes=sizes, tiles=tiles)
region_param = self.mock_region(8000, 6000)
sp = SizeParameter(uri_slice, features, info_data, region_param)
assert sp.width == 1000
assert sp.height == 750
```
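A quick sketch of the best-fit arithmetic the `!w,h` ("sizeByConfinedWh") assertions above imply. This is not loris's actual SizeParameter code, just the math reconstructed from the expected values:
```python
# Sketch only: the scale factor is the tighter of the two constraints,
# and both dimensions are rounded (133x200 from !200,200 on 2000x3000).
def confined_size(region_w, region_h, max_w, max_h):
    scale = min(max_w / region_w, max_h / region_h)
    return round(region_w * scale), round(region_h * scale)

assert confined_size(2000, 3000, 200, 200) == (133, 200)  # portrait test case
assert confined_size(2000, 1200, 300, 300) == (300, 180)  # landscape test case
```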
#### File: loris/resolvers/api_tests.py
```python
from loris.resolvers.api import AbstractResolver
import pytest
class ProperImpl(AbstractResolver):
def is_resolvable(self, ident):
return True
def resolve(self, ident):
# Note that a real impl. would need to raise an IOError
return "/foo/bar/baz.jpg"
class TestAbstractResolver(object):
def test_is_resolvable_required(self):
class WithoutIsResolvable(AbstractResolver):
def resolve(self, ident):
return "/foo/bar/baz.jpg"
@staticmethod
def characterize(file_path):
return "jpg"
with pytest.raises(TypeError) as type_error:
w = WithoutIsResolvable({})
assert "Can't instantiate abstract class" in str(type_error.value)
def test_resolvable_required(self):
class WithoutResolvable(AbstractResolver):
def is_resolvable(self, ident):
return True
@staticmethod
def characterize(file_path):
return "jpg"
with pytest.raises(TypeError) as type_error:
w = WithoutResolvable({})
assert "Can't instantiate abstract class" in str(type_error.value)
def test_proper_impl_works(self):
resolver = ProperImpl({}) # constructing a complete implementation must not raise
def test_arbitrary_configs_added_to_instance(self):
config = {"foo": "bar", "baz": "quux"}
resolver = ProperImpl(config)
assert resolver.foo == "bar"
assert resolver.baz == "quux"
```
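For reference, the TypeError these tests assert comes from Python's abc machinery. A minimal stand-in (not the real loris.resolvers.api.AbstractResolver, which may differ) looks like this:
```python
from abc import ABC, abstractmethod

class SketchResolver(ABC):
    """Hypothetical stand-in for AbstractResolver."""
    def __init__(self, config):
        # arbitrary config keys become attributes, matching
        # test_arbitrary_configs_added_to_instance above
        for key, value in config.items():
            setattr(self, key, value)

    @abstractmethod
    def is_resolvable(self, ident):
        raise NotImplementedError

    @abstractmethod
    def resolve(self, ident):
        raise NotImplementedError

# A subclass that fails to override either method triggers
# TypeError: Can't instantiate abstract class ...
```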
#### File: loris/resolvers/resolvers_tests.py
```python
from loris.resolvers import Resolvers
from loris.resolvers.file_system_resolver import FileSystemResolver
import pytest
class MyStupidResolver:
def __init__(self):
pass
class TestResolvers(object):
def test_can_load_from_config(self):
config = [
{
"class": "loris.resolvers.file_system_resolver.FileSystemResolver",
"prefix": "a",
"config": {
"root": "/fill/me/in",
"format_suffix": None,
"cache": False,
"cache_root": "/tmp",
"cache_size": 100,
},
}
]
resolvers = Resolvers(config)
assert "a" in resolvers._resolvers
assert isinstance(resolvers._resolvers["a"], FileSystemResolver)
assert resolvers._resolvers["a"].cache is False
def test_add_resolver(self):
resolvers = Resolvers([])
klass = "loris.resolvers.file_system_resolver.FileSystemResolver"
prefix = "a"
config = {"root": "/fill/me/in"}
resolvers.add_resolver(klass, prefix, config)
assert "a" in resolvers._resolvers
assert isinstance(resolvers._resolvers["a"], FileSystemResolver)
def test_raises_if_not_an_abstract_resolver(self):
resolvers = Resolvers([])
with pytest.raises(TypeError) as te:
klass = "tests.loris.resolvers.resolvers_tests.MyStupidResolver"
resolvers.add_resolver(klass, "x", {})
assert "MyStupidResolver must subclass AbstractResolver" == str(te.value)
def test_raises_if_key_is_not_a_str(self):
resolvers = Resolvers([])
with pytest.raises(TypeError) as te:
klass = "loris.resolvers.file_system_resolver.FileSystemResolver"
resolvers.add_resolver(klass, 1, {})
assert "Resolver prefixes must be strings, got int" == str(te.value)
```
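The dotted `class` strings in the config suggest the registry instantiates resolvers by dynamic import. A plausible sketch of that loader (loris's actual code may differ):
```python
import importlib

def load_resolver_class(dotted_path):
    # "pkg.module.ClassName" -> the class object
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

# The registry can then validate with issubclass(cls, AbstractResolver)
# before calling cls(config), which explains the TypeError tests above.
```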
#### File: loris/transcoders/helpers.py
```python
from contextlib import contextmanager
from os import makedirs, unlink
from PIL import Image
BLACK = (0, 0, 0)
BLUE = (0, 0, 150)
DARK_SLATE = (49, 79, 79)
GREEN = (0, 150, 0)
ORANGE = (255, 165, 0)
PURPLE = (128, 0, 128)
RED = (150, 0, 0)
WHITE = (255, 255, 255)
def is_close_color(src_rgb, expected_rgb, threshold=10):
# Try to account for variations in color that result from compression
pairs = map(sorted, zip(src_rgb, expected_rgb))
return all(d <= threshold for d in map(lambda t: t[1] - t[0], pairs))
@contextmanager
def tmp_image(bytes_io, fmt="jpg"):
tmp = f"/tmp/loris_tmp/img.{fmt}"
try:
with open(tmp, "wb") as f:
f.write(bytes_io.getvalue())
i = Image.open(tmp)
yield i
finally:
unlink(tmp)
```
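A usage sketch for these helpers, assuming `stream` is a BytesIO of encoded image data such as a transcoder returns (illustrative only, not part of the test suite):
```python
from io import BytesIO

def top_left_is_red(stream: BytesIO) -> bool:
    # tmp_image round-trips the bytes through a real file so PIL decodes
    # them; is_close_color tolerates small compression artifacts
    with tmp_image(stream, fmt="jpg") as img:
        return is_close_color(img.getpixel((0, 0)), RED)
```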
#### File: loris/transcoders/openjpeg_jp2_transcoder_tests.py
```python
from loris.constants import FEATURE_SIZE_BY_W
from loris.constants import KEYWORD_FULL
from loris.transcoders.openjpeg_jp2_transcoder import OpenJpegJp2Transcoder
from os.path import exists
from os.path import isdir
from tests.loris.transcoders.helpers import BLUE
from tests.loris.transcoders.helpers import GREEN
from tests.loris.transcoders.helpers import is_close_color
from tests.loris.transcoders.helpers import ORANGE
from tests.loris.transcoders.helpers import RED
from tests.loris.transcoders.helpers import tmp_image
from unittest.mock import Mock
import pytest
@pytest.fixture(scope="module")
def transcoder():
return OpenJpegJp2Transcoder({})
class TestOpenJpegJp2Transcoder(object):
def test_it_can_find_openjpeg(self):
lib, binary = OpenJpegJp2Transcoder._find_openjpeg()
assert exists(lib)
assert isdir(lib)
assert exists(binary)
def test_decode_area_from_image_request(self):
args = {
"region_pixel_x": 0,
"region_pixel_y": 1024,
"region_pixel_w": 512,
"region_pixel_h": 512,
}
image_request = Mock(**args)
meth = OpenJpegJp2Transcoder.decode_area_from_image_request
assert meth(image_request) == "-d 0,1024,512,1536"
def test_reduce_from_image_request(self):
info = Mock(width=5000, height=6500, all_scales=[1, 2, 4, 8, 16, 32, 64])
args = {"width": 250, "height": 400, "info": info}
image_request = Mock(**args)
meth = OpenJpegJp2Transcoder.reduce_from_image_request
assert meth(image_request) == "-r 4"
def test__build_command(self, transcoder):
info = Mock(width=5000, height=6500, all_scales=[1, 2, 4, 8, 16, 32, 64])
mock_data = {
"width": 250,
"height": 400,
"region_pixel_x": 0,
"region_pixel_y": 1024,
"region_pixel_w": 512,
"region_pixel_h": 512,
"info": info,
"file_path": "/foo/bar.jp2",
}
image_request = Mock(**mock_data)
fake_pipe = "/baz/quux.bmp"
cmd_no_path = "opj_decompress -i /foo/bar.jp2 -o /baz/quux.bmp -d 0,1024,512,1536 -r 4"
assert transcoder._build_command(image_request, fake_pipe).endswith(cmd_no_path)
@pytest.mark.filterwarnings("ignore:unclosed file")
def test__execute_small_full(self, transcoder, region_test_jp2):
# This is the equivalent of /full/full/0/default.jpg.
# It will be slow (~2-2.5 seconds)
image_request = Mock(
info=Mock(width=6000, height=8000, all_scales=[1, 2, 4, 8, 16, 32, 64]),
file_path=region_test_jp2,
region_request_type=KEYWORD_FULL, # _region_param.request_type
region_pixel_x=0, # _region_param.pixel_x
region_pixel_y=0, # _region_param.pixel_y
region_pixel_w=60, # _region_param.pixel_w
region_pixel_h=80, # _region_param.pixel_h
size_request_type=FEATURE_SIZE_BY_W, # _size_param.request_type
width=60, # _size_param.width
height=80, # _size_param.height
mirror=False, # _rotation_param.mirror
rotation=0.0, # _rotation_param.rotation
quality="default", # _quality_param.canonical
format="jpg", # _format_param.canonical
)
stream = transcoder.execute(image_request)
with tmp_image(stream) as i:
# i.show() # helpful
assert i.size == (60, 80)
assert is_close_color(i.getpixel((0, 0)), GREEN)
assert is_close_color(i.getpixel((59, 0)), RED)
assert is_close_color(i.getpixel((0, 79)), BLUE)
assert is_close_color(i.getpixel((59, 79)), ORANGE)
``` |
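The `-r 4` expectation above follows from how JPEG2000 resolution levels work: each level halves the image, so the decoder picks the deepest level that still meets the requested size. A sketch of that arithmetic (not the transcoder's actual code):
```python
def reduce_level(full_px, target_px, all_scales):
    # 5000px wide -> 250px wide needs 1/20; 1/16 is the closest scale
    # that is still large enough, and 16 sits at index 4
    usable = [s for s in all_scales if full_px / s >= target_px]
    return all_scales.index(max(usable))

assert reduce_level(5000, 250, [1, 2, 4, 8, 16, 32, 64]) == 4
```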
{
"source": "JPStrydom/Webcam-Colour-Tracker",
"score": 3
} |
#### File: Webcam-Colour-Tracker/src/draw.py
```python
import cv2
from . import config
camera_width = config.camera_width
camera_height = config.camera_height
def draw_color_line(img_brg, draw_point_array, draw_color, is_canvas=False):
draw_color_line_help(img_brg, draw_point_array, (0, 0, 0), is_canvas)
draw_color_line_help(img_brg, draw_point_array, draw_color, is_canvas)
def draw_color_line_help(img_brg, draw_point_array, draw_color, is_canvas=False):
padding = 0
if draw_color == (0, 0, 0):
padding = 10
taper_rate = 2
if is_canvas:
taper_rate = (50 / len(draw_point_array))
for index, point in enumerate(draw_point_array):
if validate_point(index, point, draw_point_array, is_canvas):
cv2.line(
img_brg,
tuple(point),
tuple(draw_point_array[index + 1]),
(int(draw_color[0]), int(draw_color[1]), int(draw_color[2])),
round(50 - index * taper_rate + padding)
)
def validate_point(index, point, draw_point_array, is_canvas):
return (index <= 25 or is_canvas) and\
point[0] != 0 and\
point[1] != 0 and\
draw_point_array[index + 1, 0] != 0 and\
draw_point_array[index + 1, 1] != 0
def draw_target_circle(img_brg):
cv2.circle(img_brg, (round(camera_width / 2), round(camera_height / 2)), 25, (0, 0, 0), 10)
cv2.circle(img_brg, (round(camera_width / 2), round(camera_height / 2)), 25, (255, 255, 255), 6)
cv2.circle(img_brg, (round(camera_width / 2), round(camera_height / 2)), 25, (0, 0, 0), 2)
``` |
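A hypothetical capture loop showing how these helpers could be wired up. This is not part of the repo: the webcam index, window name, and the rolling point buffer are all assumptions, and in the real tracker the buffer would be filled with detected colour positions rather than zeros:
```python
import cv2
import numpy as np

def demo_loop(draw_color=(0, 0, 255)):
    points = np.zeros((30, 2), dtype=int)  # zero points are skipped by validate_point
    cap = cv2.VideoCapture(0)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        draw_target_circle(frame)
        draw_color_line(frame, points, draw_color)
        cv2.imshow("tracker", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()
```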
{
"source": "jpsura/pyapi",
"score": 3
} |
#### File: jpsura/pyapi/astro.py
```python
import urllib.request
import json
LIST = 'http://api.open-notify.org/astros.json'
def main():
## Call the webservice
isslist = urllib.request.urlopen(LIST)
## read the raw bytes from the response object
datalist = isslist.read()
## decode JSON to Python data structure
jsonlist = json.loads(datalist.decode('utf-8'))
## display our Pythonic data
#print("\n\nConverted Python data")
#print(jsonlist)
print("\n\nPeople in Space: ", jsonlist['people'][0]['name'])
print(" ", jsonlist['people'][1]['name'])
print(" ", jsonlist['people'][2]['name'])
print(" ", jsonlist['people'][3]['name'])
#print(jsonlist)
main()
```
#### File: pyapi/rpg/mygame01.py
```python
def showInstructions():
print('''
RPG Game
========
Commands:
go [direction]
get [item]
''')
def showStatus():
#print player status
print('------------------')
print('You are in the ' + currentRoom)
#print the current inventory
print('Inventory : ' + str(inventory))
#print an item if there is one
if "item" in rooms[currentRoom]:
print('You see a ' + rooms[currentRoom]['item'])
print("------------------")
#an inventory
inventory = []
#a dictionary linking a room to other rooms
rooms = {
'Hall' : {
'south' : 'Kitchen',
'east' : 'Dining Hall',
'item' : 'key'
},
'Kitchen' : {
'north' : 'Hall',
'item' : 'Monster'
},
'Dining Hall' : {
'west' : 'Hall',
'south' : 'Garden',
'item' : 'potion'
},
'Garden' : {
'north' : 'Dining Hall'
}
}
#start player in the hall
currentRoom = 'Hall'
showInstructions()
#loop forever
while True:
showStatus()
#get the player next move
#.split() breaks it up into a list array
#eg typing 'go east' would give the list:
#['go', 'east']
move = ''
while move == '':
move = input('>')
move = move.lower().split()
#if they go first
if move[0] == 'go':
#check they are allowed to go
if move[1] in rooms[currentRoom]:
#set the current rooms to a new room
currentRoom = rooms[currentRoom][move[1]]
#there is no door to a new room
else:
print('You can\'t go that way')
#if they type 'get' first
if move[0] == 'get':
#if the room contains an item and the item is the one they want to get
if 'item' in rooms[currentRoom] and move[1] in rooms[currentRoom]['item']:
inventory += [move[1]]
print(move[1] + "picked up")
del rooms[currentRoom]['item']
else:
print("You can't pick that up")
if 'item' in rooms[currentRoom] and rooms[currentRoom]['item'] == 'Monster':
print('A monster has got you.....game over man!')
break
if currentRoom == 'Garden' and 'key' in inventory and 'potion' in inventory:
print('You escaped the house with the ultra rare key and magic potion....you win')
break
```
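A small refactoring sketch (not in the original game): pulling the "go" handling into a pure function makes the room graph unit-testable.
```python
def try_go(rooms, current_room, direction):
    """Return the new room, or the current one if there is no door."""
    return rooms[current_room].get(direction, current_room)

assert try_go({'Hall': {'south': 'Kitchen'}}, 'Hall', 'south') == 'Kitchen'
assert try_go({'Hall': {'south': 'Kitchen'}}, 'Hall', 'west') == 'Hall'
```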
#### File: jpsura/pyapi/session02.py
```python
from flask import Flask, session, render_template, redirect, url_for, escape, request
app = Flask(__name__)
app.secret_key = "any random string"
## If the user hits the root of our API
@app.route("/")
def index():
## if the key "username" has a value in session
if "username" in session:
username = session["username"]
if 'visits' in session:
session['visits'] = session.get('visits') + 1 # reading and updating session data
else:
session['visits'] = 1 # setting session data
visitno = "Total visits: {}".format(session.get('visits'))
return "Logged in as " + username + " your lifetime visits are " + visitno + "<br>" + \
"<b><a href = '/logout'>click here to log out</a></b>"
## if the key "username" does not have a value in session
return "You are not logged in <br><a href = '/login'></b>" + \
"click here to log in</b></a>"
## If the user hits /login with a GET or POST
@app.route("/login", methods = ["GET", "POST"])
def login():
## if you sent us a POST because you clicked the login button
if request.method == "POST":
## request.form["xyzkey"]: use indexing if you know the key exists
## request.form.get("xyzkey"): use get if the key might not exist
session["username"] = request.form.get("username")
return redirect(url_for("index"))
## return this HTML data if you send us a GET
return """
<form action = "" method = "post">
<p><input type = text name = username></p>
<p><input type = submit value = Login></p>
</form>
"""
@app.route("/logout")
def logout():
# remove the username from the session if it is there
session.pop("username", None)
return redirect(url_for("index"))
@app.route('/delete-visits/')
def delete_visits():
if "username" in session:
session.pop('visits', None) # delete visits
return 'Visits deleted'
return "You are not logged in <br><a href = '/login'></b>" + "click here to log in</b></a>"
if __name__ == "__main__":
app.run(port=5006)
``` |
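A sketch of exercising the session flow with Flask's built-in test client. It assumes the module above is importable and `app` is its Flask object; the test client keeps the session cookie between requests:
```python
def smoke_test(app):
    client = app.test_client()
    client.post("/login", data={"username": "alice"})
    first = client.get("/")    # visits -> 1
    second = client.get("/")   # visits -> 2
    assert b"alice" in first.data
    assert b"Total visits: 2" in second.data
```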
{
"source": "jpsvcv/Connect4-Game",
"score": 4
} |
#### File: jpsvcv/Connect4-Game/main.py
```python
from termcolor import colored
import random
# Variables definition
red, blue = colored('X', 'red', attrs=['bold']), colored('O', 'blue', attrs=['bold']) # u'\u2B24'
balls = [colored(u'\u2B24', 'red'), colored(u'\u2B24', 'blue')]
player_1, player_2 = 'red', 'blue'
row, col = 6, 7
def generate_board():
return [[] for i in range(col)] # each nested list represents a column of the game board
board = generate_board().copy()
def first_player():
return random.randint(1, 2)
player = first_player()
def print_header():
for i in range(col):
if i == 0:
print(' ' * 2 + str(i+1), end='')
else:
print(' ' * 3 + str(i+1), end='')
print()
# board line
def print_line():
for i in range(col):
print('+', end='')
for j in range(3):
print('-', end='')
print('+')
def draw_board():
print_header()
for i in range(row):
print_line()
print_row(i) # prints this row from the column contents (stored bottom-up, drawn top-down)
print_line()
print_header()
# Fill each cell on the screen with the respective column content in reverse order
# Math is beautiful
def print_row(cell_position):
max_index, max_len = 5, 6
for column in board:
print('|' + ' ', end='')
current_len = len(column)
current_index = current_len - 1
total_empty_cells = max_index - current_index
if cell_position >= total_empty_cells:
print_index = - 1 * (cell_position + 1) # this is like reversing the order (my version)
real_index = max_len + print_index # the right index is targeted in reverse order
color_ball = column[real_index]
if color_ball == 'red':
print(red + ' ', end='') # print the red ball
elif color_ball == 'blue':
print(blue + ' ', end='')
else: # 'win'
if color == 'red':
print(colored('X', 'green', attrs=['bold']) + ' ', end='')
elif color == 'blue':
print(colored('O', 'green', attrs=['bold']) + ' ', end='')
else:
print(' ', end='')
print('|')
def print_title():
print('\n' + ' ' * 3 + colored('--- Connect 4 Game ---', attrs=['bold']))
print_title()
print()
p = 1
players = []
while p < 3:
if p == 1:
s = '1st'
else:
s = '2nd'
players.append(input('- Enter the ' + s + ' Player name: ').strip().capitalize())
p += 1
print()
draw_board()
tmp_player = first_player() - 1
start_play = players[tmp_player]
ball = str(balls[tmp_player])
print('\n--- ' + colored(start_play.upper(), attrs=['bold']) + ' shall start the game with '
+ ball + ' ---\n') # u'\u2B24'
def save_move():
column = move - 1
if len(board[column]) < 6:
board[column].append(color)
else:
print('\n-- ' + colored('WARNING.:', 'blue', attrs=['bold']) +
' Invalid move. Please choose new column to play.')
return True
return False
# check for horizontal victory
def check_horizontal():
tmp_board = clone_board()
for column in tmp_board:
for cell in column:
current_column = tmp_board.index(column)
current_cell = column.index(cell)
try:
if column[current_cell] == tmp_board[current_column + 1][current_cell] \
== tmp_board[current_column + 2][current_cell] \
== tmp_board[current_column + 3][current_cell]:
board[current_column][current_cell] = board[current_column + 1][current_cell] \
= board[current_column + 2][current_cell] = board[current_column + 3][current_cell] = 'win'
return True
column[column.index(cell)] = '' # this line makes all the difference in the horizontal detection
except IndexError:
continue
return False
# check for vertical victory
def check_vertical():
for column in board:
for cell in column:
current_cell = column.index(cell)
try:
if column[current_cell] == column[current_cell + 1] == column[current_cell + 2] \
== column[current_cell + 3]:
column[current_cell] = column[current_cell + 1] = column[current_cell + 2] \
= column[current_cell + 3] = 'win'
return True
except IndexError:
continue
return False
# case - positive_slope or negative_slope
def analyse_slope(case, tmp_board, column, cell):
current_column = tmp_board.index(column)
current_cell = column.index(cell)
status = False
try:
if case == 'positive_slope':
if tmp_board[current_column][current_cell] == tmp_board[current_column + 1][current_cell + 1] \
== tmp_board[current_column + 2][current_cell + 2] \
== tmp_board[current_column + 3][current_cell + 3]:
board[current_column][current_cell] = board[current_column + 1][current_cell + 1] \
= board[current_column + 2][current_cell + 2] = board[current_column + 3][current_cell + 3] = 'win'
status = True
else: # negative_slope
if tmp_board[current_column][current_cell] == tmp_board[current_column + 1][current_cell - 1] \
== tmp_board[current_column + 2][current_cell - 2] \
== tmp_board[current_column + 3][current_cell - 3]:
board[current_column][current_cell] = board[current_column + 1][current_cell - 1] \
= board[current_column + 2][current_cell - 2] \
= board[current_column + 3][current_cell - 3] = 'win'
status = True
if status:
return status
except IndexError:
return False
return False
# print the winning move in a different colour on the screen
# (the winning cells are already marked 'win' on the board, so a plain
# redraw is enough; the cells/case parameters are currently unused)
def draw_winner_board(cells, case):
draw_board()
return False
# board.copy() and board[::] only make shallow copies (the inner column
# lists stay shared), so I'm implementing my own deep copy here
def clone_board():
tmp = []
for i in board:
tmp.append(i.copy())
return tmp
# to check every possible diagonal, it is enough to scan starting cells in the first four columns (about half of the board)
def check_diagonal():
tmp_board = clone_board()
status = False
for column in tmp_board:
if len(column) > 0: # must have at least one element
if color in column: # the current color must be available in that cell
if board.index(column) < 4: # analyze only a half of the board. 21 in 42
for cell in column:
if column.index(cell) < 3: # 1st half of the column
status = analyse_slope('positive_slope', tmp_board, column, cell)
elif column.index(cell) > 2: # 2nd half of the column
status = analyse_slope('negative_slope', tmp_board, column, cell)
if status:
return True
tmp_board[tmp_board.index(column)][column.index(cell)] = ''
return False
def check_winner():
# check for vertical victory
if check_vertical(): # difficulty level: very easy
return True
# check for horizontal victory
if check_horizontal(): # difficulty level: easy
return True
# check for diagonal victory
if check_diagonal(): # difficulty level: I'm going to make it to be as medium-easy as possible
return True
return False
def set_move():
col_num = 0
while col_num < 1 or col_num > 7:
try:
col_num = int(input(colored(player.upper(), color, attrs=['bold']) + '\'s move.: '))
while col_num < 1 or col_num > 7:
print('\n-- ' + colored('WARNING.:', 'magenta', attrs=['bold']) +
' Please enter a number between [1, 7]')
col_num = int(input(colored(player.upper(), color, attrs=['bold']) + '\'s move.: '))
except ValueError:
print('\n-- ' + colored('WARNING.:', 'magenta', attrs=['bold']) + ' Please enter a number between [1, 7]')
return col_num
player = start_play
colors = ['red', 'blue']
total_move = 0
while True:
color = colors[tmp_player]
move = set_move()
column_is_full = save_move() # the move fails if the chosen column is already full
while column_is_full:
move = set_move()
column_is_full = save_move()
total_move += 1
# printing the current board on the screen
print()
draw_board()
print()
if total_move > 6:
if check_winner(): # check if we have a WINNER
print(colored('--- RESULT.: ', 'green', attrs=['bold']) +
colored(player.upper(), color, attrs=['bold']) + colored(' is the WINNER', attrs=['bold']))
print()
draw_board()
break
else: # check for a TIE
count_full = 0
for item in board:
if len(item) == 6:
count_full += 1
if count_full == col: # col equals the number of columns on the board
print(colored('--- RESULT.: ', 'green', attrs=['bold']) +
colored('TIE', attrs=['bold']))
break
if tmp_player == 0:
tmp_player = 1
else:
tmp_player = 0
player = players[tmp_player]
``` |
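As the comment above clone_board() notes, board.copy() and board[::] are shallow. The standard library already solves this, so an equivalent one-liner (a sketch, not a required change) is:
```python
import copy

def clone_board_deep(board):
    # deepcopy duplicates the inner column lists too, so mutating the
    # clone (e.g. blanking cells during the diagonal scan) leaves the
    # real board untouched
    return copy.deepcopy(board)
```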
{
"source": "jpswade/pimusicbox",
"score": 2
} |
#### File: addons/service.xbmc.callbacks-0.2/default.py
```python
import os
import sys
import xbmc
import xbmcgui
import xbmcaddon
import subprocess
script_xbmc_starts = ''
script_player_starts = ''
script_player_stops = ''
script_player_pauses = ''
script_player_resumes = ''
script_screensaver_starts = ''
script_screensaver_stops = ''
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonid__ = __addon__.getAddonInfo('id')
__addonname__ = __addon__.getAddonInfo('name')
def log(txt):
message = '%s: %s' % (__addonname__, txt.encode('ascii', 'ignore'))
xbmc.log(msg=message, level=xbmc.LOGDEBUG)
class Main:
def __init__(self):
self._init_vars()
self._init_property()
global script_xbmc_starts
if script_xbmc_starts:
log('Going to execute script = "' + script_xbmc_starts + '"')
subprocess.Popen([script_xbmc_starts])
self._daemon()
def _init_vars(self):
self.Player = MyPlayer()
self.Monitor = MyMonitor(update_settings = self._init_property, player_status = self._player_status)
def _init_property(self):
log('Reading properties')
global script_xbmc_starts
global script_player_starts
global script_player_stops
global script_player_pauses
global script_player_resumes
global script_screensaver_starts
global script_screensaver_stops
script_xbmc_starts = xbmc.translatePath(__addon__.getSetting("xbmc_starts"))
script_player_starts = xbmc.translatePath(__addon__.getSetting("player_starts"))
script_player_stops = xbmc.translatePath(__addon__.getSetting("player_stops"))
script_player_pauses = xbmc.translatePath(__addon__.getSetting("player_pauses"))
script_player_resumes = xbmc.translatePath(__addon__.getSetting("player_resumes"))
script_screensaver_starts = xbmc.translatePath(__addon__.getSetting("screensaver_starts"))
script_screensaver_stops = xbmc.translatePath(__addon__.getSetting("screensaver_stops"))
log('script xbmc starts = "' + script_xbmc_starts + '"')
log('script player starts = "' + script_player_starts + '"')
log('script player stops = "' + script_player_stops + '"')
log('script player pauses = "' + script_player_pauses + '"')
log('script player resumes = "' + script_player_resumes + '"')
log('script screensaver starts = "' + script_screensaver_starts + '"')
log('script screensaver stops = "' + script_screensaver_stops + '"')
def _player_status(self):
return self.Player.playing_status()
def _daemon(self):
while (not xbmc.abortRequested):
# Do nothing
xbmc.sleep(600)
log('abort requested')
class MyMonitor(xbmc.Monitor):
def __init__(self, *args, **kwargs):
xbmc.Monitor.__init__(self)
self.get_player_status = kwargs['player_status']
self.update_settings = kwargs['update_settings']
def onSettingsChanged(self):
self.update_settings()
def onScreensaverActivated(self):
log('screensaver starts')
global script_screensaver_starts
if script_screensaver_starts:
log('Going to execute script = "' + script_screensaver_starts + '"')
subprocess.Popen([script_screensaver_starts,self.get_player_status()])
def onScreensaverDeactivated(self):
log('screensaver stops')
global script_screensaver_stops
if script_screensaver_stops:
log('Going to execute script = "' + script_screensaver_stops + '"')
subprocess.Popen([script_screensaver_stops])
class MyPlayer(xbmc.Player):
def __init__(self):
xbmc.Player.__init__(self)
self.substrings = [ '-trailer', 'http://' ]
def playing_status(self):
if self.isPlaying():
return 'status=playing' + ';' + self.playing_type()
else:
return 'status=stopped'
def playing_type(self):
type = 'unknown'
if (self.isPlayingAudio()):
type = "music"
else:
if xbmc.getCondVisibility('VideoPlayer.Content(movies)'):
filename = ''
isMovie = True
try:
filename = self.getPlayingFile()
except:
pass
if filename != '':
for string in self.substrings:
if string in filename:
isMovie = False
break
if isMovie:
type = "movie"
elif xbmc.getCondVisibility('VideoPlayer.Content(episodes)'):
# Check for tv show title and season to make sure it's really an episode
if xbmc.getInfoLabel('VideoPlayer.Season') != "" and xbmc.getInfoLabel('VideoPlayer.TVShowTitle') != "":
type = "episode"
return 'type=' + type
def onPlayBackStarted(self):
log('player starts')
global script_player_starts
if script_player_starts:
log('Going to execute script = "' + script_player_starts + '"')
subprocess.Popen([script_player_starts,self.playing_type()])
def onPlayBackEnded(self):
self.onPlayBackStopped()
def onPlayBackStopped(self):
log('player stops')
global script_player_stops
if script_player_stops:
log('Going to execute script = "' + script_player_stops + '"')
subprocess.Popen([script_player_stops,self.playing_type()])
def onPlayBackPaused(self):
log('player pauses')
global script_player_pauses
if script_player_pauses:
log('Going to execute script = "' + script_player_pauses + '"')
subprocess.Popen([script_player_pauses,self.playing_type()])
def onPlayBackResumed(self):
log('player resumes')
global script_player_resumes
if script_player_resumes:
log('Going to execute script = "' + script_player_resumes + '"')
subprocess.Popen([script_player_resumes,self.playing_type()])
if (__name__ == "__main__"):
log('script version %s started' % __addonversion__)
Main()
del MyPlayer
del MyMonitor
del Main
log('script version %s stopped' % __addonversion__)
```
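One design note on the Popen calls above: passing `[script, arg]` as a list hands the player status to the script as argv[1] with no shell quoting pitfalls. A runnable stand-in using `echo` on a POSIX system (the status string is just an example):
```python
import subprocess

proc = subprocess.Popen(["echo", "status=playing;type=music"],
                        stdout=subprocess.PIPE)
out, _ = proc.communicate()
assert out.strip() == b"status=playing;type=music"
```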
#### File: addons/weather.wunderground/default.py
```python
import os, sys, socket, unicodedata, urllib2, time, base64, gzip
from datetime import date
from StringIO import StringIO
import xbmc, xbmcgui, xbmcaddon, xbmcvfs
if sys.version_info < (2, 7):
import simplejson
else:
import json as simplejson
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__addonid__ = __addon__.getAddonInfo('id')
__cwd__ = __addon__.getAddonInfo('path').decode("utf-8")
__version__ = __addon__.getAddonInfo('version')
__language__ = __addon__.getLocalizedString
__resource__ = xbmc.translatePath( os.path.join( __cwd__, 'resources', 'lib' ).encode("utf-8") ).decode("utf-8")
sys.path.append(__resource__)
from utilities import *
from wunderground import wundergroundapi
WUNDERGROUND_LOC = 'http://autocomplete.wunderground.com/aq?query=%s&format=JSON'
WEATHER_FEATURES = 'hourly/conditions/forecast10day/astronomy/almanac/alerts/satellite'
FORMAT = 'json'
ENABLED = __addon__.getSetting('Enabled')
DEBUG = __addon__.getSetting('Debug')
XBMC_PYTHON = xbmcaddon.Addon(id='xbmc.python').getAddonInfo('version')
WEATHER_ICON = xbmc.translatePath('special://temp/weather/%s.png').decode("utf-8")
WEATHER_WINDOW = xbmcgui.Window(12600)
LANGUAGE = xbmc.getLanguage().lower()
SPEEDUNIT = xbmc.getRegion('speedunit')
TEMPUNIT = unicode(xbmc.getRegion('tempunit'),encoding='utf-8')
TIMEFORMAT = xbmc.getRegion('meridiem')
DATEFORMAT = xbmc.getRegion('dateshort')
MAXDAYS = 6
socket.setdefaulttimeout(10)
def recode(alert): # workaround: wunderground provides a corrupt alerts message
try:
alert = alert.encode("latin-1").rstrip(' )').decode("utf-8")
except:
pass
return alert
def log(txt):
if DEBUG == 'true':
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % (__addonid__, txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def set_property(name, value):
WEATHER_WINDOW.setProperty(name, value)
def refresh_locations():
locations = 0
for count in range(1, 6):
loc_name = __addon__.getSetting('Location%s' % count)
if loc_name != '':
locations += 1
else:
__addon__.setSetting('Location%sid' % count, '')
set_property('Location%s' % count, loc_name)
set_property('Locations', str(locations))
log('available locations: %s' % str(locations))
def find_location(loc):
url = WUNDERGROUND_LOC % urllib2.quote(loc)
try:
req = urllib2.urlopen(url)
response = req.read()
req.close()
except:
response = ''
return response
def location(string):
locs = []
locids = []
log('location: %s' % string)
loc = unicodedata.normalize('NFKD', unicode(string, 'utf-8')).encode('ascii','ignore')
log('searching for location: %s' % loc)
query = find_location(loc)
log('location data: %s' % query)
data = parse_data(query)
if data != '' and data.has_key('RESULTS'):
for item in data['RESULTS']:
location = item['name']
locationid = item['l'][3:]
locs.append(location)
locids.append(locationid)
return locs, locids
def geoip():
retry = 0
while (retry < 6) and (not xbmc.abortRequested):
query = wundergroundapi('geolookup', 'lang:EN', 'autoip', FORMAT)
if query != '':
retry = 6
else:
retry += 1
xbmc.sleep(10000)
log('geoip download failed')
log('geoip data: %s' % query)
data = parse_data(query)
if data != '' and data.has_key('location'):
location = data['location']['city']
locationid = data['location']['l'][3:]
__addon__.setSetting('Location1', location)
__addon__.setSetting('Location1id', locationid)
log('geoip location: %s' % location)
else:
location = ''
locationid = ''
return location, locationid
def forecast(loc,locid):
try:
lang = LANG[LANGUAGE]
except:
lang = 'EN'
opt = 'lang:' + lang
log('weather location: %s' % locid)
retry = 0
while (retry < 6) and (not xbmc.abortRequested):
query = wundergroundapi(WEATHER_FEATURES, opt, locid, FORMAT)
if query != '':
retry = 6
else:
retry += 1
xbmc.sleep(10000)
log('weather download failed')
log('forecast data: %s' % query)
data = parse_data(query)
if data != '' and data.has_key('response') and not data['response'].has_key('error'):
properties(data,loc,locid)
else:
clear()
def clear():
set_property('Current.Condition' , 'N/A')
set_property('Current.Temperature' , '0')
set_property('Current.Wind' , '0')
set_property('Current.WindDirection' , 'N/A')
set_property('Current.Humidity' , '0')
set_property('Current.FeelsLike' , '0')
set_property('Current.UVIndex' , '0')
set_property('Current.DewPoint' , '0')
set_property('Current.OutlookIcon' , 'na.png')
set_property('Current.FanartCode' , 'na')
for count in range (0, MAXDAYS+1):
set_property('Day%i.Title' % count, 'N/A')
set_property('Day%i.HighTemp' % count, '0')
set_property('Day%i.LowTemp' % count, '0')
set_property('Day%i.Outlook' % count, 'N/A')
set_property('Day%i.OutlookIcon' % count, 'na.png')
set_property('Day%i.FanartCode' % count, 'na')
def parse_data(json):
try:
raw = json.replace('<br>',' ').replace('Ã¤','ä') # wu api bugs: stray markup and mojibake
reply = raw.replace('"-999%"','""').replace('"-9999.00"','""').replace('"-9998"','""').replace('"NA"','""') # wu will change these to null responses in the future
data = simplejson.loads(reply)
except:
log('failed to parse weather data')
data = ''
return data
def properties(data,loc,locid):
# standard properties
weathercode = WEATHER_CODES[data['current_observation']['icon_url'][31:-4]]
set_property('Current.Location' , loc)
set_property('Current.Condition' , data['current_observation']['weather'])
set_property('Current.Temperature' , str(data['current_observation']['temp_c']))
set_property('Current.Wind' , str(data['current_observation']['wind_kph']))
set_property('Current.WindDirection' , data['current_observation']['wind_dir'])
set_property('Current.Humidity' , data['current_observation']['relative_humidity'].rstrip('%'))
set_property('Current.FeelsLike' , data['current_observation']['feelslike_c'])
set_property('Current.UVIndex' , data['current_observation']['UV'])
set_property('Current.DewPoint' , str(data['current_observation']['dewpoint_c']))
set_property('Current.OutlookIcon' , '%s.png' % weathercode) # xbmc translates it to Current.ConditionIcon
set_property('Current.FanartCode' , weathercode)
for count, item in enumerate(data['forecast']['simpleforecast']['forecastday']):
weathercode = WEATHER_CODES[item['icon_url'][31:-4]]
set_property('Day%i.Title' % count, item['date']['weekday'])
set_property('Day%i.HighTemp' % count, str(item['high']['celsius']))
set_property('Day%i.LowTemp' % count, str(item['low']['celsius']))
set_property('Day%i.Outlook' % count, item['conditions'])
set_property('Day%i.OutlookIcon' % count, '%s.png' % weathercode)
set_property('Day%i.FanartCode' % count, weathercode)
if count == MAXDAYS:
break
# forecast properties
set_property('Forecast.IsFetched' , 'true')
set_property('Forecast.City' , data['current_observation']['display_location']['city'])
set_property('Forecast.State' , data['current_observation']['display_location']['state_name'])
set_property('Forecast.Country' , data['current_observation']['display_location']['country'])
update = time.localtime(float(data['current_observation']['observation_epoch']))
local = time.localtime(float(data['current_observation']['local_epoch']))
if DATEFORMAT[1] == 'd':
updatedate = WEEKDAY[update[6]] + ' ' + str(update[2]) + ' ' + MONTH[update[1]] + ' ' + str(update[0])
localdate = WEEKDAY[local[6]] + ' ' + str(local[2]) + ' ' + MONTH[local[1]] + ' ' + str(local[0])
elif DATEFORMAT[1] == 'm':
updatedate = WEEKDAY[update[6]] + ' ' + MONTH[update[1]] + ' ' + str(update[2]) + ', ' + str(update[0])
localdate = WEEKDAY[local[6]] + ' ' + MONTH[local[1]] + ' ' + str(local[2]) + ', ' + str(local[0])
else:
updatedate = WEEKDAY[update[6]] + ' ' + str(update[0]) + ' ' + MONTH[update[1]] + ' ' + str(update[2])
localdate = WEEKDAY[local[6]] + ' ' + str(local[0]) + ' ' + MONTH[local[1]] + ' ' + str(local[2])
if TIMEFORMAT != '/':
updatetime = time.strftime('%I:%M%p', update)
localtime = time.strftime('%I:%M%p', local)
else:
updatetime = time.strftime('%H:%M', update)
localtime = time.strftime('%H:%M', local)
set_property('Forecast.Updated' , updatedate + ' - ' + updatetime)
# current properties
set_property('Current.IsFetched' , 'true')
set_property('Current.LocalTime' , localtime)
set_property('Current.LocalDate' , localdate)
set_property('Current.WindDegree' , str(data['current_observation']['wind_degrees']) + u'°')
set_property('Current.SolarRadiation' , str(data['current_observation']['solarradiation']))
if 'F' in TEMPUNIT:
set_property('Current.Pressure' , data['current_observation']['pressure_in'] + ' inHg')
set_property('Current.Precipitation' , data['current_observation']['precip_1hr_in'] + ' in')
set_property('Current.HeatIndex' , str(data['current_observation']['heat_index_f']) + TEMPUNIT)
set_property('Current.WindChill' , str(data['current_observation']['windchill_f']) + TEMPUNIT)
else:
set_property('Current.Pressure' , data['current_observation']['pressure_mb'] + ' mb')
set_property('Current.Precipitation' , data['current_observation']['precip_1hr_metric'] + ' mm')
set_property('Current.HeatIndex' , str(data['current_observation']['heat_index_c']) + TEMPUNIT)
set_property('Current.WindChill' , str(data['current_observation']['windchill_c']) + TEMPUNIT)
if SPEEDUNIT == 'mph':
set_property('Current.Visibility' , data['current_observation']['visibility_mi'] + ' mi')
set_property('Current.WindGust' , str(data['current_observation']['wind_gust_mph']) + ' ' + SPEEDUNIT)
else:
set_property('Current.Visibility' , data['current_observation']['visibility_km'] + ' km')
set_property('Current.WindGust' , str(data['current_observation']['wind_gust_kph']) + ' ' + SPEEDUNIT)
# today properties
set_property('Today.IsFetched' , 'true')
if TIMEFORMAT != '/':
AM = unicode(TIMEFORMAT.split('/')[0],encoding='utf-8')
PM = unicode(TIMEFORMAT.split('/')[1],encoding='utf-8')
hour = int(data['moon_phase']['sunrise']['hour']) % 24
isam = (hour >= 0) and (hour < 12)
if isam:
hour = ('12' if (hour == 0) else '%02d' % (hour))
set_property('Today.Sunrise' , hour.lstrip('0') + ':' + data['moon_phase']['sunrise']['minute'] + ' ' + AM)
else:
hour = ('12' if (hour == 12) else '%02d' % (hour-12))
set_property('Today.Sunrise' , hour.lstrip('0') + ':' + data['moon_phase']['sunrise']['minute'] + ' ' + PM)
hour = int(data['moon_phase']['sunset']['hour']) % 24
isam = (hour >= 0) and (hour < 12)
if isam:
hour = ('12' if (hour == 0) else '%02d' % (hour))
set_property('Today.Sunset' , hour.lstrip('0') + ':' + data['moon_phase']['sunset']['minute'] + ' ' + AM)
else:
hour = ('12' if (hour == 12) else '%02d' % (hour-12))
set_property('Today.Sunset' , hour.lstrip('0') + ':' + data['moon_phase']['sunset']['minute'] + ' ' + PM)
else:
set_property('Today.Sunrise' , data['moon_phase']['sunrise']['hour'] + ':' + data['moon_phase']['sunrise']['minute'])
set_property('Today.Sunset' , data['moon_phase']['sunset']['hour'] + ':' + data['moon_phase']['sunset']['minute'])
set_property('Today.moonphase' , MOONPHASE(int(data['moon_phase']['ageOfMoon']), int(data['moon_phase']['percentIlluminated'])))
if 'F' in TEMPUNIT:
set_property('Today.AvgHighTemperature' , data['almanac']['temp_high']['normal']['F'] + TEMPUNIT)
set_property('Today.AvgLowTemperature' , data['almanac']['temp_low']['normal']['F'] + TEMPUNIT)
try:
set_property('Today.RecordHighTemperature' , data['almanac']['temp_high']['record']['F'] + TEMPUNIT)
set_property('Today.RecordLowTemperature' , data['almanac']['temp_low']['record']['F'] + TEMPUNIT)
except:
set_property('Today.RecordHighTemperature' , '')
set_property('Today.RecordLowTemperature' , '')
else:
set_property('Today.AvgHighTemperature' , data['almanac']['temp_high']['normal']['C'] + TEMPUNIT)
set_property('Today.AvgLowTemperature' , data['almanac']['temp_low']['normal']['C'] + TEMPUNIT)
try:
set_property('Today.RecordHighTemperature' , data['almanac']['temp_high']['record']['C'] + TEMPUNIT)
set_property('Today.RecordLowTemperature' , data['almanac']['temp_low']['record']['C'] + TEMPUNIT)
except:
set_property('Today.RecordHighTemperature' , '')
set_property('Today.RecordLowTemperature' , '')
try:
set_property('Today.RecordHighYear' , data['almanac']['temp_high']['recordyear'])
set_property('Today.RecordLowYear' , data['almanac']['temp_low']['recordyear'])
except:
set_property('Today.RecordHighYear' , '')
set_property('Today.RecordLowYear' , '')
# daily properties
set_property('Daily.IsFetched', 'true')
for count, item in enumerate(data['forecast']['simpleforecast']['forecastday']):
weathercode = WEATHER_CODES[item['icon_url'][31:-4]]
set_property('Daily.%i.LongDay' % (count+1), item['date']['weekday'])
set_property('Daily.%i.ShortDay' % (count+1), item['date']['weekday_short'])
if DATEFORMAT[1] == 'd':
set_property('Daily.%i.LongDate' % (count+1), str(item['date']['day']) + ' ' + item['date']['monthname'])
set_property('Daily.%i.ShortDate' % (count+1), str(item['date']['day']) + ' ' + MONTH[item['date']['month']])
else:
set_property('Daily.%i.LongDate' % (count+1), item['date']['monthname'] + ' ' + str(item['date']['day']))
set_property('Daily.%i.ShortDate' % (count+1), MONTH[item['date']['month']] + ' ' + str(item['date']['day']))
set_property('Daily.%i.Outlook' % (count+1), item['conditions'])
set_property('Daily.%i.OutlookIcon' % (count+1), WEATHER_ICON % weathercode)
set_property('Daily.%i.FanartCode' % (count+1), weathercode)
if SPEEDUNIT == 'mph':
set_property('Daily.%i.WindSpeed' % (count+1), str(item['avewind']['mph']) + ' ' + SPEEDUNIT)
set_property('Daily.%i.MaxWind' % (count+1), str(item['maxwind']['mph']) + ' ' + SPEEDUNIT)
elif SPEEDUNIT == 'Beaufort':
set_property('Daily.%i.WindSpeed' % (count+1), KPHTOBFT(item['avewind']['kph']))
set_property('Daily.%i.MaxWind' % (count+1), KPHTOBFT(item['maxwind']['kph']))
else:
set_property('Daily.%i.WindSpeed' % (count+1), str(item['avewind']['kph']) + ' ' + SPEEDUNIT)
set_property('Daily.%i.MaxWind' % (count+1), str(item['maxwind']['kph']) + ' ' + SPEEDUNIT)
set_property('Daily.%i.WindDirection' % (count+1), item['avewind']['dir'])
set_property('Daily.%i.ShortWindDirection' % (count+1), item['avewind']['dir'])
set_property('Daily.%i.WindDegree' % (count+1), str(item['avewind']['degrees']) + u'°')
set_property('Daily.%i.Humidity' % (count+1), str(item['avehumidity']) + '%')
set_property('Daily.%i.MinHumidity' % (count+1), str(item['minhumidity']) + '%')
set_property('Daily.%i.MaxHumidity' % (count+1), str(item['maxhumidity']) + '%')
if 'F' in TEMPUNIT:
set_property('Daily.%i.HighTemperature' % (count+1), str(item['high']['fahrenheit']) + TEMPUNIT)
set_property('Daily.%i.LowTemperature' % (count+1), str(item['low']['fahrenheit']) + TEMPUNIT)
set_property('Daily.%i.LongOutlookDay' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext'])
set_property('Daily.%i.LongOutlookNight' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext'])
set_property('Daily.%i.Precipitation' % (count+1), str(item['qpf_day']['in']) + ' in')
set_property('Daily.%i.Snow' % (count+1), str(item['snow_day']['in']) + ' in')
else:
set_property('Daily.%i.HighTemperature' % (count+1), str(item['high']['celsius']) + TEMPUNIT)
set_property('Daily.%i.LowTemperature' % (count+1), str(item['low']['celsius']) + TEMPUNIT)
set_property('Daily.%i.LongOutlookDay' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext_metric'])
set_property('Daily.%i.LongOutlookNight' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext_metric'])
set_property('Daily.%i.Precipitation' % (count+1), str(item['qpf_day']['mm']) + ' mm')
set_property('Daily.%i.Snow' % (count+1), str(item['snow_day']['cm']) + ' cm')
set_property('Daily.%i.ChancePrecipitation' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['pop'] + '%')
# weekend properties
set_property('Weekend.IsFetched', 'true')
if __addon__.getSetting('Weekend') == '2':
weekend = [4,5]
elif __addon__.getSetting('Weekend') == '1':
weekend = [5,6]
else:
weekend = [6,7]
count = 0
for item in data['forecast']['simpleforecast']['forecastday']:
if date(item['date']['year'], item['date']['month'], item['date']['day']).isoweekday() in weekend:
weathercode = WEATHER_CODES[item['icon_url'][31:-4]]
set_property('Weekend.%i.LongDay' % (count+1), item['date']['weekday'])
set_property('Weekend.%i.ShortDay' % (count+1), item['date']['weekday_short'])
if DATEFORMAT[1] == 'd':
set_property('Weekend.%i.LongDate' % (count+1), str(item['date']['day']) + ' ' + item['date']['monthname'])
set_property('Weekend.%i.ShortDate' % (count+1), str(item['date']['day']) + ' ' + MONTH[item['date']['month']])
else:
set_property('Weekend.%i.LongDate' % (count+1), item['date']['monthname'] + ' ' + str(item['date']['day']))
set_property('Weekend.%i.ShortDate' % (count+1), MONTH[item['date']['month']] + ' ' + str(item['date']['day']))
set_property('Weekend.%i.Outlook' % (count+1), item['conditions'])
set_property('Weekend.%i.OutlookIcon' % (count+1), WEATHER_ICON % weathercode)
set_property('Weekend.%i.FanartCode' % (count+1), weathercode)
if SPEEDUNIT == 'mph':
set_property('Weekend.%i.WindSpeed' % (count+1), str(item['avewind']['mph']) + ' ' + SPEEDUNIT)
set_property('Weekend.%i.MaxWind' % (count+1), str(item['maxwind']['mph']) + ' ' + SPEEDUNIT)
elif SPEEDUNIT == 'Beaufort':
set_property('Weekend.%i.WindSpeed' % (count+1), KPHTOBFT(item['avewind']['kph']))
set_property('Weekend.%i.MaxWind' % (count+1), KPHTOBFT(item['maxwind']['kph']))
else:
set_property('Weekend.%i.WindSpeed' % (count+1), str(item['avewind']['kph']) + ' ' + SPEEDUNIT)
set_property('Weekend.%i.MaxWind' % (count+1), str(item['maxwind']['kph']) + ' ' + SPEEDUNIT)
set_property('Weekend.%i.WindDirection' % (count+1), item['avewind']['dir'])
set_property('Weekend.%i.ShortWindDirection' % (count+1), item['avewind']['dir'])
set_property('Weekend.%i.WindDegree' % (count+1), str(item['avewind']['degrees']) + u'°')
set_property('Weekend.%i.Humidity' % (count+1), str(item['avehumidity']) + '%')
set_property('Weekend.%i.MinHumidity' % (count+1), str(item['minhumidity']) + '%')
set_property('Weekend.%i.MaxHumidity' % (count+1), str(item['maxhumidity']) + '%')
set_property('Weekend.%i.ChancePrecipitation' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['pop'] + '%')
if 'F' in TEMPUNIT:
set_property('Weekend.%i.HighTemperature' % (count+1), str(item['high']['fahrenheit']) + TEMPUNIT)
set_property('Weekend.%i.LowTemperature' % (count+1), str(item['low']['fahrenheit']) + TEMPUNIT)
set_property('Weekend.%i.Precipitation' % (count+1), str(item['qpf_day']['in']) + ' in')
set_property('Weekend.%i.Snow' % (count+1), str(item['snow_day']['in']) + ' in')
set_property('Weekend.%i.LongOutlookDay' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext'])
set_property('Weekend.%i.LongOutlookNight' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext'])
else:
set_property('Weekend.%i.HighTemperature' % (count+1), str(item['high']['celsius']) + TEMPUNIT)
set_property('Weekend.%i.LowTemperature' % (count+1), str(item['low']['celsius']) + TEMPUNIT)
set_property('Weekend.%i.Precipitation' % (count+1), str(item['qpf_day']['mm']) + ' mm')
set_property('Weekend.%i.Snow' % (count+1), str(item['snow_day']['cm']) + ' cm')
if data['current_observation']['display_location']['country'] == 'UK': # for the brits
dfcast_e = data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext'].split('.')
dfcast_m = data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext_metric'].split('.')
nfcast_e = data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext'].split('.')
nfcast_m = data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext_metric'].split('.')
for field in dfcast_e:
if field.endswith('mph'): # find windspeed in mph
wind = field
break
for field in dfcast_m:
if field.endswith('km/h'): # find windspeed in km/h
dfcast_m[dfcast_m.index(field)] = wind # replace windspeed in km/h with windspeed in mph
break
for field in nfcast_e:
if field.endswith('mph'): # find windspeed in mph
wind = field
break
for field in nfcast_m:
if field.endswith('km/h'): # find windspeed in km/h
nfcast_m[nfcast_m.index(field)] = wind # replace windspeed in km/h with windspeed in mph
break
set_property('Weekend.%i.LongOutlookDay' % (count+1), '. '.join(dfcast_m))
set_property('Weekend.%i.LongOutlookNight' % (count+1), '. '.join(nfcast_m))
else:
set_property('Weekend.%i.LongOutlookDay' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count]['fcttext_metric'])
set_property('Weekend.%i.LongOutlookNight' % (count+1), data['forecast']['txt_forecast']['forecastday'][2*count+1]['fcttext_metric'])
count += 1
if count == 2:
break
# 36 hour properties
set_property('36Hour.IsFetched', 'true')
for count, item in enumerate(data['forecast']['txt_forecast']['forecastday']):
weathercode = WEATHER_CODES[item['icon_url'][31:-4]]
if 'F' in TEMPUNIT:
try:
fcast = item['fcttext'].split('.')
for line in fcast:
if line.endswith('F'):
set_property('36Hour.%i.TemperatureHeading' % (count+1), line.rsplit(' ',1)[0])
set_property('36Hour.%i.Temperature' % (count+1), line.rsplit(' ',1)[1].rstrip('F').strip() + TEMPUNIT)
break
except:
set_property('36Hour.%i.TemperatureHeading' % (count+1), '')
set_property('36Hour.%i.Temperature' % (count+1), '')
set_property('36Hour.%i.Forecast' % (count+1), item['fcttext'])
else:
try:
fcast = item['fcttext_metric'].split('.')
for line in fcast:
if line.endswith('C'):
set_property('36Hour.%i.TemperatureHeading' % (count+1), line.rsplit(' ',1)[0])
set_property('36Hour.%i.Temperature' % (count+1), line.rsplit(' ',1)[1].rstrip('C').strip() + TEMPUNIT)
break
except:
set_property('36Hour.%i.TemperatureHeading' % (count+1), '')
set_property('36Hour.%i.Temperature' % (count+1), '')
if data['current_observation']['display_location']['country'] == 'UK': # for the brits
fcast_e = item['fcttext'].split('.')
for field in fcast_e:
if field.endswith('mph'): # find windspeed in mph
wind = field
break
for field in fcast:
if field.endswith('km/h'): # find windspeed in km/h
fcast[fcast.index(field)] = wind # replace windspeed in km/h with windspeed in mph
break
set_property('36Hour.%i.Forecast' % (count+1), '. '.join(fcast))
else:
set_property('36Hour.%i.Forecast' % (count+1), item['fcttext_metric'])
set_property('36Hour.%i.Heading' % (count+1), item['title'])
set_property('36Hour.%i.ChancePrecipitation' % (count+1), item['pop'] + '%')
set_property('36Hour.%i.OutlookIcon' % (count+1), WEATHER_ICON % weathercode)
set_property('36Hour.%i.FanartCode' % (count+1), weathercode)
if count == 2:
break
# hourly properties
set_property('Hourly.IsFetched', 'true')
for count, item in enumerate(data['hourly_forecast']):
weathercode = WEATHER_CODES[item['icon_url'][31:-4]]
if TIMEFORMAT != '/':
set_property('Hourly.%i.Time' % (count+1), item['FCTTIME']['civil'])
else:
set_property('Hourly.%i.Time' % (count+1), item['FCTTIME']['hour_padded'] + ':' + item['FCTTIME']['min'])
if DATEFORMAT[1] == 'd':
set_property('Hourly.%i.ShortDate' % (count+1), item['FCTTIME']['mday_padded'] + ' ' + item['FCTTIME']['month_name_abbrev'])
set_property('Hourly.%i.LongDate' % (count+1), item['FCTTIME']['mday_padded'] + ' ' + item['FCTTIME']['month_name'])
else:
set_property('Hourly.%i.ShortDate' % (count+1), item['FCTTIME']['month_name_abbrev'] + ' ' + item['FCTTIME']['mday_padded'])
set_property('Hourly.%i.LongDate' % (count+1), item['FCTTIME']['month_name'] + ' ' + item['FCTTIME']['mday_padded'])
if 'F' in TEMPUNIT:
set_property('Hourly.%i.Temperature' % (count+1), item['temp']['english'] + TEMPUNIT)
set_property('Hourly.%i.DewPoint' % (count+1), item['dewpoint']['english'] + TEMPUNIT)
set_property('Hourly.%i.FeelsLike' % (count+1), item['feelslike']['english'] + TEMPUNIT)
set_property('Hourly.%i.Precipitation' % (count+1), item['qpf']['english'] + ' in')
set_property('Hourly.%i.Snow' % (count+1), item['snow']['english'] + ' in')
set_property('Hourly.%i.HeatIndex' % (count+1), item['heatindex']['english'] + TEMPUNIT)
set_property('Hourly.%i.WindChill' % (count+1), item['windchill']['english'] + TEMPUNIT)
set_property('Hourly.%i.Mslp' % (count+1), item['mslp']['english'] + ' inHg')
else:
set_property('Hourly.%i.Temperature' % (count+1), item['temp']['metric'] + TEMPUNIT)
set_property('Hourly.%i.DewPoint' % (count+1), item['dewpoint']['metric'] + TEMPUNIT)
set_property('Hourly.%i.FeelsLike' % (count+1), item['feelslike']['metric'] + TEMPUNIT)
set_property('Hourly.%i.Precipitation' % (count+1), item['qpf']['metric'] + ' mm')
set_property('Hourly.%i.Snow' % (count+1), item['snow']['metric'] + ' mm')
set_property('Hourly.%i.HeatIndex' % (count+1), item['heatindex']['metric'] + TEMPUNIT)
set_property('Hourly.%i.WindChill' % (count+1), item['windchill']['metric'] + TEMPUNIT)
set_property('Hourly.%i.Mslp' % (count+1), item['mslp']['metric'] + ' inHg')
if SPEEDUNIT == 'mph':
set_property('Hourly.%i.WindSpeed' % (count+1), item['wspd']['english'] + ' ' + SPEEDUNIT)
elif SPEEDUNIT == 'Beaufort':
set_property('Hourly.%i.WindSpeed' % (count+1), KPHTOBFT(int(item['wspd']['metric'])))
else:
set_property('Hourly.%i.WindSpeed' % (count+1), item['wspd']['metric'] + ' ' + SPEEDUNIT)
set_property('Hourly.%i.WindDirection' % (count+1), item['wdir']['dir'])
set_property('Hourly.%i.ShortWindDirection' % (count+1), item['wdir']['dir'])
set_property('Hourly.%i.WindDegree' % (count+1), item['wdir']['degrees'] + u'°')
set_property('Hourly.%i.Humidity' % (count+1), item['humidity'] + '%')
set_property('Hourly.%i.UVIndex' % (count+1), item['uvi'])
set_property('Hourly.%i.ChancePrecipitation' % (count+1), item['pop'] + '%')
set_property('Hourly.%i.Outlook' % (count+1), item['condition'])
set_property('Hourly.%i.OutlookIcon' % (count+1), WEATHER_ICON % weathercode)
set_property('Hourly.%i.FanartCode' % (count+1), weathercode)
# alert properties
set_property('Alerts.IsFetched', 'true')
if str(data['alerts']) != '[]':
rss = ''
alerts = ''
for count, item in enumerate(data['alerts']):
description = recode(item['description']) # workaround: wunderground provides a corrupt alerts message
message = recode(item['message']) # workaround: wunderground provides a corrupt alerts message
set_property('Alerts.%i.Description' % (count+1), description)
set_property('Alerts.%i.Message' % (count+1), message)
set_property('Alerts.%i.StartDate' % (count+1), item['date'])
set_property('Alerts.%i.EndDate' % (count+1), item['expires'])
set_property('Alerts.%i.Significance' % (count+1), SEVERITY[item['significance']])
rss = rss + description.replace('\n','') + ' - '
alerts = alerts + message + '[CR][CR]'
        set_property('Alerts.RSS' , rss[:-3]) # drop the trailing ' - ' (rstrip with a multi-char set would also eat characters from the text itself)
        set_property('Alerts' , alerts[:-8]) # drop the trailing '[CR][CR]'
set_property('Alerts.Count' , str(count+1))
else:
set_property('Alerts.RSS' , '')
set_property('Alerts' , '')
set_property('Alerts.Count' , '0')
# map properties
set_property('Map.IsFetched', 'true')
filelist = []
locid = base64.b16encode(locid)
addondir = os.path.join(__cwd__, 'resources', 'logo')
mapdir = xbmc.translatePath('special://profile/addon_data/%s/map' % __addonid__)
set_property('MapPath', addondir)
if not xbmcvfs.exists(mapdir):
xbmcvfs.mkdir(mapdir)
dirs, filelist = xbmcvfs.listdir(mapdir)
animate = __addon__.getSetting('Animate')
for img in filelist:
item = xbmc.translatePath('special://profile/addon_data/%s/map/%s' % (__addonid__,img)).decode("utf-8")
if animate == 'true':
if (time.time() - os.path.getmtime(item) > 14400) or (not locid in item):
xbmcvfs.delete(item)
else:
xbmcvfs.delete(item)
zoom = __addon__.getSetting('Zoom')
if zoom == '10': # default setting does not return decimals, changed setting will
zoom = '10.0'
url = data['satellite']['image_url_ir4'].replace('width=300&height=300','width=640&height=360').replace('radius=75','radius=%i' % int(1000/int(zoom.rstrip('0').rstrip('.,'))))
log('map url: %s' % url)
try:
req = urllib2.Request(url)
req.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
compr = gzip.GzipFile(fileobj=buf)
data = compr.read()
else:
data = response.read()
response.close()
log('satellite image downloaded')
except:
data = ''
        log('satellite image download failed')
if data != '':
timestamp = time.strftime('%Y%m%d%H%M%S')
mapfile = xbmc.translatePath('special://profile/addon_data/%s/map/%s-%s.png' % (__addonid__,locid,timestamp)).decode("utf-8")
try:
tmpmap = open(mapfile, 'wb')
tmpmap.write(data)
tmpmap.close()
set_property('MapPath', mapdir)
except:
log('failed to save satellite image')
log('version %s started: %s' % (__version__, sys.argv))
log('lang: %s' % LANGUAGE)
log('speed: %s' % SPEEDUNIT)
log('temp: %s' % TEMPUNIT[1])
log('time: %s' % TIMEFORMAT)
log('date: %s' % DATEFORMAT)
set_property('WeatherProvider', __addonname__)
set_property('WeatherProviderLogo', xbmc.translatePath(os.path.join(__cwd__, 'resources', 'banner.png')))
if sys.argv[1].startswith('Location'):
keyboard = xbmc.Keyboard('', xbmc.getLocalizedString(14024), False)
keyboard.doModal()
if (keyboard.isConfirmed() and keyboard.getText() != ''):
text = keyboard.getText()
locations, locationids = location(text)
dialog = xbmcgui.Dialog()
if locations != []:
selected = dialog.select(xbmc.getLocalizedString(396), locations)
if selected != -1:
__addon__.setSetting(sys.argv[1], locations[selected])
__addon__.setSetting(sys.argv[1] + 'id', locationids[selected])
log('selected location: %s' % locations[selected])
log('selected location id: %s' % locationids[selected])
else:
dialog.ok(__addonname__, xbmc.getLocalizedString(284))
elif ENABLED == 'false':
clear()
log('you need to enable weather retrieval in the weather underground addon settings')
elif XBMC_PYTHON == '1.0' or XBMC_PYTHON == '2.0' or XBMC_PYTHON == '2.0.0':
clear()
log('older versions of XBMC are not supported by the weather underground addon')
else:
location = __addon__.getSetting('Location%s' % sys.argv[1])
locationid = __addon__.getSetting('Location%sid' % sys.argv[1])
if (locationid == '') and (sys.argv[1] != '1'):
location = __addon__.getSetting('Location1')
locationid = __addon__.getSetting('Location1id')
log('trying location 1 instead')
if locationid == '':
log('fallback to geoip')
location, locationid = geoip()
if not locationid == '':
forecast(location, locationid)
else:
log('no location found')
clear()
refresh_locations()
log('finished')
```
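
The mph-for-km/h substitution above is copy-pasted three times (weekend day, weekend night, and the 36-hour block). A standalone sketch of the same idea as one helper — the function name and the sample strings are invented for illustration:

```python
# Sketch: swap the km/h wind field in a metric forecast for the mph field
# from the matching imperial forecast (the UK convention implemented above).
def replace_metric_wind(imperial_fields, metric_fields):
    wind = next((f for f in imperial_fields if f.endswith('mph')), None)
    if wind is not None:
        for i, field in enumerate(metric_fields):
            if field.endswith('km/h'):
                metric_fields[i] = wind
                break
    return metric_fields

print(replace_metric_wind(['Windy', 'Winds 10 mph'], ['Windy', 'Winds 16 km/h']))
# -> ['Windy', 'Winds 10 mph']
```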
#### File: resources/lib/wunderground.py
```python
import urllib2, gzip, base64
from StringIO import StringIO
WAIK = 'NDEzNjBkMjFkZjFhMzczNg=='
WUNDERGROUND_URL = 'http://api.wunderground.com/api/%s/%s/%s/q/%s.%s'
def wundergroundapi(features, settings, query, fmt):
url = WUNDERGROUND_URL % (base64.b64decode(WAIK)[::-1], features, settings, query, fmt)
try:
req = urllib2.Request(url)
req.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(req)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
compr = gzip.GzipFile(fileobj=buf)
data = compr.read()
else:
data = response.read()
response.close()
except:
data = ''
return data
``` |
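
A usage sketch for the helper above; the feature/settings/query values follow the old Wunderground REST scheme seen in default.py, and the service has since been retired, so this is illustrative only:

```python
# Sketch: fetch current conditions for the caller's IP and print the summary.
raw = wundergroundapi('conditions', 'lang:EN', 'autoip', 'json')
if raw:
    import json
    conditions = json.loads(raw)
    print(conditions['current_observation']['weather'])
else:
    print('request failed')  # wundergroundapi() returns '' on any error
```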
{
"source": "jpsxlr8/Self-Driving-Car",
"score": 3
} |
#### File: jpsxlr8/Self-Driving-Car/self_driving_car.py
```python
import csv
import numpy as np
from google.colab import files
import os
import zipfile
import random
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from shutil import copyfile
import matplotlib.pyplot as plt
import cv2
from tqdm import tqdm
from keras import optimizers
from keras.utils import to_categorical
from google.colab import drive
drive.mount('/content/drive')
#for second model
local_zip = '/content/drive/My Drive/handwrittenmathsymbols.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('/content')
zip_ref.close()
DATADIR = "/content/drive/My Drive/data/photo"
CATEGORIES = ["1","2","3","new"]
for category in CATEGORIES: # iterate over the image classes
    path = os.path.join(DATADIR,category) # build the path to this class folder
    for img in os.listdir(path): # iterate over each image in the class
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array
plt.imshow(img_array, cmap='gray') # graph it
plt.show() # display!
break # we just want one for now so break
break
print(img_array)
print(img_array.shape)
#TO CHANGE THE SIZE OF IMAGE
IMG_SIZE = 250
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(new_array, cmap='gray')
plt.show()
source_images_m1 = []
source_path_m1 = []
DATADIR = "/content/drive/My Drive/data/photo"
def create_source_data():
    for category in CATEGORIES: # iterate over the image classes
        path = os.path.join(DATADIR,category) # build the path to this class folder
        class_num = CATEGORIES.index(category) # numeric label for the class
for img in tqdm(os.listdir(path)):
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE)
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
new_array = np.array(new_array).astype('float32') / 255.
source_images_m1.append(new_array)
source_path_m1.append(class_num)
create_source_data()
print(len(source_images_m1))
print(len(source_path_m1))
source_path_m1 = np.array(source_path_m1)
i = 0
while i < 6074:
    # note: the original used the invalid operator '&&'; 'or' is assumed here,
    # since one label cannot equal two different values at once
    if source_path_m1[i] == 2 or source_path_m1[i] == 3:
        source_path_m1[i] = 1
    if source_path_m1[i] == 5 or source_path_m1[i] == 6:
        source_path_m1[i] = 2
    if source_path_m1[i] == 6:  # kept as written (the original '== 6 && :' was left incomplete); unreachable after the remap above
        source_path_m1[i] = 3
    if source_path_m1[i] == 8:
        source_path_m1[i] = 4
    i = i + 1
print(len(source_path_m1))
#for second model
source_images_m2 = []
source_path_m2 = []
def create_source_data():
    # assumed fix: the original wrote CATEGORIES(2)/DATADIR(2), which calls them
    # like functions and raises a TypeError; DATADIR2/CATEGORIES2 are hypothetical
    # names for the second (handwrittenmathsymbols) dataset unzipped above
    DATADIR2 = "/content/extracted_images"      # assumed extraction path of the zip
    CATEGORIES2 = sorted(os.listdir(DATADIR2))  # assumed: one sub-folder per symbol class
    for category in CATEGORIES2:
        path = os.path.join(DATADIR2, category)  # path to this class folder
        class_num = CATEGORIES2.index(category)  # numeric label for the class
        for img in tqdm(os.listdir(path)):  # iterate over each image in the class
try:
img_array = cv2.imread(os.path.join(path,img) ,cv2.IMREAD_GRAYSCALE) # convert to array
new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize to normalize data size
source_images_m2.append(new_array)
source_path_m2.append(class_num)# add this to our training_data
except Exception as e: # in the interest in keeping the output clean...
pass
#except OSError as e:
# print("OSErrroBad img most likely", e, os.path.join(path,img))
#except Exception as e:
# print("general exception", e, os.path.join(path,img))
create_source_data()
print(len(source_images_m2))
print(len(source_path_m2))
X = np.array(source_images_m1)
Y = np.array(source_path_m1)
# print(source_images_m1[1])
# print(source_path_m1)
from keras.utils import to_categorical
Y = to_categorical(Y , num_classes = 4)
print(X.shape)
print(Y.shape)
#source_images_m2 = np.array(source_images_m2)
#source_path_m2 = np.array(source_path_m2)
#print(source_images_m2.shape)
#print(source_path_m2.shape)
#print(source_images_m2[1])
#print(source_path_m2)
#print(b[1,0])
#training_images = np.array(b[:,0])
import sklearn.model_selection as model_selection
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y, train_size=0.9,test_size=0.1)
# print ("X_train_m1: ", X_train_m1)
# print ("y_train_m1: ", y_train_m1)
# print ("X_test_m1: ", X_test_m1)
# print ("y_test_m1: ", y_test_m1)
# print (X_train_m1.shape)
#X_train: [4, 9, 3, 5, 7, 6, 1]
#y_train: [16, 81, 9, 25, 49, 36, 1]
#X_test: [8, 2, 0]
#y_test: [64, 4, 0]
#X_train_m2, X_test_m2, y_train_m2, y_test_m2 = model_selection.train_test_split(source_images_m2, source_path_m2, train_size=0.9,test_size=0.1, random_state=101)
#print ("X_train_m2: ", X_train_m2)
#print ("y_train_m2: ", y_train_m2)
#print ("X_test_m2: ", X_test_m2)
#print ("y_test_m2: ", y_test_m2)
#print (X_train_m2.shape)
X_train = np.expand_dims(X_train, axis=3)
X_test = np.expand_dims(X_test, axis=3)
# print(X_train_m1_images.shape)
# print(X_test_m1_images.shape)
#print(X_train_m2_images.shape)
#print(X_test_m2_images.shape)
print(y_test.shape)
print(X_test.shape)
from keras import optimizers
from keras import regularizers
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten, BatchNormalization
from keras.models import Sequential
model = Sequential()
model.add(Conv2D(16 , kernel_size = (3,3) , strides = (1,1) , activation = 'relu' , padding = 'same' , input_shape = (250,250,1)))
model.add(MaxPooling2D(pool_size = (2,2)))
model.add(BatchNormalization())
# model.add(Conv2D(32 , kernel_size = (3,3) , strides = (1,1) , activation = 'relu' , padding = 'same'))
# model.add(MaxPooling2D(pool_size = (2,2)))
# model.add(BatchNormalization())
# model.add(Conv2D(64 , kernel_size = (3,3) , strides = (1,1) , activation = 'relu' , padding = 'same' ))
# model.add(MaxPooling2D(pool_size = (2,2)))
# model.add(BatchNormalization())
model.add(Flatten())
# model.add(Dense(1024 , activation = 'relu'))
# model.add(BatchNormalization())
# model.add(Dropout(0.25))
model.add(Dense(128 , activation = 'relu'))
model.add(BatchNormalization())
model.add(Dropout(0.25))
model.add(Dense(4 , activation = 'softmax'))
sgd = optimizers.SGD(lr=0.0001, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(loss= 'categorical_crossentropy' ,
              optimizer=sgd,  # pass the configured optimizer object; the string "sgd" would ignore these settings
              metrics=["accuracy"])
# history = model.fit_generator(train_datagen.flow(X_train_m1_images, y_train_m1, batch_size=32),
# steps_per_epoch=len(X_train_m1_images) / 32,
# epochs=15,
# validation_data=validation_datagen.flow(X_test_m1_images, y_test_m1, batch_size=32),
# validation_steps=len(X_test_m1_images) / 32)
history = model.fit(X_train , y_train , epochs = 10 , batch_size = 16 , validation_split = 0.2)
acc = model.evaluate(X_test , y_test)
acc
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
rounded_prediction = model.predict_classes(X_test)
print(rounded_prediction)
model.save('/content/drive/My Drive/signimage5.h5')
from keras.models import load_model
m = load_model('/content/drive/My Drive/signimage.h5')
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
model = Sequential()
# input: 100x100 images with 3 channels -> (100, 100, 3) tensors.
# this applies 32 convolution filters of size 3x3 each.
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(100, 100, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = optimizers.SGD(lr=0.1, decay=1e-4, momentum=0.9, nesterov=True)
model.compile(loss= 'sparse_categorical_crossentropy' ,
              optimizer=sgd,  # use the configured optimizer object instead of the string "sgd"
              metrics=["accuracy"])
# note: the m2 train/test split above is commented out, so this block will not
# run as-is; train_datagen/validation_datagen were never defined in the original
# and are assumed here to be plain ImageDataGenerator instances
train_datagen = ImageDataGenerator()
validation_datagen = ImageDataGenerator()
history = model.fit_generator(train_datagen.flow(X_train_m2_images, y_train_m2, batch_size=32),
                              steps_per_epoch=len(X_train_m2_images) / 32,
                              epochs=15,
                              validation_data=validation_datagen.flow(X_test_m2_images, y_test_m2, batch_size=32),
                              validation_steps=len(X_test_m2_images) / 32)
model.evaluate(X_test_m2_images, y_test_m2)
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
js = Javascript('''
async function takePhoto(quality) {
const div = document.createElement('div');
const capture = document.createElement('button');
capture.textContent = 'Capture';
div.appendChild(capture);
const video = document.createElement('video');
video.style.display = 'block';
const stream = await navigator.mediaDevices.getUserMedia({video: true});
document.body.appendChild(div);
div.appendChild(video);
video.srcObject = stream;
await video.play();
// Resize the output to fit the video element.
google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
// Wait for Capture to be clicked.
await new Promise((resolve) => capture.onclick = resolve);
const canvas = document.createElement('canvas');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
canvas.getContext('2d').drawImage(video, 0, 0);
stream.getVideoTracks()[0].stop();
div.remove();
return canvas.toDataURL('image/jpeg', quality);
}
''')
display(js)
data = eval_js('takePhoto({})'.format(quality))
binary = b64decode(data.split(',')[1])
with open(filename, 'wb') as f:
f.write(binary)
return filename
from IPython.display import Image
try:
filename = take_photo()
print('Saved to {}'.format(filename))
# Show the image which was just taken.
display(Image(filename))
except Exception as err:
# Errors will be thrown if the user does not have a webcam or if they do not
# grant the page permission to access it.
print(str(err))
``` |
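
The label-remapping loop above is easier to audit as a dictionary lookup; a standalone sketch using the same (as-written) old-to-new pairs on toy data:

```python
import numpy as np

# Vectorised equivalent of the remapping while-loop above (toy labels).
remap = {2: 1, 3: 1, 5: 2, 6: 2, 8: 4}
labels = np.array([0, 2, 3, 5, 6, 8])
remapped = np.array([remap.get(int(v), int(v)) for v in labels])
print(remapped)  # -> [0 1 1 2 2 4]
```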
{
"source": "JPTa/g81-to-heatmap-relative",
"score": 2
} |
#### File: JPTa/g81-to-heatmap-relative/g81-to-heatmap.py
```python
g81_output_raw = """
0.33000 0.29435 0.25157 0.20167 0.14463 0.08046 0.00917
0.30509 0.28572 0.25841 0.22315 0.17995 0.12881 0.06972
0.27731 0.27026 0.25516 0.23204 0.20087 0.16168 0.11444
0.24667 0.24796 0.24185 0.22833 0.20741 0.17907 0.14333
0.21315 0.21884 0.21847 0.21204 0.19955 0.18100 0.15639
0.17676 0.18288 0.18501 0.18315 0.17729 0.16745 0.15361
0.13750 0.14009 0.14148 0.14167 0.14065 0.13843 0.13500
"""
import re
# Define your regex rules here depending on how
# your raw output looks. Ultimately, you want to
# arrive at several lines of comma separated
# float values, so split() works well later.
g81_output_parsed = re.sub(r"\n[ ]+", "\n", g81_output_raw.strip())
# No need to edit anything below this :)
#
import datetime
import numpy as np
from numpy import radians as rad
import matplotlib
from matplotlib.patches import Arc, RegularPolygon
import matplotlib.image as mpimg
# Tell matplotlib to not worry about DISPLAY
matplotlib.use('Agg')
# Import pyplot as plt for ease of use
import matplotlib.pyplot as plt
# Draw arc arrow
def arcArrow(ax,radius,centX,centY,direction='ccw',color_='black'):
angle_ = 165
# Line
arc = Arc([centX,centY],radius,radius,angle=angle_,
theta1=205,capstyle='round',linestyle='-',lw=3,color=color_)
ax.add_patch(arc)
dir = 1
if direction == 'cw':
dir = -1
# Create the arrow head
endX=centX+(radius/2)*dir*np.cos(rad(angle_)) #Do trig to determine end position
endY=centY+(radius/2)*np.sin(rad(angle_))
ax.add_patch(
RegularPolygon(
(endX, endY),
3, # triangle
radius/5, # radius
dir*rad(360+angle_), # orientation
color=color_
)
)
    ax.set_xlim([centX-radius,centX+radius])  # fixed: original mixed centY into the x-limits and chained the two calls with 'and'
    ax.set_ylim([centY-radius,centY+radius])
# Calculate how many degrees to turn per distance
def dist2deg(distance):
screw_pitch = 0.5
return str(int(round(distance / screw_pitch * 360))) + "°"
# Add adjustment points
def addAdjuster(ax,x,y,z):
    if z < 0:
        z_marker = '_'
        z_mcolor = 'r'
        dir = 'ccw'
    elif z > 0:
        z_marker = '+'
        z_mcolor = 'g'
        dir = 'cw'
    else:
        return  # z == 0: nothing to adjust, and the marker variables would otherwise be undefined
plt.plot(x, y, z_marker, color=z_mcolor)
plt.text(x, y-9, dist2deg(z), ha="center", va="center",
bbox=dict(boxstyle="round", facecolor="white", lw=.75, alpha=.65)
)
arcArrow(ax,15,x,y,dir,z_mcolor)
# We're about to convert these strings into floats,
# this list will hold onto those.
g81_list_of_lists = []
# Split our regex corrected output by line, then
# split each line by its commas and convert the
# string values to floats.
for line in g81_output_parsed.splitlines():
g81_list_of_lists.append([float(i) for i in re.split(r"[ ]+", line)])
g81_xyz_list_of_lists = []
row_count = 0
col_count = 0
x_size = 250
y_size = 210
# These values come from mesh_bed_calibration.cpp
ZERO_REF_X = 2
ZERO_REF_Y = 9.4
sheet_margin_front = 24.5
sheet_margin_back = 16.5
sheet_left_x = 0
sheet_right_x = sheet_left_x + x_size
sheet_front_y = -(sheet_margin_front)
sheet_back_y = sheet_front_y + y_size + sheet_margin_front + sheet_margin_back
left_probe_bed_position = 38.5 - ZERO_REF_X
front_probe_bed_position = 18.4 - ZERO_REF_Y
right_probe_bed_position = 243.5 - ZERO_REF_X
back_probe_bed_position = 210.4 - ZERO_REF_Y
x_inc = (right_probe_bed_position - left_probe_bed_position) / 6
y_inc = (back_probe_bed_position - front_probe_bed_position) / 6
x_vals = np.zeros(7)
y_vals = np.zeros(7)
z_vals = np.zeros(shape=(7,7))
center_z = g81_list_of_lists[3][3]
for col in g81_list_of_lists:
for val in col:
x_vals[col_count] = col_count*x_inc + left_probe_bed_position
y_vals[row_count] = row_count*y_inc + front_probe_bed_position
z_vals[col_count][row_count] = val - center_z
row_count = row_count + 1
col_count = col_count + 1
row_count = 0
# Set figure and gca objects, this will let us
# adjust things about our heatmap image as well
# as adjust axes label locations.
fig = plt.figure(dpi=96, figsize=(10, 8))
ax = plt.gca()
for x in x_vals:
for y in y_vals:
plt.plot(x, y, '.', color='k')
# Show bolt adjustment values
# Bolt location Y values inverted
x_points = [16.7, 0, 0, 125.4, 0, 0, 228.8]
y_points = [210.4, 0, 0, 105.6, 0, 0, 0.8]
y_vals_r = list(reversed(y_vals))
output_mm_txt = "\nMeasured distances (in mm):"
output_deg_txt = "\n\nBolt adjustments (in degrees):"
for y in [0, 3, 6]:
output_mm_txt = output_mm_txt + "\n"
output_deg_txt = output_deg_txt + "\n"
for x in [0, 3, 6]:
z_val = round(z_vals[y][x], 3)
output_mm_txt = output_mm_txt + "\t" + str(z_val)
output_deg_txt = output_deg_txt + "\t" + dist2deg(z_val)
if x == 3 and y == 3:
marker = '*'
mcolor = 'g'
msize = 15
else:
marker = 'o'
mcolor = 'b'
msize = 10
# Draw marker
plt.plot(x_vals[x], y_vals[y], marker, color=mcolor, linewidth=1, markersize=msize)
# Add label for markers
if z_val:
ecolor = "red"
box = "larrow"
y_off = x_off = 25
rot = 45
if y == 0:
rot = -45
y_off = -(y_off)
if x == 3:
rot = 90
x_off = 0
y_off = 25
if y == 0:
y_off = -(y_off)
box = "rarrow"
if x == 6:
box = "rarrow"
rot = -(rot)
x_off = -(x_off)
if z_val > 0:
ecolor = "blue"
plt.text(x_vals[x] + x_off, y_vals_r[y] + y_off, str(round(z_val,3)), ha="center", va="center", rotation=rot,
bbox=dict(boxstyle=box + ",pad=.4", facecolor="white", lw=2, edgecolor=ecolor, alpha=.60)
)
addAdjuster(ax, x_points[x], y_points[y], z_val)
# Print results as text to stdout
print(output_mm_txt + output_deg_txt)
# Select color theme
cmap_theme = plt.cm.get_cmap("RdBu")
contour = plt.contourf(x_vals, y_vals[::-1], z_vals, alpha=.90, antialiased=True, cmap=cmap_theme)
img = mpimg.imread('Heatbed-MK52.png')
#img = mpimg.imread('mk52_steel_sheet.png')
plt.imshow(img, extent=[sheet_left_x, sheet_right_x, sheet_front_y, sheet_back_y], interpolation="lanczos", cmap=cmap_theme)
ax.set_xlim(left=0, right=x_size)
ax.set_ylim(bottom=0, top=y_size)
# Set various options about the graph image before
# we generate it. Things like labeling the axes and
# colorbar, and setting the X axis label/ticks to
# the top to better match the G81 output.
plt.title("Mesh Level: " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
plt.axis('image')
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
plt.colorbar(contour, label="Bed Level (mm) Maximum variance: " + str(round(z_vals.max() - z_vals.min(), 3)))
# Save our graph as an image in the current directory.
fig.savefig('g81_heatmap.png', bbox_inches="tight")
``` |
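
A quick check of the dist2deg arithmetic used above: with the 0.5 mm screw pitch, a measured deviation maps to deviation / 0.5 × 360 degrees of bolt rotation:

```python
# dist2deg sanity check (0.5 mm pitch, as in the script above)
for d in (0.05, -0.125, 0.25):
    print(d, '->', int(round(d / 0.5 * 360)), 'deg')
# 0.05 -> 36 deg, -0.125 -> -90 deg, 0.25 -> 180 deg
```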
{
"source": "jptarqu/PhillipiansProxy",
"score": 3
} |
#### File: python/predictor/convertToOnnx.py
```python
import os
import tensorflow as tf
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import numpy as np
import tf2onnx
import onnxruntime as rt
# image preprocessing
def predictImg(img_path):
img_size = 224
img = image.load_img(img_path, target_size=(img_size, img_size))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# load keras model
#from keras.applications.resnet50 import ResNet50
#model = ResNet50(include_top=True, weights='imagenet')
# convert to onnx model
#onnx_model = keras2onnx.convert_keras(model, model.name)
output_path = "E:\\OneDrive\\sources\\PhillipiansProxy\\python\\predictor\\model.onnx"
# runtime prediction
providers = ['CPUExecutionProvider']
m = rt.InferenceSession(output_path, providers=providers)
onnx_pred = m.run(['prediction'], {"input": x})
for pred in onnx_pred[0][0]:
print('%.4f' %pred)
predictImg('E:\\test\\s1.JPG')
print('-----------------')
predictImg('E:\\test\\test3.jpg')
# print(onnx_pred[0][0])
# print('ONNX Predicted:', decode_predictions(onnx_pred[0], top=3)[0])
``` |
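
The conversion step itself is commented out above even though tf2onnx is imported; a minimal sketch of producing model.onnx with tf2onnx's Keras converter — the opset and the 'input' tensor name are arbitrary choices here, not taken from the original:

```python
# Sketch: convert the ResNet50 referenced above to ONNX with tf2onnx.
import tensorflow as tf
import tf2onnx
from tensorflow.keras.applications.resnet50 import ResNet50

model = ResNet50(include_top=True, weights='imagenet')
spec = (tf.TensorSpec((None, 224, 224, 3), tf.float32, name='input'),)
tf2onnx.convert.from_keras(model, input_signature=spec, opset=13,
                           output_path='model.onnx')
```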
{
"source": "JP-Tek-Services/open_weather_radio",
"score": 3
} |
#### File: root/scripts/mqtt.py
```python
import argparse
import json
import sys
import paho.mqtt.client as mqttClient
import time
import os
import sched
import datetime
timesec = 60
sleep = 1
s = sched.scheduler(time.time, time.sleep)
global Connected
Connected = False
# Universal alert builder
def build_alert(
TYPE="online",
MSG="",
ORG="",
EEE="",
PSSCCC="",
TTTT="",
JJJHHMM="",
LLLLLLLL="",
LANG=""
):
#Build alert array
alert = {
"alert": TYPE,
"attr": {
"message": ' '.join(MSG.split()),
"org": ORG,
"event_code": EEE,
"location_codes": PSSCCC,
"purge_time": TTTT,
"utc_purge_time": JJJHHMM,
"station_callsign": LLLLLLLL,
"language": LANG,
"last_push": datetime.datetime.now().isoformat()
}
}
return alert
#Setup MQTT Client
def mqtt_connect():
#MQTT Server config
broker_address = os.getenv('mqttsvr')
port = int(os.getenv('mqttport'))
user = os.getenv('mqttusr')
password = os.getenv('<PASSWORD>')
client = mqttClient.Client("open_weather_radio") #create new instance
client.username_pw_set(user, password=password) #set username and password
client.on_connect = on_connect #attach function to callback
client.connect(broker_address, port=port) #connect to broker
client.loop_start() #start the loop
while Connected != True: #Wait for connection
time.sleep(0.1)
return client
def on_connect(client, userdata, flags, rc):
if rc == 0:
print(datetime.datetime.now(),"- MQTT broker Connected")
global Connected
Connected = True #Signal connection
else:
print(datetime.datetime.now(),"- MQTT broker Connection failed")
#MQTT Client Disconnect
def mqtt_disconnect(client):
    client.disconnect()  # fixed: original passed print()'s return value (None) as an argument
    print(datetime.datetime.now(), "- MQTT Client disconnected")
    client.loop_stop()
    global Connected
    Connected = False
#MQTT status topic
def owr_status():
while True:
try:
client = mqtt_connect()
print(datetime.datetime.now(),"- Sending MQTT online")
client.publish("open_weather_radio/status","online")
client.publish("open_weather_radio/alerts",json.dumps(build_alert()))
mqtt_disconnect(client)
time.sleep(300)
except Exception as e:
print("somethin went wrong: " + str(e))
#Send SAME message when triggered
def owr_send_alert():
#Alert values
TYPE = sys.argv[1]
MSG = sys.argv[2]
ORG = sys.argv[3]
EEE = sys.argv[4]
PSSCCC = sys.argv[5]
TTTT = sys.argv[6]
JJJHHMM = sys.argv[7]
LLLLLLLL = sys.argv[8]
LANG = sys.argv[9]
print(TYPE, MSG, ORG, EEE, PSSCCC, TTTT, JJJHHMM, LLLLLLLL, LANG)
#Send via mqtt
client = mqtt_connect()
print(datetime.datetime.now(),"- Sending EAS Alert via MQTT")
client.publish("open_weather_radio/status","online")
client.publish("open_weather_radio/alerts",json.dumps(build_alert(TYPE, MSG, ORG, EEE, PSSCCC, TTTT, JJJHHMM, LLLLLLLL, LANG)))
mqtt_disconnect(client)
#main
if sys.argv[1] == 'status':
owr_status()
else:
owr_send_alert()
``` |
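
For reference, the idle payload shape published to open_weather_radio/alerts; this mirrors build_alert() with default arguments (the timestamp will differ):

```python
import json, datetime

# Reproduction of the idle alert payload published by owr_status() above.
payload = {
    "alert": "online",
    "attr": {
        "message": "", "org": "", "event_code": "", "location_codes": "",
        "purge_time": "", "utc_purge_time": "", "station_callsign": "",
        "language": "", "last_push": datetime.datetime.now().isoformat(),
    },
}
print(json.dumps(payload, indent=2))
```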
{
"source": "jpt/Glyphs-Scripts",
"score": 3
} |
#### File: Glyphs-Scripts/App/Copy Download URL for Current App Version.py
```python
from __future__ import division, print_function, unicode_literals
__doc__="""
Puts the download URL of the current Glyphs app version into your clipboard for easy pasting.
"""
from AppKit import NSPasteboard, NSStringPboardType
def setClipboard( myText ):
"""
Sets the contents of the clipboard to myText.
Returns True if successful, False if unsuccessful.
"""
try:
myClipboard = NSPasteboard.generalPasteboard()
myClipboard.declareTypes_owner_( [NSStringPboardType], None )
myClipboard.setString_forType_( myText, NSStringPboardType )
return True
except Exception as e:
return False
appURL = "https://updates.glyphsapp.com/Glyphs%s-%s.zip" % (
Glyphs.versionString,
Glyphs.buildNumber,
)
if not setClipboard(appURL):
print("Warning: could not set clipboard to %s" % ( "clipboard text" ))
Message(title="Clipboard Error", message="Could not set the clipboard for whatever reason, so here is the URL:\n%s"%appURL, OKButton=None)
else:
# Floating notification:
Glyphs.showNotification(
"Download link copied",
"Ready for pasting: %s"%appURL,
)
```
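
A sketch for verifying the paste step, reading the pasteboard back with the same PyObjC API (requires a macOS/PyObjC environment such as Glyphs' macro window):

```python
# Sketch: read the clipboard back to confirm setClipboard() worked.
from AppKit import NSPasteboard, NSStringPboardType

def getClipboard():
    return NSPasteboard.generalPasteboard().stringForType_(NSStringPboardType)

print(getClipboard())
```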
#### File: Glyphs-Scripts/Components/Find and Replace Cap and Corner Components.py
```python
from __future__ import division, print_function, unicode_literals
__doc__="""
Replaces Caps/Corners in all selected glyphs.
"""
import vanilla
class FindAndReplaceCorners( object ):
def __init__( self ):
# Window 'self.w':
windowWidth = 340
windowHeight = 135
self.w = vanilla.Window(
( windowWidth, windowHeight ), # default window size
"Replace Corners", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth, windowHeight ), # maximum size (for resizing)
autosaveName = "com.mekkablue.FindAndReplaceCorners.mainwindow" # stores last window position and size
)
thisFont = Glyphs.font
self.corners = self.allCorners(thisFont)
# UI elements:
margin = 25
self.w.textSearch = vanilla.TextBox((margin+5, 12+2, 80, 18), "Find:")
self.w.searchFor = vanilla.PopUpButton((margin+80, 12, -margin, 22), self.corners)
self.w.textReplace = vanilla.TextBox((margin, 32+12+2, 80, 18), "Replace with:")
self.w.replaceBy = vanilla.PopUpButton((margin+80, 32+12, -margin, 22), self.corners)
self.w.replaceButton = vanilla.Button((-70 - margin, 63+12+1, -margin, 22), "Replace", callback=self.FindAndReplaceCornersMain)
self.w.setDefaultButton( self.w.replaceButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Replace Corners' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def SavePreferences( self, sender ):
try:
Glyphs.defaults["com.mekkablue.FindAndReplaceCorners.searchFor"] = self.w.searchFor.get()
Glyphs.defaults["com.mekkablue.FindAndReplaceCorners.replaceBy"] = self.w.replaceBy.get()
except:
return False
return True
def LoadPreferences( self ):
try:
self.w.searchFor.set( Glyphs.defaults["com.mekkablue.FindAndReplaceCorners.searchFor"] )
self.w.replaceBy.set( Glyphs.defaults["com.mekkablue.FindAndReplaceCorners.replaceBy"] )
except:
return False
return True
def allCorners(self, font):
corners = []
for g in font.glyphs:
if g.name.startswith("_corner.") or g.name.startswith("_cap."):
corners.append(g.name)
return corners
def FindAndReplaceCornersMain( self, sender ):
try:
if not self.SavePreferences( self ):
print("Note: 'Replace Corners' could not write preferences.")
searchString = self.corners[self.w.searchFor.get()]
replaceString = self.corners[self.w.replaceBy.get()]
# print "__searchString", searchString, " replaceString", replaceString
if len(searchString) < 2 or not (searchString.startswith("_cap.") or searchString.startswith("_corner.")):
Message("Invalid search", "A string needs to given and it has to start with '_cap.' or '_corner.'")
return
thisFont = Glyphs.font # frontmost font
listOfSelectedLayers = thisFont.selectedLayers # active layers of currently selected glyphs
newCornerGlyph = thisFont.glyphs[replaceString]
if newCornerGlyph is None:
Message("Missing Glyph", "Could not find glyph: \"%s\"" % replaceString)
return
for thisLayer in listOfSelectedLayers: # loop through layers
for thisHint in thisLayer.hints:
if thisHint.type == CORNER or thisHint.type == CAP:
if thisHint.name == searchString:
thisHint.setName_(replaceString)
print(" %s" % ( thisLayer.parent.name ))
# displayReportString = True
# if displayReportString:
# Glyphs.showMacroWindow()
self.w.close() # delete if you want window to stay open
except Exception as e:
# brings macro window to front and reports error:
import traceback
print(traceback.format_exc())
Glyphs.showMacroWindow()
# brings macro window to front and clears its log:
Glyphs.clearLog()
FindAndReplaceCorners()
```
#### File: Glyphs-Scripts/Kerning/KernCrash Current Glyph.py
```python
#MenuTitle: KernCrash Current Glyph
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Opens a new tab containing kerning combos with the current glyph that collide in the current fontmaster.
"""
from AppKit import NSNotFound, NSAffineTransform
from kernanalysis import effectiveKerning
exceptions="""
.notdef
Ldot ldot ldot.sc
Jacute jacute jacute.sc
periodcentered.loclCAT periodcentered.loclCAT.case periodcentered.loclCAT.sc
currency
emptyset
infinity
integral
product
summation
radical
partialdiff
lozenge
paragraph
asciicircum
"""
# def effectiveKerning( leftGlyphName, rightGlyphName, thisFont, thisFontMasterID):
# leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID]
# rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID]
# if Glyphs.versionNumber < 3:
# effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer )
# else:
# effectiveKerning = leftLayer.nextKerningForLayer_direction_(rightLayer, leftLayer.parent.direction)
# if effectiveKerning < NSNotFound:
# return effectiveKerning
# else:
# return 0.0
def pathCountOnLayer( thisLayer ):
thisLayer.removeOverlap()
return len( thisLayer.paths )
def pathCount( thisGlyph, thisFontMasterID ):
thisLayer = thisGlyph.layers[thisFontMasterID].copyDecomposedLayer()
return pathCountOnLayer(thisLayer)
def pathCountForGlyphName( glyphName, thisFont, thisFontMasterID ):
thisGlyph = thisFont.glyphs[glyphName]
return pathCount( thisGlyph, thisFontMasterID )
def pathCountInKernPair( firstGlyphName, secondGlyphName, thisFont, thisFontMasterID, minDistance ):
#ligatureName = "%s_%s" % ( nameUntilFirstPeriod(firstGlyphName), nameUntilFirstPeriod(secondGlyphName) )
#newGlyph = thisFont.newGlyphWithName_changeName_( "_deleteMe", False )
ligatureLayer = thisFont.glyphs[secondGlyphName].layers[thisFontMasterID].copyDecomposedLayer()
addedLayer = thisFont.glyphs[firstGlyphName].layers[thisFontMasterID].copyDecomposedLayer()
# position of right component:
kerning = effectiveKerning( firstGlyphName, secondGlyphName, thisFont, thisFontMasterID )
rightShift = NSAffineTransform.transform()
rightShift.translateXBy_yBy_( addedLayer.width + kerning - minDistance, 0.0 )
ligatureLayer.transform_checkForSelection_( rightShift, False )
for addedPath in addedLayer.paths:
if Glyphs.versionNumber < 3:
ligatureLayer.addPath_( addedPath.copy() )
else:
ligatureLayer.addShape_( addedPath.copy() )
return pathCountOnLayer( ligatureLayer )
try:
# query frontmost fontmaster:
thisFont = Glyphs.font
thisFontMaster = thisFont.selectedFontMaster
thisFontMasterID = thisFontMaster.id
if not thisFont.selectedLayers:
Message(title="No glyph selected", message="The script could not determine the current glyph. Please select a glyph and try again.", OKButton=None)
else:
thisGlyph = thisFont.selectedLayers[0].parent
# brings macro window to front and clears its log:
Glyphs.clearLog()
Glyphs.showMacroWindow()
print("KernCrash Current Glyph Report for %s, master %s:\n" % (thisFont.familyName, thisFontMaster.name))
# get list of glyph names:
currentGlyphName = thisGlyph.name
exceptionList = exceptions.split()
completeSet = [g.name for g in thisFont.glyphs
if g.export
and g.name not in exceptionList # excluded glyphs, list at beginning of this .py
and g.subCategory != "Nonspacing" # no combining accents
]
# get pathcounts for every glyph:
pathCountDict = {}
for thisGlyphName in completeSet:
pathCountDict[thisGlyphName] = pathCountForGlyphName( thisGlyphName, thisFont, thisFontMasterID )
# all possible kern pairs:
tabStringLeftGlyphs = []
tabStringRightGlyphs = []
for otherGlyphName in completeSet:
firstCount = pathCountDict[currentGlyphName]
secondCount = pathCountDict[otherGlyphName]
# current glyph on left side:
kernCount = pathCountInKernPair( currentGlyphName, otherGlyphName, thisFont, thisFontMasterID, 0.0 )
if firstCount + secondCount > kernCount:
tabStringLeftGlyphs.append(otherGlyphName)
# += "/%s/%s/space" % ( firstGlyphName, secondGlyphName )
			# current glyph on right side:
kernCount = pathCountInKernPair( otherGlyphName, currentGlyphName, thisFont, thisFontMasterID, 0.0 )
if firstCount + secondCount > kernCount:
tabStringRightGlyphs.append(otherGlyphName)
#tabStringLeft += "/%s/%s/space" % ( firstGlyphName, secondGlyphName )
# open new Edit tab:
if tabStringLeftGlyphs or tabStringRightGlyphs:
Glyphs.showNotification('KernCrash Current Glyph', 'Some kerning crashes have been found.')
# opens new Edit tab:
tabStrings = []
if tabStringLeftGlyphs:
inBetween = " /%s/" % currentGlyphName
tabStrings.append( "/%s/"%currentGlyphName + inBetween.join(tabStringLeftGlyphs) )
print("Colliding glyphs when %s is on the LEFT:\n%s\n" % ( currentGlyphName, " ".join(tabStringLeftGlyphs) ))
if tabStringRightGlyphs:
inBetween = "/%s /" % currentGlyphName
tabStrings.append( "/" + inBetween.join(tabStringRightGlyphs) + "/%s"%currentGlyphName )
print("Colliding glyphs when %s is on the RIGHT:\n%s\n" % ( currentGlyphName, " ".join(tabStringRightGlyphs) ))
thisFont.newTab( "\n\n".join(tabStrings) )
# Floating notification:
Glyphs.showNotification(
"KernCrashed %s, master ‘%s’" % (thisFont.familyName, thisFontMaster.name),
"Found %i kerning collisions with %s. Details in Macro Window" % ( len(tabStringRightGlyphs)+len(tabStringLeftGlyphs), currentGlyphName ),
)
# or report that nothing was found:
else:
# Floating notification:
Glyphs.showNotification(
"KernCrashed %s, master ‘%s’:" % (thisFont.familyName, thisFontMaster.name),
"No collisions found for %s." % currentGlyphName,
)
except Exception as e:
Message("KernCrash Error", "KernCrash Current Glyph Error: %s\nTraceback in Macro Window." % e, OKButton=None)
import traceback
print(traceback.format_exc())
print(pathCountDict)
```
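
The collision test above relies on one invariant: when two outlines overlap, removing overlap merges them, so the merged pair has fewer contours than the two glyphs counted separately. A standalone illustration with shapely (shapely is an assumption for the demo, not used by the script):

```python
# Contour-count collision test on plain geometry.
from shapely.geometry import box
from shapely.ops import unary_union

def contour_count(geom):
    return len(getattr(geom, 'geoms', [geom]))  # 1 for Polygon, n for MultiPolygon

a, b = box(0, 0, 10, 10), box(5, 0, 15, 10)  # two overlapping "glyphs"
merged = unary_union([a, b])
print(contour_count(merged) < 2)  # True -> the shapes collide
```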
#### File: Glyphs-Scripts/Kerning/Remove all kerning exceptions.py
```python
from __future__ import division, print_function, unicode_literals
__doc__="""
Removes all kernings glyph-glyph, group-glyph, and glyph-group; only keeps group-group kerning.
"""
import vanilla
class RemoveKerningExceptions( object ):
prefDomain = "com.mekkablue.RemoveKerningExceptions"
def __init__( self ):
# Window 'self.w':
windowWidth = 300
windowHeight = 160
windowWidthResize = 100 # user can resize width by this value
windowHeightResize = 0 # user can resize height by this value
self.w = vanilla.FloatingWindow(
( windowWidth, windowHeight ), # default window size
"Remove Kerning Exceptions", # window title
minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
autosaveName = "%s.mainwindow"%self.prefDomain # stores last window position and size
)
# UI elements:
linePos, inset, lineHeight = 12, 15, 22
self.w.glyphGlyph = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🅰️🅰️ glyph-to-glyph pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.glyphGroup = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🅰️🔠 glyph-to-group pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.groupGlyph = vanilla.CheckBox( (inset, linePos-1, -inset, 20), "Remove 🔠🅰️ group-to-glyph pairs", value=True, callback=self.SavePreferences, sizeStyle='small' )
linePos += lineHeight
self.w.removeOnMastersText = vanilla.TextBox( (inset, linePos+2, 70, 14), "Remove on:", sizeStyle='small', selectable=True )
self.w.removeOnMasters = vanilla.PopUpButton( (inset+70, linePos, -inset, 17), ("current master", "⚠️ all masters of current font", "⚠️ all masters of ⚠️ all open fonts"), sizeStyle='small', callback=self.SavePreferences )
linePos += lineHeight
# Run Button:
self.w.runButton = vanilla.Button( (-100-inset, -20-inset, -inset, -inset), "Remove", sizeStyle='regular', callback=self.RemoveKerningExceptionsMain )
self.w.setDefaultButton( self.w.runButton )
# Load Settings:
if not self.LoadPreferences():
print("Note: 'Remove Kerning Exceptions' could not load preferences. Will resort to defaults")
# Open window and focus on it:
self.w.open()
self.w.makeKey()
def updateGUI(self, sender=None):
anyOptionIsSelected = self.w.glyphGlyph.get() or self.w.glyphGroup.get() or self.w.groupGlyph.get()
self.w.runButton.enable(anyOptionIsSelected)
def domain(self, key):
return "%s.%s" % (self.prefDomain, key)
def preference(self, key):
domain = self.domain(key)
return Glyphs.defaults[domain]
def SavePreferences( self, sender=None ):
try:
# write current settings into prefs:
Glyphs.defaults[self.domain("glyphGlyph")] = self.w.glyphGlyph.get()
Glyphs.defaults[self.domain("glyphGroup")] = self.w.glyphGroup.get()
Glyphs.defaults[self.domain("groupGlyph")] = self.w.groupGlyph.get()
Glyphs.defaults[self.domain("removeOnMasters")] = self.w.removeOnMasters.get()
self.updateGUI()
return True
except:
import traceback
print(traceback.format_exc())
return False
def LoadPreferences( self ):
try:
# register defaults:
Glyphs.registerDefault(self.domain("glyphGlyph"), 1)
Glyphs.registerDefault(self.domain("glyphGroup"), 1)
Glyphs.registerDefault(self.domain("groupGlyph"), 1)
Glyphs.registerDefault(self.domain("removeOnMasters"), 0)
# load previously written prefs:
self.w.glyphGlyph.set( self.preference("glyphGlyph") )
self.w.glyphGroup.set( self.preference("glyphGroup") )
self.w.groupGlyph.set( self.preference("groupGlyph") )
self.w.removeOnMasters.set( self.preference("removeOnMasters") )
return True
except:
import traceback
print(traceback.format_exc())
return False
def RemoveKerningExceptionsMain( self, sender=None ):
try:
# clear macro window log:
Glyphs.clearLog()
# update settings to the latest user input:
if not self.SavePreferences():
print("Note: 'Remove Kerning Exceptions' could not write preferences.")
thisFont = Glyphs.font # frontmost font
if thisFont is None:
Message(title="No Font Open", message="The script requires at least one font. Open a font and run the script again.", OKButton=None)
else:
glyphGlyph = self.preference("glyphGlyph")
glyphGroup = self.preference("glyphGroup")
groupGlyph = self.preference("groupGlyph")
removeOnMasters = self.preference("removeOnMasters")
if removeOnMasters==2:
fonts = Glyphs.fonts
allMasters = True
else:
fonts = (thisFont,)
if removeOnMasters==0:
allMasters = False
else:
allMasters = True
for thisFont in fonts:
print("\nRemoving kerning exceptions in: %s" % thisFont.familyName)
if thisFont.filepath:
print("📄 %s" % thisFont.filepath)
else:
print("⚠️ The font file has not been saved yet.")
totalCount = 0
for thisMaster in thisFont.masters:
if allMasters or thisMaster==thisFont.selectedFontMaster:
pairsToBeRemoved = []
for leftSide in thisFont.kerning[thisMaster.id].keys():
leftSideIsGlyph = not leftSide.startswith("@")
for rightSide in thisFont.kerning[thisMaster.id][leftSide].keys():
rightSideIsGlyph = not rightSide.startswith("@")
removeGlyphGlyph = leftSideIsGlyph and rightSideIsGlyph and glyphGlyph
removeGlyphGroup = leftSideIsGlyph and not rightSideIsGlyph and glyphGroup
removeGroupGlyph = not leftSideIsGlyph and rightSideIsGlyph and groupGlyph
if removeGroupGlyph or removeGlyphGroup or removeGlyphGlyph:
pairsToBeRemoved.append( (leftSide, rightSide) )
countOfDeletions = len(pairsToBeRemoved)
totalCount += countOfDeletions
print("🚫 Removing %i pairs in master ‘%s’..." % ( countOfDeletions, thisMaster.name))
for pair in pairsToBeRemoved:
left, right = pair
if not left.startswith("@"):
left = thisFont.glyphForId_(left).name
if not right.startswith("@"):
right = thisFont.glyphForId_(right).name
thisFont.removeKerningForPair(thisMaster.id, left, right)
# Final report:
Glyphs.showNotification(
"Removed %i Exceptions" % (totalCount),
"Processed %i font%s. Details in Macro Window" % (
len(fonts),
"" if len(fonts)!=1 else "s",
),
)
print("\nDone.")
except Exception as e:
# brings macro window to front and reports error:
Glyphs.showMacroWindow()
print("Remove Kerning Exceptions Error: %s" % e)
import traceback
print(traceback.format_exc())
RemoveKerningExceptions()
``` |
{
"source": "jpthewes/Airbnb_Seattle_data_analysis",
"score": 3
} |
#### File: jpthewes/Airbnb_Seattle_data_analysis/analysis.py
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# numerical columns to drop
NUM_DROP = ['id', 'listing_url', 'scrape_id', 'host_id', 'host_listings_count',
'host_total_listings_count', 'latitude', 'longitude',
'availability_30', 'availability_60', 'availability_90',
'availability_365', 'calculated_host_listings_count',
'security_deposit', 'reviews_per_month']
# categorical values to drop
CAT_DROP = ['host_since', 'host_location', 'street', 'neighbourhood_cleansed',
'neighbourhood_group_cleansed', 'zipcode', 'country_code',
'requires_license', 'host_verifications', 'market',
'smart_location', 'amenities', 'calendar_updated', 'last_review',
'has_availability', 'country', 'last_scraped', 'name', 'summary',
'space', 'description', 'experiences_offered',
'neighborhood_overview', 'notes', 'transit', 'city', 'state',
'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url',
'host_neighbourhood', 'host_url', 'host_name','host_about',
'host_thumbnail_url', 'host_picture_url','calendar_last_scraped',
'first_review', 'jurisdiction_names']
def coef_weights(coefficients, X_train):
'''
INPUT:
coefficients - the coefficients of the linear model
X_train - the training data, so the column names can be used
OUTPUT:
coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
'''
coefs_df = pd.DataFrame()
coefs_df['est_int'] = X_train.columns
coefs_df['coefs'] = coefficients
coefs_df['abs_coefs'] = np.abs(coefficients)
coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
return coefs_df
def create_dummy_df(df, cat_cols, dummy_na):
'''
INPUT:
df - pandas dataframe with categorical variables you want to dummy
cat_cols - list of strings that are associated with names of the categorical columns
dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not
OUTPUT:
df - a new dataframe that has the following characteristics:
1. contains all columns that were not specified as categorical
2. removes all the original columns in cat_cols
3. dummy columns for each of the categorical columns in cat_cols
4. if dummy_na is True - it also contains dummy columns for the NaN values
5. Use a prefix of the column name with an underscore (_) for separating
'''
for col in cat_cols:
try:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(col, axis=1), pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na)], axis=1)
except:
continue
return df
def remove_much_nans(df, rate_max=0.7):
cols_to_drop = set(df.columns[df.isnull().mean()>rate_max])
print("dropping columns because of to many NaN Values:", cols_to_drop)
df = df.drop(columns=cols_to_drop)
return df
def clean_data(df, response_value, extra_drop_for_X):
'''
INPUT
df - pandas dataframe
OUTPUT
X - A matrix holding all of the variables you want to consider when predicting the response
y - the corresponding response vector
'''
    # EXTRA: strip currency/percent formatting and convert the columns to float64
    # (regex=False makes str.replace treat '$' literally; as a regex, '$' is an
    # end-of-string anchor and the dollar sign would survive the replace)
    for col in ['price', 'weekly_price', 'monthly_price', 'extra_people',
                'cleaning_fee', 'security_deposit']:
        df[col] = df[col].str.replace(',', '', regex=False).str.replace('$', '', regex=False).astype('float')
    for col in ['host_response_rate', 'host_acceptance_rate']:
        df[col] = df[col].str.replace('%', '', regex=False).astype('float')
# 1-4:
# remove rows where response value is missing
df = df.dropna(subset=[response_value], axis=0)
# remove columns without any values
df = df.dropna(how='all', axis=1)
# drop not useful columns for prediction
df = df.drop(columns=NUM_DROP, axis=1)
df = df.drop(columns=CAT_DROP, axis=1)
df = remove_much_nans(df)
# drop data which confuses the prediction / has not much meaning because of missing data
num_df = df.select_dtypes(exclude=['object'])
num_columns = num_df.columns
df = df.dropna(subset=num_columns, thresh=len(num_columns)-2)
df.to_csv("after_thresh_drop_dataframe.csv")
# split off response dataframe
y = df[response_value]
# plot numerical graphs
pre_plot(num_df)
# drop response values from future X
extra_drop_for_X.append(response_value) # drop also values which might be related to response value
print(f"Excluding {extra_drop_for_X} from the model.")
df = df.drop(columns=extra_drop_for_X, axis=1)
# fill remaining NaN values of numerical columns with mean
num_df = df.select_dtypes(exclude=['object'])
num_columns = num_df.columns
for col in num_columns:
df[col].fillna((df[col].mean()), inplace=True)
# take care of categorical columns
cat_columns = df.select_dtypes(include=['object']).columns
df = create_dummy_df(df, cat_columns, dummy_na=True)
# save for overviiew of final dataframe
df.to_csv("X_dataframe.csv")
X = df
return X, y
def pre_plot(df):
df.hist()
plt.show()
sns.heatmap(df.corr(), annot=True, fmt=".2f")
plt.show()
def fit_train_test_model(X, y, test_size=0.3, rand_state=42):
# only use columns where more than a certain number of values are provided
cutoff = 40
X = X.iloc[:, np.where((X.sum() > cutoff) == True)[0]]
#Split into train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=rand_state)
lm_model = LinearRegression(normalize=True) # Instantiate
lm_model.fit(X_train, y_train) #Fit
#Predict using your model
y_test_preds = lm_model.predict(X_test)
y_train_preds = lm_model.predict(X_train)
#Score using your model
test_score = mean_squared_error(y_test, y_test_preds)
train_score = mean_squared_error(y_train, y_train_preds)
return test_score, train_score, lm_model, X_train, X_test, y_train, y_test
def main():
"""
This analysis aims to answer the following questions (Business Understanding):
1. What are the key factors determining the price of the appartments?
2. Will I be happier if I spend those extra dollars for the upgrade?
For that, the price and rating scores of Airbnbs are used as a response
value of a linear regression model. After training and testing the models,
taking a look at the coefficients of the model should lead to answering to
above questions.
This can bring us a better Business Understanding through Data Science
while following the CRISP-DM process (steps are commented in code)!
"""
df = pd.read_csv("data/listings.csv")
# get an insight of the data --> CRISP-DM Data Understanding
pre_plot(df)
print("##################### Price")
# --> CRISP-DM Data Preparation
X,y = clean_data(df, "price", ['weekly_price', 'monthly_price'])
#--> CRISP-DM Modeling and Evaluation
test_score, train_score, lm_model, X_train, X_test, y_train, y_test = fit_train_test_model(X, y)
print("The score on the test data is: ", test_score)
print("The score on the training data is: ", train_score)
print("These are the highest and lowest 20 coefficients:")
coef_df = coef_weights(lm_model.coef_, X_train)
print(coef_df.head(20))
print(coef_df.tail(20))
print("##################### Price w/o neighbourhood")
df = pd.read_csv("data/listings.csv")
# --> CRISP-DM Data Preparation
X,y = clean_data(df, "price", ['weekly_price', 'monthly_price', 'neighbourhood'])
#--> CRISP-DM Modeling and Evaluation
test_score, train_score, lm_model, X_train, X_test, y_train, y_test = fit_train_test_model(X, y)
print("The score on the test data is: ", test_score)
print("The score on the training data is: ", train_score)
print("These are the highest and lowest 20 coefficients:")
coef_df = coef_weights(lm_model.coef_, X_train)
print(coef_df.head(20))
print(coef_df.tail(20))
print("##################### Ratings")
df = pd.read_csv("data/listings.csv")
# --> CRISP-DM Data Preparation
X,y = clean_data(df, 'review_scores_rating', ['review_scores_accuracy',
'review_scores_cleanliness', 'review_scores_checkin',
'review_scores_communication', 'review_scores_location',
'review_scores_value'])
#--> CRISP-DM Modeling and Evaluation
test_score, train_score, lm_model, X_train, X_test, y_train, y_test = fit_train_test_model(X, y)
print("The score on the test data is: ", test_score)
print("The score on the training data is: ", train_score)
print("These are the highest and lowest 20 coefficients:")
coef_df = coef_weights(lm_model.coef_, X_train)
print(coef_df.head(20))
print(coef_df.tail(20))
if __name__ == "__main__":
main()
``` |
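
A toy demonstration of the dummy-encoding that create_dummy_df applies during cleaning (toy frame, not the Airbnb listings):

```python
import pandas as pd

# Same get_dummies call as in create_dummy_df above, on a two-column frame.
toy = pd.DataFrame({'room_type': ['Entire home', 'Private room', None],
                    'price': [120.0, 60.0, 80.0]})
dummied = pd.concat([toy.drop('room_type', axis=1),
                     pd.get_dummies(toy['room_type'], prefix='room_type',
                                    prefix_sep='_', drop_first=True, dummy_na=True)],
                    axis=1)
print(dummied.columns.tolist())
# -> ['price', 'room_type_Private room', 'room_type_nan'] (first level dropped)
```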
{
"source": "jpthewes/SDC_behavioral_cloning",
"score": 3
} |
#### File: jpthewes/SDC_behavioral_cloning/model.py
```python
import cv2
import os
import csv
import numpy as np
import sklearn
from math import ceil
from keras.models import Sequential, Model
from keras.layers import Cropping2D, Flatten, Dense, Lambda, Convolution2D, Dropout
from sklearn.model_selection import train_test_split
from scipy import ndimage
DATAPATH = '/home/workspace/full_round/'
# read in driving_log
samples = []
with open(DATAPATH + 'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
samples = samples[1:] # skip first line
print("read in SAMPLES")
# samle into training and validation set (20% is validation set)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# use generator to get data on the fly while training the model
def generator(samples, batch_size=32):
num_samples = len(samples)
batch_size = int(batch_size/3) # for 3 pictures(left, right, center) each iteration, therefore roughly keeping batch size
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
center_name = os.path.join(DATAPATH , './IMG/' , batch_sample[0].split('/')[-1])
center_image = ndimage.imread(center_name)
center_angle = float(batch_sample[3])
images.append(center_image)
angles.append(center_angle)
right_name = os.path.join(DATAPATH , './IMG/' , batch_sample[2].split('/')[-1])
right_image = ndimage.imread(right_name)
right_angle = float(batch_sample[3]) - 0.2 # 0.2 degrees correction
images.append(right_image)
angles.append(right_angle)
left_name = os.path.join(DATAPATH , './IMG/' , batch_sample[1].split('/')[-1])
left_image = ndimage.imread(left_name)
left_angle = float(batch_sample[3]) + 0.2 # 0.2 degrees correction
images.append(left_image)
angles.append(left_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# Set batch size
batch_size=32
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=batch_size)
validation_generator = generator(validation_samples, batch_size=batch_size)
## model layout:
model = Sequential()
model.add(Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320,3))) # normalize input
model.add(Cropping2D(cropping=((60,20),(0,0)))) # crop input images to the relevant field of view
model.add(Convolution2D(24, kernel_size=(5,5), strides=(2,2), activation="relu"))
model.add(Convolution2D(36, kernel_size=(5,5), strides=(2,2), activation="relu"))
#model.add(Dropout(0.5)) # These have been shown to decrease driving performance
model.add(Convolution2D(48, kernel_size=(5,5), strides=(2,2), activation="relu"))
#model.add(Dropout(0.5)) # These have been shown to decrease driving performance
model.add(Convolution2D(64, kernel_size=(3,3), activation="relu"))
model.add(Convolution2D(64, kernel_size=(3,3), activation="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1)) # 1 output as 1 param is to be predicted (steering angle)
print(model.summary())
###Compile and train model
model.compile(loss='mse', optimizer='adam')
print("compiled")
model.fit_generator(train_generator,
steps_per_epoch=ceil(len(train_samples)/batch_size),
validation_data=validation_generator,
validation_steps=ceil(len(validation_samples)/batch_size),
epochs=4, verbose=1)
model.save('my_loop_model.h5')
print("saved the model")
``` |
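A quick sanity check before a long training run is to pull a single batch from the generator above and inspect its shapes; a minimal sketch, assuming the same `DATAPATH` layout as in model.py:

```python
# Minimal sketch: with batch_size=32 the generator uses int(32/3) = 10 log rows
# per yield and emits center/left/right images for each, i.e. 30 images.
X_batch, y_batch = next(generator(train_samples, batch_size=32))
print(X_batch.shape)  # expected: (30, 160, 320, 3)
print(y_batch.shape)  # expected: (30,)
```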
{
"source": "jpthiery/moodcaptor",
"score": 2
} |
#### File: moodcaptor-backend/tests/test_hello.py
```python
from domain.model import Group
def test_hello():
group = Group('test')
assert group.name == 'test'
``` |
{
"source": "jpthiery/paclair",
"score": 3
} |
#### File: paclair/paclair/exceptions.py
```python
class PaclairException(Exception):
"""
Error base class
"""
pass
class ConfigurationError(PaclairException):
"""
Error reading configuration file
"""
pass
class ClairConnectionError(PaclairException):
"""
Error reaching Clair
"""
def __init__(self, response):
"""
Constructor
:param response: requests.response
"""
super().__init__(response.reason)
self.response = response
class ResourceNotFoundException(PaclairException):
"""
Resource not found
"""
pass
class PluginNotFoundException(PaclairException):
"""
Unknown plugin
"""
pass
class RegistryAccessError(PaclairException):
"""
Error reaching registry
"""
pass
```
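For context, a short usage sketch of `ClairConnectionError`; the URL below is a hypothetical Clair endpoint, not something defined in this repository:

```python
import requests

from paclair.exceptions import ClairConnectionError

response = requests.get('http://clair:6060/v1/namespaces')  # hypothetical URL
if response.status_code != requests.codes.ok:
    # The exception keeps the whole response so callers can inspect it later.
    raise ClairConnectionError(response)
```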
#### File: paclair/paclair/logged_object.py
```python
import logging
class LoggedObject:
"""
Easy access to logging
"""
def __init__(self, logger=None):
"""
Constructor
:param logger: logger
"""
self.logger = logger or logging.getLogger(__name__)
```
#### File: paclair/plugins/http_plugin.py
```python
import requests
from paclair.ancestries.generic import GenericAncestry, Layer
from paclair.plugins.abstract_plugin import AbstractPlugin
from paclair.exceptions import ResourceNotFoundException
class HttpPlugin(AbstractPlugin):
"""
Http plugin
"""
def __init__(self, clair, clair_format, base_url, verify):
"""
Constructor
:param clair: ClairRequest object
:param clair_format: Clair format
:param base_url: base url
:param verify: request verify certificate
"""
super().__init__(clair, clair_format)
self.base_url = base_url
self.verify = verify
@staticmethod
def _clean_name(name):
"""
Delete extension and path
:param name: the name to clean
:return:
"""
# Delete ext
if name.endswith('.tar.gz'):
name = name[:-7]
elif name.endswith('.tgz'):
name = name[:-4]
# Delete subpath
_, _, name = name.rpartition('/')
return name
def create_ancestry(self, name):
path = "{}/{}".format(self.base_url, name)
result = requests.head(path, verify=self.verify)
if result.status_code != requests.codes.ok:
raise ResourceNotFoundException("{} not found".format(name))
name = self._clean_name(name)
        return GenericAncestry(name, self.clair_format, [Layer(name, name, path)])
def analyse(self, name, output=None):
return super().analyse(self._clean_name(name), output)
```
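Since `_clean_name` is a staticmethod, its behavior is easy to verify in isolation; a small sketch derived directly from the code above:

```python
from paclair.plugins.http_plugin import HttpPlugin

assert HttpPlugin._clean_name('repo/sub/image.tar.gz') == 'image'
assert HttpPlugin._clean_name('image.tgz') == 'image'
assert HttpPlugin._clean_name('plain-name') == 'plain-name'  # no extension or path
```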
#### File: paclair/paclair_tests/test_http_plugin.py
```python
import logging
import unittest
import requests_mock
from paclair.api.clair_requests_v1 import ClairRequestsV1
from paclair.exceptions import ResourceNotFoundException
from paclair.plugins.http_plugin import HttpPlugin
class TestHttpPlugin(unittest.TestCase):
"""
Test de l'objet CfPlugin
"""
artifacURI = 'http://artifac'
artifacVERIFY = 'verifymock'
clairURI = 'http://clair'
def setUp(self):
self.cf = HttpPlugin(ClairRequestsV1(self.clairURI), "cflinuxfs", self.artifacURI, self.artifacVERIFY)
@requests_mock.mock()
def test_push_not_found(self, m):
"""
Test de la méthode push quand l'image n'existe pas
"""
# mock artifactory reponse
m.head(self.artifacURI + "/tutu", status_code = 404)
with self.assertRaises(ResourceNotFoundException):
self.cf.push("tutu")
@requests_mock.mock()
def test_push_found(self, m):
"""
Test de la méthode push quand l'image existe
"""
# mock artifactory reponse
m.head(self.artifacURI + "/tutu", status_code = 200)
        # mock the clair response
m.post(self.clairURI + '/v1/layers')
self.cf.push("tutu")
clair_data = {'Layer': {'Path': self.artifacURI + "/tutu", 'Format': 'cflinuxfs', 'Name': 'tutu'}}
self.assertEqual(m.last_request.json(), clair_data)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
``` |
{
"source": "jpthompson17/InterMol",
"score": 3
} |
#### File: intermol/forces/angle_restraint.py
```python
from intermol.decorators import *
class AngleRestraint(object):
@accepts_compatible_units(None, None, None, None,
units.degrees, units.kilojoules_per_mole, None)
def __init__(self, atom1, atom2, atom3, atom4, theta, k, multiplicity):
"""
"""
if atom1:
self.atom1 = atom1
if atom2:
self.atom2 = atom2
if atom3:
self.atom3 = atom3
if atom4:
self.atom4 = atom4
self.theta = theta
self.k = k
self.multiplicity = multiplicity
def get_parameters(self):
        return (self.atom1, self.atom2, self.atom3, self.atom4, self.theta, self.k, self.multiplicity)
```
#### File: intermol/forces/bond.py
```python
from intermol.decorators import *
from abstract_bond import *
class Bond(AbstractBond):
__slots__ = ['length', 'k', 'order', 'c']
@accepts_compatible_units(None,
None,
units.nanometers,
units.kilojoules_per_mole * units.nanometers**(-2),
None,
None)
def __init__(self, atom1, atom2, length, k, order=1, c=False): # default bond order is 1
"""
"""
AbstractBond.__init__(self, atom1, atom2)
self.length = length
self.k = k
self.order = order
self.c = c #constrained or not, Desmond only
def getparameters(self):
return (self.atom1, self.atom2, self.length, self.k)
def __repr__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.k)
def __str__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.k)
"""
def __hash__(self):
return hash(tuple([type(self), self.length._value, self.k._value]))
def __eq__(self, object):
if (type(self) == type(object)) and (self.length._value == object.length._value) and (self.k._value == object.k._value):
return True
else:
return False
"""
```
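A hedged usage sketch for `Bond`: the `accepts_compatible_units` decorator expects unit-bearing quantities, so plain floats would be rejected. The numeric values are illustrative OPLS-like C-C parameters, not taken from this repository:

```python
import intermol.unit as units

# length in nm, k in kJ/(mol nm^2); unitless floats would fail the unit check
b = Bond(1, 2,
         0.1529 * units.nanometers,
         224262.4 * units.kilojoules_per_mole * units.nanometers**(-2))
print(b.getparameters())
```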
#### File: intermol/forces/constraint.py
```python
from intermol.decorators import *
class Constraint(object):
    # Unit specifiers are interleaved to match __init__'s (atom, length)
    # argument order below so every length is checked in nanometers.
    @accepts_compatible_units(None, None, units.nanometers, None,
                              None, units.nanometers, None, units.nanometers,
                              None, units.nanometers, None, units.nanometers,
                              None, units.nanometers, None, units.nanometers,
                              None, units.nanometers)
def __init__(self, atom1, atom2, length1, type, atom3=None, length2=None, atom4=None, length3=None, atom5=None, length4=None, atom6=None, length5=None, atom7=None, length6=None, atom8=None, length7=None, atom9=None, length8=None):
"""
"""
self.type = type
        if type == 'HOH':
self.n = 2
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
self.length1 = length1
self.length2 = length2
self.length3 = length3
        elif type.startswith('AH'):
            self.n = int(type[-1])  # e.g. 'AH3' -> 3 constrained hydrogens
if self.n >= 1:
self.atom1 = atom1
self.atom2 = atom2
self.length1 = length1
if self.n >= 2:
self.atom3 = atom3
self.length2 = length2
self.length3 = None
if self.n >= 3:
self.atom4 = atom4
self.length3 = length3
if self.n >= 4:
self.atom5 = atom5
self.length4 = length4
if self.n >= 5:
self.atom6 = atom6
self.length5 = length5
if self.n >= 6:
self.atom7 = atom7
self.length6 = length6
if self.n >= 7:
self.atom8 = atom8
self.length7 = length7
if self.n == 8:
self.atom9 = atom9
self.length8 = length8
def getparameters(self):
if self.n == 1:
            return (self.atom1, self.atom2, self.length1, self.type)
elif self.n == 2:
if self.length3:
return (self.atom1, self.atom2, self.atom3, self.length1, self.length2, self.length3, self.type)
else:
return (self.atom1, self.atom2, self.atom3, self.length1, self.length2, self.type)
elif self.n == 3:
return (self.atom1, self.atom2, self.atom3, self.atom4, self.length1, self.length2, self.length3, self.type)
elif self.n == 4:
return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.length1, self.length2, self.length3, self.length4, self.type)
elif self.n == 5:
return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.length1, self.length2, self.length3, self.length4, self.length5, self.type)
elif self.n == 6:
return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.atom7, self.length1, self.length2, self.length3, self.length4, self.length5, self.length6, self.type)
elif self.n == 7:
return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.atom7, self.atom8, self.length1, self.length2, self.length3, self.length4, self.length5, self.length6, self.length7, self.type)
        elif self.n == 8:
            return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.atom7, self.atom8, self.atom9, self.length1, self.length2, self.length3, self.length4, self.length5, self.length6, self.length7, self.length8, self.type)
```
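With the unit checks aligned as above, constructing a constraint looks like this sketch; 'AH2' means one heavy atom with two constrained hydrogens, and the 0.1090 nm C-H bond length is only illustrative:

```python
import intermol.unit as units

c = Constraint(1, 2, 0.1090 * units.nanometers, 'AH2',
               atom3=3, length2=0.1090 * units.nanometers)
print(c.getparameters())  # (atom1, atom2, atom3, length1, length2, type)
```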
#### File: intermol/forces/cubic_bond.py
```python
from intermol.decorators import *
from abstract_bond import *
class CubicBond(AbstractBond):
__slots__ = ['length', 'C2', 'C3', 'order','c']
@accepts_compatible_units(None, None,
units.nanometers, units.kilojoules_per_mole * units.nanometers**(-2),
units.kilojoules_per_mole * units.nanometers**(-3), None, None)
def __init__(self, atom1, atom2, length, C2, C3, order=1, c=False): # default bond order is 1
"""
"""
AbstractBond.__init__(self, atom1, atom2)
self.length = length
self.C2 = C2
self.C3 = C3
self.order = order
self.c = c #constrained or not, Desmond only
def get_parameters(self):
return (self.atom1, self.atom2, self.length, self.C2, self.C3)
def __repr__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.C2) +' '+ str(self.C3)
def __str__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.C2) +' '+ str(self.C3)
```
#### File: intermol/forces/g96_bond.py
```python
from intermol.decorators import *
from abstract_bond import *
class G96Bond(AbstractBond):
__slots__ = ['length', 'k', 'order', 'c']
@accepts_compatible_units(None, None, units.nanometers, units.kilojoules_per_mole * units.nanometers**(-4), None, None)
def __init__(self, atom1, atom2, length, k, order=1, c=False): # default bond order is 1
"""
"""
AbstractBond.__init__(self, atom1, atom2)
self.length = length
self.k = k
self.order = order
self.c = c #constrained or not, Desmond only
def get_parameters(self):
return (self.atom1, self.atom2, self.length, self.k)
def __repr__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.k)
def __str__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.length) +' '+ str(self.k)
```
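The `nanometers**(-4)` units on `k` reflect the GROMOS-96 quartic bond form, V(r) = 0.25 * k * (r**2 - b0**2)**2, rather than the harmonic form; a plain-float sketch with illustrative numbers:

```python
# Illustrative evaluation of the G96 quartic bond potential (units stripped;
# with k in kJ/(mol nm^4) and lengths in nm the result is in kJ/mol).
def g96_bond_energy(r, b0, k):
    return 0.25 * k * (r**2 - b0**2)**2

print(g96_bond_energy(0.11, 0.10, 1.0e7))  # small stretch -> ~11 kJ/mol
```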
#### File: intermol/forces/LJ1_pair_CR1.py
```python
from intermol.decorators import *
from abstract_pair import *
class LJ1PairCR1(AbstractPair):
__slots__ = ['V', 'W']
@accepts_compatible_units(None, None,
units.kilojoules_per_mole * units.nanometers**(6),
units.kilojoules_per_mole * units.nanometers**(12))
def __init__(self, atom1, atom2, V=None, W=None):
"""
"""
AbstractPair.__init__(self, atom1, atom2)
self.V = V
self.W = W
def get_parameters(self):
return (self.atom1, self.atom2, self.V, self.W)
def __repr__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.V) +' '+ str(self.W)
def __str__(self):
return str(self.atom1) +' '+ str(self.atom2) +' '+ str(self.V) +' '+ str(self.W)
```
#### File: intermol/forces/orientation_restraint.py
```python
from intermol.decorators import *
class OrientationRestraint(object):
@accepts_compatible_units(None, None, None, None, None, None, units.amu, units.amu**(-1))
### units for 'c' should actually be units.amu * units.nanometers**(alpha) - unsure of method for implementation
def __init__(self, atom1, atom2, exp, label, alpha, c, obs, weight):
"""
"""
if atom1:
self.atom1 = atom1
if atom2:
self.atom2 = atom2
self.exp = exp
self.label = label
self.alpha = alpha
self.c = c
self.obs = obs
self.weight = weight
def get_parameters(self):
return (self.atom1, self.atom2, self.exp, self.label, self.alpha, self.c, self.obs, self.weight)
```
#### File: intermol/forces/RB_dihedral.py
```python
from intermol.decorators import *
from abstract_dihedral import *
class RBDihedral(AbstractDihedral):
@accepts_compatible_units(None, None, None, None,
units.kilojoules_per_mole, units.kilojoules_per_mole,
units.kilojoules_per_mole, units.kilojoules_per_mole,
units.kilojoules_per_mole, units.kilojoules_per_mole,
units.kilojoules_per_mole, None)
def __init__(self, atom1, atom2, atom3, atom4, C0, C1, C2, C3, C4, C5, C6, i=0):
"""
"""
AbstractDihedral.__init__(self, atom1, atom2, atom3, atom4, -1)
self.C0 = C0
self.C1 = C1
self.C2 = C2
self.C3 = C3
self.C4 = C4
self.C5 = C5
self.C6 = C6
self.i = i #improper or not--Desmond only
def get_parameters(self):
return (self.atom1, self.atom2, self.atom3, self.atom4,
self.C0, self.C1, self.C2, self.C3, self.C4, self.C5, self.C6)
def __repr__(self):
return "{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}".format(
self.atom1, self.atom2, self.atom3, self.atom4,
self.C0, self.C1, self.C2, self.C3, self.C4, self.C5, self.C6)
def __str__(self):
return "{0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10}".format(
self.atom1, self.atom2, self.atom3, self.atom4,
self.C0, self.C1, self.C2, self.C3, self.C4, self.C5, self.C6)
```
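For reference, these coefficients feed the Ryckaert-Bellemans form V(psi) = sum_n C_n * cos(psi)**n with psi = phi - 180 degrees; a plain-float sketch using classic butane-like coefficients (quoted from memory in kJ/mol, so treat them as illustrative):

```python
import math

def rb_energy(phi_deg, coeffs):
    # psi is shifted 180 degrees from the IUPAC dihedral convention
    psi = math.radians(phi_deg - 180.0)
    return sum(c * math.cos(psi)**n for n, c in enumerate(coeffs))

butane = [9.28, 12.16, -13.12, -3.06, 26.24, -31.5, 0.0]  # C0..C6
print(rb_energy(180.0, butane))  # trans minimum, sums to ~0
print(rb_energy(60.0, butane))   # gauche region, a few kJ/mol higher
```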
#### File: intermol/forces/settles.py
```python
from intermol.decorators import *
class Settles(object):
@accepts_compatible_units(None, units.nanometers, units.nanometers)
def __init__(self, atom1, dOH, dHH):
"""
"""
if atom1:
self.atom1 = atom1
self.dOH = dOH
self.dHH = dHH
def get_parameters(self):
        return (self.atom1, self.dOH, self.dHH)
    def __repr__(self):
        return str(self.atom1) + ' ' + str(self.dOH) + ' ' + str(self.dHH)
    def __str__(self):
        return str(self.atom1) + ' ' + str(self.dOH) + ' ' + str(self.dHH)
```
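A hedged usage sketch for `Settles`; the geometry is SPC-like water, where dOH = 0.1 nm and dHH = 2 * dOH * sin(theta/2) is about 0.16330 nm for theta = 109.47 degrees:

```python
import intermol.unit as units

s = Settles(1, 0.1 * units.nanometers, 0.16330 * units.nanometers)
print(s.get_parameters())  # (atom1, dOH, dHH)
```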
#### File: intermol/forces/torsion_torsion_CMAP.py
```python
from intermol.decorators import *
class TorsionTorsionCMAP(object):
@accepts_compatible_units(None, None, None, None, None, None, None, None,
None, None)
def __init__(self, atom1, atom2, atom3, atom4, atom5, atom6, atom7, atom8, type, chart):
"""
"""
self.type = type
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
self.atom4 = atom4
self.atom5 = atom5
self.atom6 = atom6
self.atom7 = atom7
self.atom8 = atom8
self.chart = chart
def getparameters(self):
return (self.atom1, self.atom2, self.atom3, self.atom4, self.atom5, self.atom6, self.atom7, self.atom8, self.type, self.chart)
```
#### File: intermol/forces/virtual_site2.py
```python
from intermol.decorators import *
class VSite2(object):
def __init__(self, atom1, atom2, atom3, a):
if atom1:
self.atom1 = atom1
if atom2:
self.atom2 = atom2
if atom3:
self.atom3 = atom3
self.a = a
def get_parameters(self):
return(self.atom1, self.atom2, self.atom3, self.a)
```
#### File: intermol/forces/virtual_site3_fad.py
```python
from intermol.decorators import *
class VSite3fad(object):
@accepts_compatible_units(None, None, None, None, units.degrees, units.nanometers)
def __init__(self, atom1, atom2, atom3, atom4, theta, d):
if atom1:
self.atom1 = atom1
if atom2:
self.atom2 = atom2
if atom3:
self.atom3 = atom3
if atom4:
self.atom4 = atom4
self.theta = theta
self.d = d
def get_parameters(self):
return(self.atom1, self.atom2, self.atom3, self.atom4, self.theta, self.d)
```
#### File: intermol/lammps_extension/lammps_parser.py
```python
import os
import logging
import numpy as np
from collections import OrderedDict
import intermol.unit as units
from intermol.atom import Atom
from intermol.molecule import Molecule
from intermol.system import System
from intermol.types import *
from intermol.forces import *
logger = logging.getLogger('InterMolLog')
class LammpsParser(object):
"""A class containing methods to read and write LAMMPS files."""
def __init__(self):
"""
"""
self.box_vector = np.zeros(shape=(3, 3), dtype=float)
def read_system(self, input_file):
"""Reads a LAMMPS input file and a data file specified within.
Args:
input_file (str): Name of LAMMPS input file to read in.
"""
self.read_input(input_file)
if self.data_file:
self.read_data(self.data_file)
else:
raise Exception("No data file found in input script")
def read_input(self, input_file):
"""Reads a LAMMPS input file.
Args:
input_file (str): Name of LAMMPS input file to read in.
"""
self.basepath = os.path.dirname(os.path.realpath(input_file))
parsable_keywords = {'units': self.parse_units,
'atom_style': self.parse_atom_style,
'dimension': self.parse_dimension,
'boundary': self.parse_boundary,
'pair_style': self.parse_pair_style,
'kspace_style': self.parse_kspace_style,
'pair_modify': self.parse_pair_modify,
'bond_style': self.parse_bond_style,
'angle_style': self.parse_angle_style,
'dihedral_style': self.parse_dihedral_style,
'improper_style': self.parse_improper_style,
'special_bonds': self.parse_special_bonds,
'read_data': self.parse_read_data,
'fix': self.parse_fix}
self.shake = False # SHAKE constraints?
with open(input_file, 'r') as input_lines:
for line in input_lines:
if line.strip():
keyword = line.split()[0]
if keyword in parsable_keywords:
parsable_keywords[keyword](line.split())
def read_data(self, data_file):
"""Reads a LAMMPS data file.
Args:
data_file (str): name of LAMMPS data file to read in.
"""
# Read box, masses and forcefield info from data file.
parsable_keywords = {'Masses': self.parse_masses,
'Pair Coeffs': self.parse_pair_coeffs,
'Bond Coeffs': self.parse_bond_coeffs,
'Angle Coeffs': self.parse_angle_coeffs,
'Dihedral Coeffs': self.parse_dihedral_coeffs,
'Improper Coeffs': self.parse_improper_coeffs}
with open(data_file, 'r') as data_lines:
for line in data_lines:
if line.strip():
line = line.partition('#')[0] # Remove trailing comment
fields = line.split()
if len(fields) == 2 and fields[1] == 'atoms':
self.natoms = int(fields[0])
# catch all box dimensions
if (('xlo' in line) and
('xhi' in line)):
self.parse_box(line.split(), 0)
elif (('ylo' in line) and
('yhi' in line)):
self.parse_box(line.split(), 1)
elif (('zlo' in line) and
('zhi' in line)):
self.parse_box(line.split(), 2)
# other headers
else:
keyword = line.strip()
if keyword in parsable_keywords:
parsable_keywords[keyword](data_lines)
# Read atoms and connectivity information from data file.
parsable_keywords = {'Atoms': self.parse_atoms,
'Bonds': self.parse_bonds,
'Angles': self.parse_angles,
'Dihedrals': self.parse_dihedrals,
'Impropers': self.parse_impropers}
with open(data_file, 'r') as data_lines:
for line in data_lines:
if line.strip():
keyword = line.partition('#')[0].strip()
if keyword in parsable_keywords:
parsable_keywords[keyword](data_lines)
# SETTLE constraints (for now limited to rigid 3-point light water)
for mol_type in System._sys._molecules.itervalues():
if (len(mol_type.angleForceSet) == 1 and
len(mol_type.bondForceSet) == 2):
mol_mass = units.sum([a._mass[0] for a in mol_type.moleculeSet[0].getAtoms()])
if np.round(mol_mass.in_units_of(units.amu)._value) != 18.0:
continue
is_rigid_water = True
for angle in mol_type.angleForceSet.itervalues():
if isinstance(angle, Angle) and angle.c:
thetaHOH = angle.theta
else:
is_rigid_water = False
for bond in mol_type.bondForceSet.itervalues():
if isinstance(bond, Bond) and bond.c:
dOH = bond.length
else:
is_rigid_water = False
if is_rigid_water:
dHH = units.sqrt(2 * dOH**2 * (1 - units.cos(thetaHOH)))
for i, atom in enumerate(mol_type.moleculeSet[0]._atoms, start=1):
if atom._mass[0].in_units_of(units.amu)._value > 15:
iOW = i
break
mol_type.settles = Settles(iOW, dOH, dHH)
mol_type.nrexcl = 1
mol_type.exclusions.add(Exclusions([1, 2, 3]))
mol_type.exclusions.add(Exclusions([2, 1, 3]))
mol_type.exclusions.add(Exclusions([3, 1, 2]))
        # Identify 1-3 and 1-4 neighbors
onethree = [[] for i in range(self.natoms + 1)]
onefour = [[] for i in range(self.natoms + 1)]
        for i in range(1, self.natoms + 1):  # include the last atom index
# 1-3 neighbors
for j in self.onetwo[i]:
for k in self.onetwo[j]:
if not ((k == i) or (k in self.onetwo[i])):
onethree[i].append(k)
# 1-4 neighbors
for j in onethree[i]:
for k in self.onetwo[j]:
if not ((k == i) or (k in self.onetwo[i]) or (k in onethree[i])):
onefour[i].append(k)
# Generate 1-4 pairs for each moleculetype
for mol_type in System._sys._molecules.itervalues():
molecule = mol_type.moleculeSet[0]
for atom in molecule.getAtoms():
ai = self.nr[atom.index]
for j in onefour[atom.index]:
aj = self.nr[j]
mol_type.pairForceSet.add(AbstractPair(ai, aj, "Both"))
def parse_units(self, line):
""" """
assert(len(line) == 2), "Invalid units specified in input file."
self.unit_set = line[1]
self.RAD = units.radians
self.DEGREE = units.degrees
if self.unit_set == 'real':
self.DIST = units.angstroms
self.VEL = units.angstroms / units.femtosecond
self.ENERGY = units.kilocalorie / units.mole
self.MASS = units.grams / units.mole
self.CHARGE = units.elementary_charge
self.MOLE = units.mole
else:
raise Exception("Unsupported unit set specified in input file: "
"{0}".format(self.unit_set))
def parse_atom_style(self, line):
"""
"""
if len(line) > 2:
logger.warn("Unsupported atom_style in input file.")
self.atom_style = line[1]
# Currently only support atom_style 'full'
if self.atom_style != 'full':
raise Exception("Unsupported atom_style in input file: {0}".format(self.atom_style))
def parse_dimension(self, line):
""" """
self.dimension = int(line[1])
if self.dimension not in [2, 3]:
raise ValueError("Invalid dimension specified in input file"
" (must be 2 or 3).")
def parse_boundary(self, line):
""" """
self.boundaries = [line[1], line[2], line[3]]
if len(self.boundaries) != self.dimension:
raise ValueError("Boundaries do not match specified dimension "
"in input file")
def parse_pair_style(self, line):
""" """
self.pair_style = []
if line[1] == 'hybrid':
logger.warn("Hybrid pair styles not yet implemented.")
elif line[1] == 'lj/cut/coul/long':
self.pair_style.append(line[1])
System._sys.nonbonded_function = 1
def parse_kspace_style(self, line):
"""
Note:
Currently ignored.
"""
if line[1] == 'pppm':
pass
def parse_pair_modify(self, line):
"""
"""
if line[1] == 'mix':
if line[2] == 'geometric':
System._sys.combination_rule = 3
elif line[2] == 'arithmetic':
System._sys.combination_rule = 2
else:
logger.warn("Unsupported pair_modify mix argument in input file!")
else:
logger.warn("Unsupported pair_modify style in input file!")
def parse_bond_style(self, line):
""" """
self.bond_style = []
self.hybrid_bond_style = False
if len(line) == 2:
self.bond_style.append(line[1])
elif len(line) > 2 and line[1] == 'hybrid':
self.hybrid_bond_style = True
for style in line[2:]:
self.bond_style.append(style)
else:
raise ValueError("Invalid bond_style in input file!")
def parse_angle_style(self, line):
""" """
self.angle_style = []
self.hybrid_angle_style = False
if len(line) == 2:
self.angle_style.append(line[1])
elif len(line) > 2 and line[1] == 'hybrid':
self.hybrid_angle_style = True
for style in line[2:]:
self.angle_style.append(style)
else:
raise ValueError("Invalid angle_style in input file!")
def parse_dihedral_style(self, line):
""" """
self.dihedral_style = []
self.hybrid_dihedral_style = False
if len(line) == 2:
self.dihedral_style.append(line[1])
elif len(line) > 2 and line[1] == 'hybrid':
self.hybrid_dihedral_style = True
for style in line[2:]:
self.dihedral_style.append(style)
else:
raise ValueError("Invalid dihedral_style in input file!")
def parse_improper_style(self, line):
""" """
self.improper_style = []
self.hybrid_improper_style = False
if len(line) == 2:
self.improper_style.append(line[1])
elif len(line) > 2 and line[1] == 'hybrid':
self.hybrid_improper_style = True
for style in line[2:]:
self.improper_style.append(style)
else:
raise ValueError("Invalid improper_style in input file!")
def parse_special_bonds(self, line):
""" """
if line[1] == 'amber':
System._sys.lj_correction = 0.5
System._sys.coulomb_correction = 5.0 / 6.0
elif 'lj/coul' in line:
System._sys.lj_correction = float(line[line.index('lj/coul') + 3])
System._sys.coulomb_correction = float(line[line.index('lj/coul') + 3])
elif 'lj' in line and 'coul' in line:
System._sys.lj_correction = float(line[line.index('lj') + 3])
System._sys.coulomb_correction = float(line[line.index('coul') + 3])
elif 'lj' in line:
System._sys.lj_correction = float(line[line.index('lj') + 3])
elif 'coul' in line:
System._sys.coulomb_correction = float(line[line.index('coul') + 3])
else:
logger.warn("Unsupported special_bonds in input file.")
def parse_read_data(self, line):
""" """
if len(line) == 2:
self.data_file = os.path.join(self.basepath, line[1])
else:
logger.warn("Unsupported read_data arguments in input file.")
def parse_fix(self, line):
""" """
if len(line) > 3 and line[3] == 'shake':
self.parse_fix_shake(line)
def parse_fix_shake(self, line):
""" """
if line[2] != 'all':
logger.warn("Unsupported group-ID in fix shake command")
return
if 'mol' in line:
logger.warn("Unsupported keyword 'mol' in fix shake command")
return
self.shake_bond_types_i = set()
self.shake_angle_types_i = set()
self.shake_masses = set()
line = line[7:]
for field in line:
if field == 't':
logger.warn("SHAKE 't' (atom type) constraints not yet supported: fix shake ignored.")
return
elif field == 'b':
container = self.shake_bond_types_i
str2num = int
elif field == 'a':
container = self.shake_angle_types_i
str2num = int
elif field == 'm':
container = self.shake_masses
str2num = float
else:
container.add(str2num(field))
self.shake = True
def parse_box(self, line, dim):
"""Read box information from data file.
Args:
line (str): Current line in input file.
dim (int): Dimension specified in line.
"""
fields = [float(field) for field in line[:2]]
box_length = fields[1] - fields[0]
if box_length > 0:
self.box_vector[dim, dim] = box_length
else:
raise ValueError("Negative box length specified in data file.")
System._sys.box_vector = self.box_vector * self.DIST
def parse_masses(self, data_lines):
"""Read masses from data file."""
next(data_lines) # toss out blank line
self.mass_dict = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
self.mass_dict[int(fields[0])] = float(fields[1]) * self.MASS
def parse_pair_coeffs(self, data_lines):
"""Read pair coefficients from data file."""
next(data_lines) # toss out blank line
self.nb_types = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = [float(field) for field in line.partition('#')[0].split()]
if len(self.pair_style) == 1:
# TODO: lookup of type of pairstyle to determine format
if System._sys.nonbonded_function == 1:
self.nb_types[int(fields[0])] = [fields[1] * self.ENERGY,
fields[2] * self.DIST]
else:
logger.warn("Unsupported pair coeff formatting in data file!")
else:
logger.warn("Unsupported pair coeff formatting in data file!")
def parse_bond_coeffs(self, data_lines):
"""Read bond coefficients from data file."""
next(data_lines) # toss out blank line
self.bond_types = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
if self.hybrid_bond_style:
style = fields[1]
if style not in self.bond_style:
raise Exception("Bond type found in Bond Coeffs that "
"was not specified in bond_style: {0}".format(style))
coeffs = fields[2:]
else:
style = self.bond_style[0]
coeffs = fields[1:]
if style == 'harmonic':
self.bond_types[int(fields[0])] = [
style,
2 * float(coeffs[0]) * self.ENERGY / (self.DIST*self.DIST),
float(coeffs[1]) * self.DIST]
elif style == 'morse':
self.bond_types[int(fields[0])] = [
style,
float(coeffs[0]) * self.ENERGY,
float(coeffs[1]) * self.DIST**(-1),
float(coeffs[2]) * self.DIST]
else:
logger.warn("Unsupported bond style: {0}".format(style))
def parse_angle_coeffs(self, data_lines):
"""Read angle coefficients from data file."""
next(data_lines) # toss out blank line
self.angle_types = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
if self.hybrid_angle_style:
style = fields[1]
if style not in self.angle_style:
raise Exception("Angle type found in Angle Coeffs that "
"was not specified in angle_style: {0}".format(style))
coeffs = fields[2:]
else:
style = self.angle_style[0]
coeffs = fields[1:]
if style == 'harmonic':
self.angle_types[int(fields[0])] = [
style,
2 * float(coeffs[0]) * self.ENERGY / self.RAD**2,
float(coeffs[1]) * self.DEGREE]
else:
logger.warn("Unsupported angle style: {0}".format(style))
def parse_dihedral_coeffs(self, data_lines):
"""Read dihedral coefficients from data file."""
next(data_lines) # toss out blank line
self.dihedral_types = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
if self.hybrid_dihedral_style:
style = fields[1]
if style not in self.dihedral_style:
raise Exception("Dihedral type found in Dihedral Coeffs that "
"was not specified in dihedral_style: {0}".format(style))
coeffs = fields[2:]
else:
style = self.dihedral_style[0]
coeffs = fields[1:]
if style == 'opls':
self.dihedral_types[int(fields[0])] = [
style,
float(coeffs[0]) * self.ENERGY,
float(coeffs[1]) * self.ENERGY,
float(coeffs[2]) * self.ENERGY,
float(coeffs[3]) * self.ENERGY]
elif style == 'multi/harmonic':
self.dihedral_types[int(fields[0])] = [
style,
float(coeffs[0]) * self.ENERGY,
float(coeffs[1]) * self.ENERGY,
float(coeffs[2]) * self.ENERGY,
float(coeffs[3]) * self.ENERGY,
float(coeffs[4]) * self.ENERGY]
elif style == 'fourier':
self.dihedral_types[int(fields[0])] = [style, int(coeffs[0])]
for i in range(int(coeffs[0])):
self.dihedral_types[int(fields[0])] += [
float(coeffs[i*3+1]) * self.ENERGY,
int( coeffs[i*3+2]),
float(coeffs[i*3+3]) * self.DEGREE]
else:
logger.warn("Unsupported dihedral style: {0}".format(style))
def parse_improper_coeffs(self, data_lines):
"""Read improper coefficients from data file."""
next(data_lines) # toss out blank line
self.improper_types = dict()
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
if self.hybrid_improper_style:
style = fields[1]
if style not in self.improper_style:
raise Exception("Improper type found in Improper Coeffs that "
"was not specified in improper_style: {0}".format(style))
coeffs = fields[2:]
else:
style = self.improper_style[0]
coeffs = fields[1:]
if style == 'harmonic':
self.improper_types[int(fields[0])] = [
style,
float(coeffs[0]) * self.ENERGY / self.RAD**2,
float(coeffs[1]) * self.DEGREE]
            elif style == 'cvff':  # E = K * (1 + d*cos(n*phi))
self.improper_types[int(fields[0])] = [
style,
float(coeffs[0]) * self.ENERGY, # K
int(coeffs[1]), # d
int(coeffs[2])] # n
else:
logger.warn("Unsupported improper style: {0}".format(style))
def parse_atoms(self, data_lines):
"""Read atoms from data file."""
molecules = OrderedDict()
next(data_lines) # toss out blank line
for line in data_lines:
if not line.strip():
break # found another blank line
fields = line.partition('#')[0].split()
if len(fields) in [7, 10]:
if len(fields) == 10:
# TODO: store image flags?
pass
new_atom_type = None
bondtype = atomtype = 'lmp_{:03d}'.format(int(fields[2]))
if System._sys.combination_rule == 1:
logger.warn("Combination rule '1' not yet implemented")
elif System._sys.combination_rule in [2, 3]:
new_atom_type = AtomCR23Type(atomtype, bondtype,
-1, # atomic_number
self.mass_dict[int(fields[2])],
0 * self.CHARGE, # charge (0 for atomtype)
'A', # ptype
self.nb_types[int(fields[2])][1], # sigma
self.nb_types[int(fields[2])][0]) # epsilon
System._sys._atomtypes.add(new_atom_type)
atom = Atom(int(fields[0]),
'', # atom name (set below)
1) # residue index
atom.setAtomType(0, atomtype)
atom.cgnr = 1
# TODO: Set cg nr 'automatically' in gromacs_topology_parser.
# See, e.g., topogromacs.tcl in VMD topotools.
atom.setCharge(0, float(fields[3]) * self.CHARGE)
atom.setMass(0, self.mass_dict[int(fields[2])])
atom.setPosition(float(fields[4]) * self.DIST,
float(fields[5]) * self.DIST,
float(fields[6]) * self.DIST)
for ab_state, atom_type in enumerate(atom._atomtype):
# Searching for a matching atom_type
temp = AbstractAtomType(atom._atomtype[ab_state])
atom_type = System._sys._atomtypes.get(temp)
if atom_type:
atom.setSigma(ab_state, atom_type.sigma)
atom.setEpsilon(ab_state, atom_type.epsilon)
atom.bondtype = atom_type.bondtype
else:
logger.warn("Corresponding AtomType was not found. "
"Insert missing values yourself.")
if int(fields[1]) not in molecules:
molecules[int(fields[1])] = Molecule()
molecules[int(fields[1])].addAtom(atom)
# Add molecules to system
atomtype_list_old = []
moleculetype_i = 0
mol_name = None
self.nr = dict() # atom index => index within moleculetype (i.e. nr)
self.mol_type = dict() # atom index => MoleculeType
for molecule in molecules.values():
molecule.getAtoms().list.sort()
atomtype_list = [atom.getAtomType(0) for atom in molecule.getAtoms()]
if atomtype_list != atomtype_list_old: # new moleculetype
moleculetype_i += 1
mol_name = 'moleculetype{:02d}'.format(moleculetype_i)
atomtype_list_old = atomtype_list
molecule.name = mol_name
System._sys.add_molecule(molecule)
# TODO: Move this elsewhere and determine nrexcl from special_bonds
System._sys._molecules[molecule.name].nrexcl = 3
for i, atom in enumerate(molecule.getAtoms(), start=1):
self.nr[atom.index] = i
self.mol_type[atom.index] = System._sys._molecules[molecule.name]
atom.residue_name = 'R{:02d}'.format(moleculetype_i)
atom.name = 'A{:x}'.format(i)
def parse_bonds(self, data_lines):
"""Read bonds from data file."""
self.onetwo = [[] for i in range(self.natoms + 1)] # 1-2 neighbors
next(data_lines) # toss out blank line
for line in data_lines:
if not line.strip():
break # found another blank line
fields = [int(field) for field in line.partition('#')[0].split()]
new_bond_force = None
coeff_num = fields[1]
ai = self.nr[fields[2]]
aj = self.nr[fields[3]]
if self.shake and coeff_num in self.shake_bond_types_i:
constrained = True
else:
constrained = False
# Bond
if self.bond_types[coeff_num][0] == 'harmonic':
r = self.bond_types[coeff_num][2]
k = self.bond_types[coeff_num][1]
new_bond_force = Bond(ai, aj, r, k, c=constrained)
# Morse
elif self.bond_types[coeff_num][0] == 'morse':
r = self.bond_types[coeff_num][3]
D = self.bond_types[coeff_num][1]
beta = self.bond_types[coeff_num][2]
new_bond_force = MorseBond(ai, aj, r, D, beta)
self.current_mol_type = self.mol_type[fields[2]]
if (self.mol_type[fields[3]] is not self.current_mol_type):
raise Exception("Attempted to define bond between atoms {0:d}, {1:d} across different molecules.".format(fields[2], fields[3]))
self.current_mol_type.bondForceSet.add(new_bond_force)
            # Keep track of 1-2 (directly bonded) neighbors
self.onetwo[fields[2]].append(fields[3])
self.onetwo[fields[3]].append(fields[2])
def parse_angles(self, data_lines):
"""Read angles from data file."""
next(data_lines) # toss out blank line
for line in data_lines:
if not line.strip():
break # found another blank line
fields = [int(field) for field in line.partition('#')[0].split()]
new_angle_force = None
coeff_num = fields[1]
ai = self.nr[fields[2]]
aj = self.nr[fields[3]]
ak = self.nr[fields[4]]
if self.shake and coeff_num in self.shake_angle_types_i:
constrained = True
else:
constrained = False
# Angle
if self.angle_types[coeff_num][0] == 'harmonic':
theta = self.angle_types[coeff_num][2]
k = self.angle_types[coeff_num][1]
new_angle_force = Angle(ai, aj, ak, theta, k, c=constrained)
self.current_mol_type = self.mol_type[fields[2]]
if ((self.mol_type[fields[3]] is not self.current_mol_type) or
(self.mol_type[fields[4]] is not self.current_mol_type)):
raise Exception("Attempted to define angle between atoms {0:d}, {1:d}, {2:d} across different molecules.".format(fields[2], fields[3], fields[4]))
self.current_mol_type.angleForceSet.add(new_angle_force)
def parse_dihedrals(self, data_lines):
"""Read dihedrals from data file."""
next(data_lines) # toss out blank line
for line in data_lines:
if not line.strip():
break # found another blank line
fields = [int(field) for field in line.partition('#')[0].split()]
new_dihed_force = None
coeff_num = fields[1]
ai = self.nr[fields[2]]
aj = self.nr[fields[3]]
ak = self.nr[fields[4]]
al = self.nr[fields[5]]
if self.dihedral_types[coeff_num][0] == 'opls':
fc = ConvertDihedralFromFourierToDihedralTrig(
self.dihedral_types[coeff_num][1],
self.dihedral_types[coeff_num][2],
self.dihedral_types[coeff_num][3],
self.dihedral_types[coeff_num][4])
new_dihed_force = DihedralTrigDihedral(
ai, aj, ak, al,
0 * self.DEGREE, *fc)
elif self.dihedral_types[coeff_num][0] == 'multi/harmonic':
fc = ConvertDihedralFromRBToDihedralTrig(
self.dihedral_types[coeff_num][1],
-self.dihedral_types[coeff_num][2],
self.dihedral_types[coeff_num][3],
-self.dihedral_types[coeff_num][4],
self.dihedral_types[coeff_num][5],
0 * self.ENERGY,
0 * self.ENERGY)
new_dihed_force = DihedralTrigDihedral(
ai, aj, ak, al,
0 * self.DEGREE, *fc)
elif self.dihedral_types[coeff_num][0] == 'fourier':
fc = ConvertDihedralFromProperDihedralToDihedralTrig(
self.dihedral_types[coeff_num][2], # K1
self.dihedral_types[coeff_num][3]) # n1
new_dihed_force = DihedralTrigDihedral(
ai, aj, ak, al,
self.dihedral_types[coeff_num][4], # d1 (phase)
*fc)
for i in range(1, self.dihedral_types[coeff_num][1]):
fc = ConvertDihedralFromProperDihedralToDihedralTrig(
self.dihedral_types[coeff_num][3*i+2], # K[i+1]
self.dihedral_types[coeff_num][3*i+3]) # n[i+1]
addterms = DihedralTrigDihedral(
0, 0, 0, 0,
self.dihedral_types[coeff_num][3*i+4], # d[i+1]
*fc)
new_dihed_force.sum_parameters(addterms)
self.current_mol_type = self.mol_type[fields[2]]
if ((self.mol_type[fields[3]] is not self.current_mol_type) or
(self.mol_type[fields[4]] is not self.current_mol_type) or
(self.mol_type[fields[5]] is not self.current_mol_type)):
raise Exception("Attempted to define dihedral between atoms {0:d}, {1:d}, {2:d}, {3:d} across different molecules.".format(fields[2], fields[3], fields[4], fields[5]))
self.current_mol_type.dihedralForceSet.add(new_dihed_force)
def parse_impropers(self, data_lines):
"""Read impropers from data file."""
next(data_lines) # toss out blank line
for line in data_lines:
if not line.strip():
break # found another blank line
fields = [int(field) for field in line.partition('#')[0].split()]
new_dihed_force = None
coeff_num = fields[1]
ai = self.nr[fields[2]]
aj = self.nr[fields[3]]
ak = self.nr[fields[4]]
al = self.nr[fields[5]]
if self.improper_types[coeff_num][0] == 'harmonic':
k = self.improper_types[coeff_num][1]
xi = self.improper_types[coeff_num][2]
new_dihed_force = ImproperHarmonicDihedral(ai, aj, ak, al, xi, k)
elif self.improper_types[coeff_num][0] == 'cvff':
k = self.improper_types[coeff_num][1]
d = self.improper_types[coeff_num][2]
if d == 1:
phi = 0. * self.DEGREE
elif d == -1:
phi = 180. * self.DEGREE
else:
raise ValueError('Invalid coefficient d in cvff improper type {0:d}'.format(coeff_num))
multiplicity = self.improper_types[coeff_num][3]
fc = ConvertDihedralFromProperDihedralToDihedralTrig(
k, multiplicity)
new_dihed_force = DihedralTrigDihedral(
ai, aj, ak, al,
phi, *fc, improper=True)
self.current_mol_type = self.mol_type[fields[2]]
if ((self.mol_type[fields[3]] is not self.current_mol_type) or
(self.mol_type[fields[4]] is not self.current_mol_type) or
(self.mol_type[fields[5]] is not self.current_mol_type)):
raise Exception("Attempted to define improper dihedral between atoms {0:d}, {1:d}, {2:d}, {3:d} across different molecules.".format(fields[2], fields[3], fields[4], fields[5]))
self.current_mol_type.dihedralForceSet.add(new_dihed_force)
def write(self, data_file, unit_set='real', verbose=False):
"""Writes a LAMMPS data and corresponding input file.
Args:
data_file (str): Name of LAMMPS data file to write to.
unit_set (str): LAMMPS unit set for output file.
"""
self.RAD = units.radians
self.DEGREE = units.degrees
if unit_set == 'real':
self.DIST = units.angstroms
self.VEL = units.angstroms / units.femtosecond
self.ENERGY = units.kilocalorie / units.mole
self.MASS = units.grams / units.mole
self.CHARGE = units.elementary_charge
self.MOLE = units.mole
else:
raise Exception("Unsupported unit set specified: {0}".format(unit_set))
# Containers for lines which are ultimately written to output files.
mass_list = list()
mass_list.append('\n')
mass_list.append('Masses\n')
mass_list.append('\n')
pair_coeff_list = list()
pair_coeff_list.append('\n')
pair_coeff_list.append('Pair Coeffs\n')
pair_coeff_list.append('\n')
bond_coeffs = list()
bond_coeffs.append('\n')
bond_coeffs.append('Bond Coeffs\n')
bond_coeffs.append('\n')
angle_coeffs = list()
angle_coeffs.append('\n')
angle_coeffs.append('Angle Coeffs\n')
angle_coeffs.append('\n')
dihedral_coeffs = list()
dihedral_coeffs.append('\n')
dihedral_coeffs.append('Dihedral Coeffs\n')
dihedral_coeffs.append('\n')
improper_coeffs = list()
improper_coeffs.append('\n')
improper_coeffs.append('Improper Coeffs\n')
improper_coeffs.append('\n')
atom_list = list()
atom_list.append('\n')
atom_list.append('Atoms\n')
atom_list.append('\n')
vel_list = list()
vel_list.append('\n')
vel_list.append('Velocities\n')
vel_list.append('\n')
bond_list = list()
bond_list.append('\n')
bond_list.append('Bonds\n')
bond_list.append('\n')
angle_list = list()
angle_list.append('\n')
angle_list.append('Angles\n')
angle_list.append('\n')
dihedral_list = list()
dihedral_list.append('\n')
dihedral_list.append('Dihedrals\n')
dihedral_list.append('\n')
improper_list = list()
improper_list.append('\n')
improper_list.append('Impropers\n')
improper_list.append('\n')
# dicts for type information
atom_type_dict = dict() # str_type:int_type
a_type_i = 1 # counter for atom types
bond_style = set()
bond_type_dict = dict() # typeObject:int_type
b_type_i = 1 # counter for bond types
angle_style = set()
angle_type_dict = dict() # typeObject:int_type
ang_type_i = 1
dihedral_style = set()
dihedral_type_dict = dict() # typeObject:int_type
dih_type_i = 1
improper_style = set()
improper_type_dict = dict() # typeObject:int_type
imp_type_i = 1
# read all atom specific and FF information
offset = 0
bond_i = 1
angle_i = 1
dihedral_i = 1
improper_i = 1
shake_bond_types = set()
shake_angle_types = set()
x_min = y_min = z_min = np.inf
for mol_type in System._sys._molecules.itervalues():
logger.debug(" Writing moleculetype {0}...".format(mol_type.name))
# Settles (for rigid water in GROMACS)
# We'll convert these to SHAKE constraints in the input script.
if mol_type.settles:
for bond in mol_type.bondForceSet.itervalues():
shake_bond_types.add(BondType(
mol_type.moleculeSet[0]._atoms[bond.atom1 - 1].bondtype,
mol_type.moleculeSet[0]._atoms[bond.atom2 - 1].bondtype,
bond.length,
bond.k))
for angle in mol_type.angleForceSet.itervalues():
shake_angle_types.add(AngleType(
mol_type.moleculeSet[0]._atoms[angle.atom1 - 1].bondtype,
mol_type.moleculeSet[0]._atoms[angle.atom2 - 1].bondtype,
mol_type.moleculeSet[0]._atoms[angle.atom3 - 1].bondtype,
angle.theta,
angle.k))
# molecule = mol_type.moleculeSet[0]
# atoms = molecule._atoms
# for i, offset in enumerate(offsets):
for molecule in mol_type.moleculeSet:
# bonds
logger.debug(" Writing bonds...")
for bond in mol_type.bondForceSet.itervalues():
atomtype1 = molecule._atoms[bond.atom1 - 1].bondtype
atomtype2 = molecule._atoms[bond.atom2 - 1].bondtype
if isinstance(bond, Bond):
style = 'harmonic'
temp = BondType(atomtype1, atomtype2,
bond.length, bond.k)
# NOTE: k includes the factor of 0.5 for harmonic in LAMMPS
if temp not in bond_type_dict:
bond_type_dict[temp] = b_type_i
bond_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f} # {4:2s}-{5:2s}\n'.format(
b_type_i,
style,
0.5 * bond.k.in_units_of(self.ENERGY / (self.DIST*self.DIST))._value,
bond.length.in_units_of(self.DIST)._value,
atomtype1, atomtype2))
b_type_i += 1
elif isinstance(bond, MorseBond):
style = 'morse'
temp = MorseBondType(atomtype1, atomtype2,
bond.length, bond.D, bond.beta)
if temp not in bond_type_dict:
bond_type_dict[temp] = b_type_i
bond_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f} {4:18.8f}\n'.format(
b_type_i,
style,
bond.D.in_units_of(self.ENERGY)._value,
bond.beta.in_units_of(self.DIST**(-1))._value,
bond.length.in_units_of(self.DIST)._value))
b_type_i += 1
else:
logger.warn("Found unimplemented bond type for LAMMPS!")
continue
bond_list.append('{0:-6d} {1:6d} {2:6d} {3:6d}\n'.format(
bond_i,
bond_type_dict[temp],
bond.atom1 + offset,
bond.atom2 + offset))
bond_i += 1
bond_style.add(style)
if len(bond_style) > 1:
logger.warn("More than one bond style found!")
# angles
logger.debug(" Writing angles...")
for angle in mol_type.angleForceSet.itervalues():
atomtype1 = molecule._atoms[angle.atom1 - 1].bondtype
atomtype2 = molecule._atoms[angle.atom2 - 1].bondtype
atomtype3 = molecule._atoms[angle.atom3 - 1].bondtype
if isinstance(angle, Angle):
style = 'harmonic'
temp = AngleType(atomtype1, atomtype2, atomtype3,
angle.theta, angle.k)
# NOTE: k includes the factor of 0.5 for harmonic in LAMMPS
if temp not in angle_type_dict:
angle_type_dict[temp] = ang_type_i
angle_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f} # {4:2s}-{5:2s}-{6:2s}\n'.format(
ang_type_i,
style,
0.5 * angle.k.in_units_of(self.ENERGY / self.RAD**2)._value,
angle.theta.in_units_of(self.DEGREE)._value,
atomtype1, atomtype2, atomtype3))
ang_type_i += 1
elif isinstance(angle, UreyBradleyAngle):
style = 'charmm'
temp = UreyBradleyAngleType(atomtype1, atomtype2, atomtype3,
angle.theta, angle.k, angle.r, angle.kUB)
# NOTE: k includes the factor of 0.5 for harmonic in LAMMPS
if temp not in angle_type_dict:
angle_type_dict[temp] = ang_type_i
angle_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f} {4:18.8f} {5:18.8f}\n'.format(
ang_type_i,
style,
0.5 * angle.k.in_units_of(self.ENERGY / self.RAD**2)._value,
angle.theta.in_units_of(self.DEGREE)._value,
0.5 * angle.kUB.in_units_of(self.ENERGY / self.DIST**2)._value,
angle.r.in_units_of(self.DIST)._value))
ang_type_i += 1
elif isinstance(angle, G96Angle):
style = 'cosine/squared'
temp = G96AngleType(atomtype1, atomtype2, atomtype3,
angle.theta, angle.k)
# NOTE: k includes the factor of 0.5 for harmonic in LAMMPS
if temp not in angle_type_dict:
angle_type_dict[temp] = ang_type_i
angle_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f}\n'.format(
ang_type_i,
style,
0.5 * angle.k.in_units_of(self.ENERGY)._value,
angle.theta.in_units_of(self.DEGREE)._value))
ang_type_i += 1
else:
logger.warn("Found unimplemented angle type for LAMMPS!")
continue
angle_list.append('{0:-6d} {1:6d} {2:6d} {3:6d} {4:6d}\n'.format(
angle_i,
angle_type_dict[temp],
angle.atom1 + offset,
angle.atom2 + offset,
angle.atom3 + offset))
angle_i += 1
angle_style.add(style)
if len(angle_style) > 1:
logger.warn("More than one angle style found!")
# dihedrals
logger.debug(" Writing dihedrals...")
for dihedral in mol_type.dihedralForceSet.itervalues():
atomtype1 = molecule._atoms[dihedral.atom1 - 1].bondtype
atomtype2 = molecule._atoms[dihedral.atom2 - 1].bondtype
atomtype3 = molecule._atoms[dihedral.atom3 - 1].bondtype
atomtype4 = molecule._atoms[dihedral.atom4 - 1].bondtype
if isinstance(dihedral, DihedralTrigDihedral):
coefficients = [dihedral.fc1, dihedral.fc2, dihedral.fc3,
dihedral.fc4, dihedral.fc5, dihedral.fc6]
if dihedral.improper:
found_nonzero = False
for n, coeff in enumerate(coefficients):
if coeff._value != 0.0:
                                if not found_nonzero:
found_nonzero = True
else:
raise ValueError("Found more than one nonzero "
"coefficient in improper trigonal dihedral!")
style = 'charmm'
temp = ProperPeriodicDihedralType(atomtype1, atomtype2,
atomtype3, atomtype4,
dihedral.phi, coeff, n + 1)
if temp not in dihedral_type_dict:
dihedral_type_dict[temp] = dih_type_i
# NOTE: weighting factor assumed to be 0.0
# May need to add some additional checks here in the future
dihedral_coeffs.append('{0:d} {1} {2:18.8f} {3:18d} '
'{4:18d} {5:18.4f}\n'.format(
dih_type_i, style,
coeff.in_units_of(self.ENERGY)._value,
n + 1,
int(dihedral.phi.in_units_of(units.degrees)._value),
0.0))
dih_type_i += 1
dihedral_list.append('{0:-6d} {1:6d} {2:6d} {3:6d} {4:6d} {5:6d}\n'.format(
dihedral_i,
dihedral_type_dict[temp],
dihedral.atom1 + offset,
dihedral.atom2 + offset,
dihedral.atom3 + offset,
dihedral.atom4 + offset))
dihedral_i += 1
dihedral_style.add(style)
else:
# NOTE: the following logic could instead default to printing
# out a series of charmm style dihedrals instead of attempting
# to write a multi/harmonic. I presume one multi/harmonic vs.
# multiple charmm dihedrals may be slightly (but probably
# negligibly) faster but if anyone has a better reason to do
# one or the other, please chime in!
rb_coeffs = ConvertDihedralFromDihedralTrigToRB(
np.cos(dihedral.phi.in_units_of(units.radians)._value),
dihedral.phi, dihedral.fc0, *coefficients)
# LAMMPS only supports multi/harmonic (Ryckaert-Bellemans)
# up to 5 coefficients.
if (dihedral.phi in [0*units.degrees, 180*units.degrees] and
rb_coeffs[5]._value == rb_coeffs[6]._value == 0.0):
style = 'multi/harmonic'
temp = RBDihedralType(atomtype1, atomtype2,
atomtype3, atomtype4,
rb_coeffs[0],
rb_coeffs[1],
rb_coeffs[2],
rb_coeffs[3],
rb_coeffs[4],
0.0*units.kilojoules_per_mole,
0.0*units.kilojoules_per_mole)
if temp not in dihedral_type_dict:
dihedral_type_dict[temp] = dih_type_i
                                # multiply alternating powers by -1 for sign convention
dihedral_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f} '
'{4:18.8f} {5:18.8f} {6:18.8f}\n'.format(
dih_type_i, style,
rb_coeffs[0].in_units_of(self.ENERGY)._value,
-rb_coeffs[1].in_units_of(self.ENERGY)._value,
rb_coeffs[2].in_units_of(self.ENERGY)._value,
-rb_coeffs[3].in_units_of(self.ENERGY)._value,
rb_coeffs[4].in_units_of(self.ENERGY)._value))
dih_type_i += 1
dihedral_list.append('{0:-6d} {1:6d} {2:6d} {3:6d} {4:6d} {5:6d}\n'.format(
dihedral_i,
dihedral_type_dict[temp],
dihedral.atom1 + offset,
dihedral.atom2 + offset,
dihedral.atom3 + offset,
dihedral.atom4 + offset))
dihedral_i += 1
dihedral_style.add(style)
# If the 6th and/or 7th coefficients are non-zero, we decompose
# the dihedral into multiple CHARMM style dihedrals.
else:
logger.warn("Found unsupported dihedral style.")
continue
"""
for n, coeff in enumerate(coefficients):
style = 'charmm'
temp = ProperPeriodicDihedralType(atomtype1, atomtype2,
atomtype3, atomtype4,
dihedral.phi, coeff, n + 1)
if temp not in dihedral_type_dict:
dihedral_type_dict[temp] = dih_type_i
# NOTE: weighting factor assumed to be 0.0
# May need to add some additional checks here in the future
dihedral_coeffs.append('{0:d} {1} {2:18.8f} {3:18d} '
'{4:18d} {5:18.4f}\n'.format(
dih_type_i, style,
coeff.in_units_of(self.ENERGY)._value,
n + 1,
int(dihedral.phi.in_units_of(units.degrees)._value),
0.0))
dih_type_i += 1
dihedral_list.append('{0:-6d} {1:6d} {2:6d} {3:6d} {4:6d} {5:6d}\n'.format(
i + j + 1,
dihedral_type_dict[temp],
dihedral.atom1 + offset,
dihedral.atom2 + offset,
dihedral.atom3 + offset,
dihedral.atom4 + offset))
dihedral_style.add(style)
"""
elif isinstance(dihedral, ImproperHarmonicDihedral):
                    style = 'harmonic'
temp = ImproperHarmonicDihedralType(atomtype1, atomtype2,
atomtype3, atomtype4, dihedral.xi, dihedral.k)
if temp not in improper_type_dict:
improper_type_dict[temp] = imp_type_i
# NOTE: k includes the factor of 0.5 for harmonic in LAMMPS
improper_coeffs.append('{0:d} {1} {2:18.8f} {3:18.8f}\n'.format(
imp_type_i, style,
0.5 * dihedral.k.in_units_of(self.ENERGY / self.RAD**2)._value,
dihedral.xi.in_units_of(self.DEGREE)._value))
imp_type_i += 1
improper_list.append('{0:-6d} {1:6d} {2:6d} {3:6d} {4:6d} {5:6d}\n'.format(
improper_i,
improper_type_dict[temp],
dihedral.atom1 + offset,
dihedral.atom2 + offset,
dihedral.atom3 + offset,
dihedral.atom4 + offset))
improper_i += 1
improper_style.add(style)
else:
raise Exception("InterMol expects all internally stored"
" dihedrals to be of types ImproperHarmonic"
" or DihedralTrig.")
if len(dihedral_style) > 1:
logger.warn("More than one dihedral style found!")
if len(improper_style) > 1:
logger.warn("More than one improper style found!")
# atom specific information
logger.debug(" Writing atoms...")
for atom in molecule._atoms:
# type, mass and pair coeffs
if atom._atomtype[0] not in atom_type_dict:
atom_type_dict[atom._atomtype[0]] = a_type_i
mass_list.append('%d %8.4f # %s\n'
% (a_type_i,
atom._mass[0].in_units_of(self.MASS)._value,
atom.bondtype))
pair_coeff_list.append('{0:d} {1:10.6f} {2:10.6f} # {3:s}\n'.format(
a_type_i,
atom._epsilon[0].in_units_of(self.ENERGY)._value,
atom._sigma[0].in_units_of(self.DIST)._value,
atom.bondtype))
a_type_i += 1
# box minima
x_coord = atom._position[0].in_units_of(self.DIST)._value
y_coord = atom._position[1].in_units_of(self.DIST)._value
z_coord = atom._position[2].in_units_of(self.DIST)._value
if x_coord < x_min:
x_min = x_coord
if y_coord < y_min:
y_min = y_coord
if z_coord < z_min:
z_min = z_coord
# atom
atom_list.append('{0:-6d} {1:-6d} {2:-6d} {3:5.8f} {4:8.5f} {5:8.5f} {6:8.5f}\n'.format(
atom.index + offset,
atom.residue_index,
atom_type_dict[atom._atomtype[0]],
atom._charge[0].in_units_of(self.CHARGE)._value,
x_coord,
y_coord,
z_coord))
# velocity
vel_list.append('{0:-6d} {1:8.4f} {2:8.4f} {3:8.4f}\n'.format(
atom.index + offset,
atom._velocity[0].in_units_of(self.VEL)._value,
atom._velocity[1].in_units_of(self.VEL)._value,
atom._velocity[2].in_units_of(self.VEL)._value))
offset += len(molecule._atoms)
# Write the actual data file.
with open(data_file, 'w') as f:
# front matter
f.write(System._sys._name + '\n')
f.write('\n')
n_atoms = len(atom_list) - 3
n_bonds = len(bond_list) - 3
n_angles = len(angle_list) - 3
n_dihedrals = len(dihedral_list) - 3
n_impropers = len(improper_list) - 3
n_atom_types = len(pair_coeff_list) - 3
n_bond_types = len(bond_coeffs) - 3
n_angle_types = len(angle_coeffs) - 3
n_dihedral_types = len(dihedral_coeffs) - 3
n_improper_types = len(improper_coeffs) - 3
f.write('{0} atoms\n'.format(n_atoms))
f.write('{0} bonds\n'.format(n_bonds))
f.write('{0} angles\n'.format(n_angles))
f.write('{0} dihedrals\n'.format(n_dihedrals))
f.write('{0} impropers\n'.format(n_impropers))
f.write('\n')
f.write('{0} atom types\n'.format(n_atom_types))
if n_bond_types > 0:
f.write('{0} bond types\n'.format(n_bond_types))
if n_angle_types > 0:
f.write('{0} angle types\n'.format(n_angle_types))
if n_dihedral_types > 0:
f.write('{0} dihedral types\n'.format(n_dihedral_types))
if n_improper_types > 0:
f.write('{0} improper types\n'.format(n_improper_types))
f.write('\n')
# shifting of box dimensions
f.write('{0:10.6f} {1:10.6f} xlo xhi\n'.format(
x_min,
x_min + System._sys.box_vector[0][0].in_units_of(self.DIST)._value))
f.write('{0:10.6f} {1:10.6f} ylo yhi\n'.format(
y_min,
y_min + System._sys.box_vector[1][1].in_units_of(self.DIST)._value))
f.write('{0:10.6f} {1:10.6f} zlo zhi\n'.format(
z_min,
z_min + System._sys.box_vector[2][2].in_units_of(self.DIST)._value))
# masses
for mass in mass_list:
f.write(mass)
# forcefield coefficients
if len(pair_coeff_list) > 3:
for pair in pair_coeff_list:
f.write(pair)
if len(bond_coeffs) > 3:
for bond in bond_coeffs:
f.write(bond)
if len(angle_coeffs) > 3:
for angle in angle_coeffs:
f.write(angle)
if len(dihedral_coeffs) > 3:
for dihedral in dihedral_coeffs:
f.write(dihedral)
if len(improper_coeffs) > 3:
for improper in improper_coeffs:
f.write(improper)
# atoms and velocities
for atom in atom_list:
f.write(atom)
for vel in vel_list:
f.write(vel)
# topology
if len(bond_list) > 3:
for bond in bond_list:
f.write(bond)
if len(angle_list) > 3:
for angle in angle_list:
f.write(angle)
if len(dihedral_list) > 3:
for dihedral in dihedral_list:
f.write(dihedral)
if len(improper_list) > 3:
for improper in improper_list:
f.write(improper)
# Write the corresponding input file.
basename = os.path.splitext(data_file)[0]
input_filename = '{0}.input'.format(basename)
with open(input_filename, 'w') as f:
f.write('units {0}\n'.format(unit_set))
f.write('atom_style full\n') # TODO
f.write('\n')
f.write('dimension 3\n') # TODO
f.write('boundary p p p\n') # TODO
f.write('\n')
# non-bonded
f.write('pair_style lj/cut/coul/long 10.0 10.0\n') # TODO: match mdp
if System._sys.combination_rule == 3:
f.write('pair_modify mix geometric\n')
elif System._sys.combination_rule == 2:
f.write('pair_modify mix arithmetic\n')
else:
logger.warn("Unsupported combination rule: {0}".format(
System._sys.combination_rule))
f.write('kspace_style pppm 1.0e-5\n') # TODO: match mdp
f.write('\n')
# bonded
if len(bond_coeffs) > 3:
f.write('bond_style hybrid {0}\n'.format(
" ".join(bond_style)))
if len(angle_coeffs) > 3:
f.write('angle_style hybrid {0}\n'.format(
" ".join(angle_style)))
if len(dihedral_coeffs) > 3:
f.write('dihedral_style hybrid {0}\n'.format(
" ".join(dihedral_style)))
if len(improper_coeffs) > 3:
f.write('improper_style hybrid {0}\n'.format(
" ".join(improper_style)))
f.write('special_bonds lj {0} {1} {2} coul {3} {4} {5}\n'.format(
0.0,
0.0,
System._sys.lj_correction,
0.0,
0.0,
System._sys.coulomb_correction))
f.write('\n')
# read data
f.write('read_data {0}\n'.format(os.path.basename(data_file)))
f.write('\n')
# output energies
energy_terms = " ".join(['ebond',
'eangle',
'edihed',
'eimp',
'epair',
'evdwl',
'ecoul',
'elong',
'etail',
'pe'])
f.write('thermo_style custom {0}\n'.format(energy_terms))
f.write('\n')
# SHAKE constraints
if len(shake_bond_types) > 0:
f.write('fix fSHAKE all shake 1.0e-4 20 10 b')
for btype in shake_bond_types:
f.write(' {0}'.format(bond_type_dict[btype]))
if len(shake_angle_types) > 0:
f.write(' a')
for atype in shake_angle_types:
f.write(' {0}'.format(angle_type_dict[atype]))
f.write('\n')
f.write('run 0\n')
```
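For orientation, the data-file front matter that the writer above emits has roughly this shape; all counts and box bounds below are illustrative placeholders, not output from a real run:
```python
# Illustrative shape of the generated LAMMPS data-file header:
#
#   my_system
#
#   1200 atoms
#   1100 bonds
#   ...
#   12 atom types
#   8 bond types
#
#   -0.250000   3.750000 xlo xhi
#   -0.250000   3.750000 ylo yhi
#   -0.250000   3.750000 zlo zhi
```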
#### File: intermol/types/abstract_angle_type.py
```python
class AbstractAngleType(object):
__slots__ = ['atom1', 'atom2', 'atom3']
def __init__(self, atom1, atom2, atom3):
"""An abstract representation of a generic angle type."""
self.atom1 = atom1
self.atom2 = atom2
self.atom3 = atom3
def __eq__(self, angle_type):
return ((self.atom1 == angle_type.atom1 and
self.atom2 == angle_type.atom2 and
self.atom3 == angle_type.atom3)
or
(self.atom1 == angle_type.atom3 and
self.atom2 == angle_type.atom2 and
self.atom3 == angle_type.atom1))
def __hash__(self):
# Combine the end atoms order-independently so that reversed-but-equal
# angle types hash alike (required for consistency with __eq__ above).
return hash((self.atom2, frozenset((self.atom1, self.atom3))))
```
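A quick sanity check of the symmetric comparison above; the atom identifiers here are arbitrary placeholders:
```python
# Minimal sketch: equal-but-reversed angle types compare (and, with the
# order-independent hash above, hash) the same.
a = AbstractAngleType("C", "O", "H")
b = AbstractAngleType("H", "O", "C")
assert a == b
assert hash(a) == hash(b)
```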
#### File: intermol/types/abstract_bond_type.py
```python
class AbstractBondType(object):
__slots__ = ['atom1', 'atom2']
def __init__(self, atom1, atom2):
self.atom1 = atom1
self.atom2 = atom2
def __eq__(self, bond_type):
return ((self.atom1 == bond_type.atom1 and
self.atom2 == bond_type.atom2)
or
(self.atom1 == bond_type.atom2 and
self.atom2 == bond_type.atom1))
def __hash__(self):
# Order-independent hash so reversed-but-equal bond types hash alike.
return hash(frozenset((self.atom1, self.atom2)))
```
#### File: intermol/types/fourier_dihedral_type.py
```python
import sys
sys.path.append('..')
from intermol.decorators import *
from abstract_dihedral_type import *
class FourierDihedralType(AbstractDihedralType):
@accepts_compatible_units(None,
None,
None,
None,
units.kilojoules_per_mole,
units.kilojoules_per_mole,
units.kilojoules_per_mole,
units.kilojoules_per_mole)
def __init__(self, atom1, atom2, atom3, atom4, c1, c2, c3, c4):
"""
"""
AbstractDihedralType.__init__(self, atom1, atom2, atom3, atom4)
self.c1 = c1
self.c2 = c2
self.c3 = c3
self.c4 = c4
```
#### File: intermol/unit/unit_operators.py
```python
__author__ = "<NAME>"
__version__ = "0.5"
from unit import Unit, is_unit
from quantity import Quantity, is_quantity
# Attach methods of Unit class that return a Quantity to Unit class.
# I put them here to avoid circular dependence in imports.
# i.e. Quantity depends on Unit, but not vice versa
def _unit_class_rdiv(self, other):
"""
Divide another object type by a Unit.
Returns a new Quantity with a value of other and units
of the inverse of self.
"""
if is_unit(other):
raise NotImplementedError('programmer is surprised __rtruediv__ was called instead of __truediv__')
else:
# print "R scalar / unit"
unit = pow(self, -1.0)
value = other
return Quantity(value, unit).reduce_unit(self)
Unit.__rtruediv__ = _unit_class_rdiv
Unit.__rdiv__ = _unit_class_rdiv
def _unit_class_mul(self, other):
"""Multiply a Unit by an object.
If other is another Unit, returns a new composite Unit.
Exponents of similar dimensions are added. If self and
other share similar BaseDimension, but
with different BaseUnits, the resulting BaseUnit for that
BaseDimension will be that used in self.
If other is a not another Unit, this method returns a
new Quantity... UNLESS other is a Quantity and the resulting
unit is dimensionless, in which case the underlying value type
of the Quantity is returned.
"""
if is_unit(other):
if self in Unit._multiplication_cache:
if other in Unit._multiplication_cache[self]:
return Unit._multiplication_cache[self][other]
else:
Unit._multiplication_cache[self] = {}
# print "unit * unit"
result1 = {} # dictionary of dimensionTuple: (BaseOrScaledUnit, exponent)
for unit, exponent in self.iter_base_or_scaled_units():
d = unit.get_dimension_tuple()
if d not in result1:
result1[d] = {}
assert unit not in result1[d]
result1[d][unit] = exponent
for unit, exponent in other.iter_base_or_scaled_units():
d = unit.get_dimension_tuple()
if d not in result1:
result1[d] = {}
if unit not in result1[d]:
result1[d][unit] = 0
result1[d][unit] += exponent
result2 = {} # stripped of zero exponents
for d in result1:
for unit in result1[d]:
exponent = result1[d][unit]
if exponent != 0:
assert unit not in result2
result2[unit] = exponent
new_unit = Unit(result2)
Unit._multiplication_cache[self][other] = new_unit
return new_unit
elif is_quantity(other):
# print "unit * quantity"
value = other._value
unit = self * other.unit
return Quantity(value, unit).reduce_unit(self)
else:
# print "scalar * unit"
value = other
unit = self
# Is reduce_unit needed here? I hope not, there is a performance issue...
# return Quantity(other, self).reduce_unit(self)
return Quantity(other, self)
Unit.__mul__ = _unit_class_mul
Unit.__rmul__ = Unit.__mul__
Unit._multiplication_cache = {}
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
```
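A short usage sketch for the operators attached above. It assumes the `unit` package exposes base units such as `meter` and `second`, as in the simtk.unit code this module derives from:
```python
# Hypothetical usage; `meter` and `second` are assumed exports of `unit`.
from unit import meter, second

q = 5.0 * meter            # Unit.__rmul__: scalar * Unit -> Quantity
rate = 2.0 / second        # Unit.__rtruediv__: scalar / Unit -> Quantity
area_unit = meter * meter  # Unit.__mul__: Unit * Unit -> cached composite Unit
```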
#### File: InterMol/testing/gromacs_driver.py
```python
import subprocess
from collections import OrderedDict
import sys
import os
import pdb
import logging
import intermol.unit as units
from intermol.gromacs_extension.gromacs_topology_parser import GromacsTopologyParser
import intermol.gromacs_extension.gromacs_structure_parser as GromacsStructureParser
logger = logging.getLogger('InterMolLog')
def readFile(top_in, gro_in, gropath):
# ensure .gro and .top are a valid match
gromacs_energies(top_in, gro_in,
'inputs/Gromacs/grompp.mdp', gropath, '',
grompp_check=True)
logger.info('Reading GROMACS topology {0}'.format(top_in))
GromacsTopologyParser._GroTopParser = GromacsTopologyParser()
GromacsTopologyParser._GroTopParser.parse_topology(top_in)
logger.info('Topology loaded')
logger.info('Reading GROMACS structure {0}'.format(gro_in))
GromacsStructureParser.readStructure(gro_in)
logger.info('Structure loaded')
def writeFile(outtop, outgro):
logger.info('Writing GROMACS file {0}'.format(outgro))
GromacsStructureParser.writeStructure(outgro)
logger.info('Writing GROMACS file {0}'.format(outtop))
if not GromacsTopologyParser._GroTopParser:
GromacsTopologyParser._GroTopParser = GromacsTopologyParser()
GromacsTopologyParser._GroTopParser.write_topology(outtop)
logger.info('Write complete')
def gromacs_energies(top=None, gro=None, mdp=None, gropath='',grosuff='', grompp_check=False):
"""
gropath = path to gromacs binaries
grosuff = suffix of gromacs binaries, usually '' or '_d'
"""
if not grompp_check:
logger.info('Evaluating energy of {0}'.format(gro))
directory, _ = os.path.split(top)
tpr = os.path.join(directory , 'topol.tpr')
ener = os.path.join(directory , 'ener.edr')
ener_xvg = os.path.join(directory , 'energy.xvg')
conf = os.path.join(directory , 'confout.gro')
mdout = os.path.join(directory , 'mdout.mdp')
state = os.path.join(directory , 'state.cpt')
traj = os.path.join(directory , 'traj.trr')
log = os.path.join(directory , 'md.log')
stdout = os.path.join(directory, 'gromacs_stdout.txt')
stderr = os.path.join(directory, 'gromacs_stderr.txt')
grompp_bin = os.path.join(gropath, 'grompp' + grosuff)
mdrun_bin = os.path.join(gropath, 'mdrun' + grosuff)
genergy_bin = os.path.join(gropath, 'g_energy' + grosuff)
# grompp'n it up
cmd = [grompp_bin, '-f', mdp, '-c', gro, '-p', top, '-o', tpr, '-po', mdout, '-maxwarn', '1']
logger.debug('Running GROMACS with command:\n %s' % ' '.join(cmd))
with open(stdout, 'w') as out, open(stderr, 'w') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error('grompp failed. See %s' % stderr)
raise Exception('grompp failed for {0}'.format(top))
if grompp_check:
return
# mdrunin'
cmd = [mdrun_bin, '-nt', '1', '-s', tpr, '-o', traj, '-cpo', state, '-c',
conf, '-e', ener, '-g', log]
logger.debug('Running GROMACS with command:\n %s' % ' '.join(cmd))
with open(stdout, 'a') as out, open(stderr, 'a') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err)
if exit:
logger.error('mdrun failed. See %s' % stderr)
raise Exception('mdrun failed for {0}'.format(top))
# energizin'
select = " ".join(map(str, range(1, 20))) + " 0 "
cmd = 'echo {select} | {genergy_bin} -f {ener} -o {ener_xvg} -dp'.format(
select=select, genergy_bin=genergy_bin, ener=ener, ener_xvg=ener_xvg)
logger.debug('Running GROMACS with command:\n %s' % cmd)
with open(stdout, 'a') as out, open(stderr, 'a') as err:
exit = subprocess.call(cmd, stdout=out, stderr=err, shell=True)
if exit:
logger.error('g_energy failed. See %s' % stderr)
raise Exception('g_energy failed for {0}'.format(top))
# extract g_energy output and parse initial energies
with open(ener_xvg) as f:
all_lines = f.readlines()
types = []
for line in all_lines:
if line[:3] == '@ s':
types.append(line.split('"')[1])
# take last line
data = map(float, all_lines[-1].split()[1:]) # [0] is the time
# give everything units
data = [value * units.kilojoules_per_mole for value in data]
# pack it up in a dictionary
e_out = OrderedDict(zip(types, data))
# discard non-energy terms
unwanted = ['Kinetic En.', 'Total Energy', 'Temperature', 'Pressure',
'Volume', 'Box-X', 'Box-Y', 'Box-Z', 'Pres. DC']
for group in unwanted:
if group in e_out:
del e_out[group]
# dispersive energies - do buckingham energies also get dumped here?
dispersive = ['LJ (SR)', 'LJ-14', 'Disper.corr.']
e_out['Dispersive'] = 0 * units.kilojoules_per_mole
for group in dispersive:
if group in e_out:
e_out['Dispersive'] += e_out[group]
# electrostatic energies
electrostatic = ['Coulomb (SR)', 'Coulomb-14', 'Coul. recip.']
e_out['Electrostatic'] = 0 * units.kilojoules_per_mole
for group in electrostatic:
if group in e_out:
e_out['Electrostatic'] += e_out[group]
e_out['Non-bonded'] = e_out['Electrostatic'] + e_out['Dispersive']
# all the various dihedral energies - what else goes in here?
all_dihedrals = ['Ryckaert-Bell.', 'Proper Dih.', 'Improper Dih.']
e_out['All dihedrals'] = 0 * units.kilojoules_per_mole
for group in all_dihedrals:
if group in e_out:
e_out['All dihedrals'] += e_out[group]
return e_out, ener_xvg
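# Example invocation (a sketch -- the paths below are placeholders and the
# GROMACS binaries are assumed to be on PATH):
#
#     energies, xvg = gromacs_energies(
#         top='inputs/Gromacs/system.top',
#         gro='inputs/Gromacs/system.gro',
#         mdp='inputs/Gromacs/grompp.mdp',
#         gropath='', grosuff='')
#     for term, value in energies.items():
#         print(term, value)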
``` |
{
"source": "jpthompson17/msm_we",
"score": 2
} |
#### File: msm_we/tests/test_fpt.py
```python
import numpy as np
import unittest
import msm_we.utils as utils
from msm_we.fpt import MatrixFPT, MarkovFPT
class TestMFPT(unittest.TestCase):
def setUp(self):
n_states = 5
self.T = utils.random_markov_matrix(n_states, seed=1)
def testMarkovFPTMean(self):
markov_mfpts = MarkovFPT.mean_fpts(self.T, [0], [4])
self.assertTrue(np.isclose(markov_mfpts['mfptAB'], 6.420918178038423))
self.assertTrue(np.isclose(markov_mfpts['mfptBA'], 4.920174169581114))
def testMatrixFPTDirectional(self):
directional_mfpt = MatrixFPT.directional_mfpt(self.T, [0], [4], [1])
self.assertTrue(np.isclose(directional_mfpt, 6.420918178038424))
def testMatrixFPT2TargetMicrostate(self):
mfpts_to_micro = MatrixFPT.mfpts_to_target_microstate(self.T, 4)
result = np.array([6.42091818, 5.35994556, 7.24671735, 6.81752892, 0.0])
self.assertTrue(np.allclose(mfpts_to_micro, result))
def testMatrixFPTMatrix(self):
mfpts_matrix = MatrixFPT.mfpts_matrix(self.T)
result = np.array(
[
[0.0, 2.62899481, 7.65074814, 4.37254081, 6.42091818],
[5.58481382, 0.0, 6.21851058, 4.00702426, 5.35994556],
[4.98545579, 3.15239528, 0.0, 3.43921253, 7.24671735],
[4.37802054, 3.33697554, 6.03713191, 0.0, 6.81752892],
[4.92017417, 3.26320798, 7.20000135, 4.36442835, 0.0],
]
)
self.assertTrue(np.allclose(mfpts_matrix, result))
def testMatrixFPTMinCommuteTime(self):
mfpts_matrix = MatrixFPT.mfpts_matrix(self.T)
min_comm_time, a, b = MatrixFPT.min_commute_time(mfpts_matrix)
self.assertTrue(np.isclose(min_comm_time, 7.343999799826479))
self.assertEqual(a, 1)
self.assertEqual(b, 3)
def testMatrixFPTMaxCommuteTime(self):
mfpts_matrix = MatrixFPT.mfpts_matrix(self.T)
max_comm_time, a, b = MatrixFPT.max_commute_time(mfpts_matrix)
self.assertTrue(np.isclose(max_comm_time, 14.446718700939037))
self.assertEqual(a, 2)
self.assertEqual(b, 4)
def testMatrixFPTDistribution(self):
fpt_distribution = MatrixFPT.fpt_distribution(self.T, [0], [4], [0.5], max_n_lags=10)
result = [[ 0., 0. ],
[ 1., 0.11289507],
[ 2., 0.24431041],
[ 3., 0.13232006],
[ 4., 0.11478687],
[ 5., 0.0968178 ],
[ 6., 0.08185738],
[ 7., 0.06886433],
[ 8., 0.05804509],
[ 9., 0.04890027],
[10., 0.04120272]]
self.assertTrue(np.allclose(fpt_distribution, result))
def testMatrixFPTDistributionLog(self):
fpt_distribution = MatrixFPT.fpt_distribution(
self.T, [0], [4], [0.5], max_n_lags=10, clean_recycling=True, logscale=True)
result = [[0.00000000e+000, 0.00000000e+000],
[1.00000000e+001, 1.00000000e+000],
[1.66000000e+002, 3.85697122e-011],
[2.78200000e+003, 1.48701736e-204],
[4.64150000e+004, 0.00000000e+000],
[7.74263000e+005, 0.00000000e+000],
[1.29154960e+007, 0.00000000e+000],
[2.15443469e+008, 0.00000000e+000],
[3.59381366e+009, 0.00000000e+000],
[5.99484250e+010, 0.00000000e+000],
[1.00000000e+012, 0.00000000e+000]]
self.assertTrue(np.allclose(fpt_distribution, result))
``` |
{
"source": "jpthor/digital-cash",
"score": 2
} |
#### File: jpthor/digital-cash/blockcoin_tests.py
```python
import pytest, time, uuid, ecdsa
import identities
from myblockcoin import *
def test_blocks():
bank = Bank(id=0, private_key=identities.bank_private_key(0))
# Good block
block = Block(txns=[])
block.sign(bank.private_key)
bank.handle_block(block)
assert len(bank.blocks) == 1
# Wrong bank signs
block = Block(txns=[])
wrong_private_key = identities.bank_private_key(1000)
block.sign(wrong_private_key)
with pytest.raises(ecdsa.keys.BadSignatureError):
bank.handle_block(block)
def test_bad_tx():
bank = Bank(id=0, private_key=identities.bank_private_key(0))
tx = identities.airdrop_tx()
bank.airdrop(tx)
tx = prepare_simple_tx(
utxos=bank.fetch_utxos(identities.alice_public_key),
sender_private_key=identities.alice_private_key,
recipient_public_key=identities.bob_public_key,
amount=10,
)
# Put in a phony signature
tx.tx_ins[0].signature = identities.alice_private_key.sign(b"bad")
with pytest.raises(ecdsa.keys.BadSignatureError):
bank.handle_tx(tx)
def test_airdrop():
bank = Bank(id=0, private_key=identities.bank_private_key(0))
tx = identities.airdrop_tx()
bank.airdrop(tx)
assert 500_000 == bank.fetch_balance(identities.alice_public_key)
assert 500_000 == bank.fetch_balance(identities.bob_public_key)
def test_utxo():
bank = Bank(id=0, private_key=identities.bank_private_key(0))
tx = identities.airdrop_tx()
bank.airdrop(tx)
assert len(bank.blocks) == 1
# Alice sends 10 to Bob
tx = prepare_simple_tx(
utxos=bank.fetch_utxos(identities.alice_public_key),
sender_private_key=identities.alice_private_key,
recipient_public_key=identities.bob_public_key,
amount=10
)
block = Block(txns=[tx])
block.sign(identities.bank_private_key(1))
bank.handle_block(block)
assert 500_000 - 10 == bank.fetch_balance(identities.alice_public_key)
assert 500_000 + 10 == bank.fetch_balance(identities.bob_public_key)
```
#### File: digital-cash/experiments/mining.py
```python
import time, logging, threading, hashlib
logger = logging.getLogger(__name__)
mining_interrupt = threading.Event()
chain_lock = threading.Lock()
chain = []
bits = 21
target = 1 << (256 - bits)
def message_generator():
"""Arbitrary message for blocks"""
number = 1
while True:
# 8 bytes is plenty for a demo counter.
yield number.to_bytes(8, 'little')
number += 1
class Block:
def __init__(self, previous, nonce=None):
self.previous = previous
self.nonce = nonce
@property
def id(self):
return mining_hash(self.header(self.nonce))
def header(self, nonce):
return f"{self.previous}{nonce}"
def __repr__(self):
return (f"Block(previous={self.previous}, nonce={self.nonce}, "
f"id={self.id})")
def mining_hash(s):
if not isinstance(s, bytes):
s = s.encode()
return hashlib.sha256(s).hexdigest()
def mine_block(block):
nonce = 0
while int(mining_hash(block.header(nonce)), 16) >= target:
nonce += 1
if mining_interrupt.is_set():
print("Mining interrupted")
mining_interrupt.clear()
return
block.nonce = nonce
print(f"Nonce found {block}")
return block
def mine_forever():
while True:
unmined_block = Block(previous=chain[-1].id)
mined_block = mine_block(unmined_block)
# This is False if mining was interrupted
# Perhaps an exception would be wiser ...
if mined_block:
with chain_lock:
chain.append(mined_block)
def chain_is_valid():
current_block = chain[0]
for block in chain[1:]:
assert block.previous == current_block.id
assert int(block.id, 16) < target
current_block = block
def main():
global chain
thread = threading.Thread(target=mine_forever)
thread.start()
while True:
if len(chain) == 2:
mining_interrupt.set()
print("Set mining interrupt")
with chain_lock:
block = Block(
previous="0000070cfe252d71f30a7d66e652174fce5bb6dc90cb7c52997871ffbc731433",
nonce=62706,
)
chain.append(block)
chain_is_valid()
time.sleep(.1)
if __name__ == "__main__":
genesis = Block(
previous="0" * 64,
nonce=0
)
print("Setting genesis block: {genesis}")
chain.append(genesis)
main()
```
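A back-of-envelope check on the difficulty constant used above:
```python
# With bits = 21, a header hash lands below target = 1 << (256 - 21) with
# probability 2**-21, so mining a block takes about 2.1 million sha256
# evaluations on average.
bits = 21
expected_attempts = 2 ** bits
print(expected_attempts)  # 2097152
```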
#### File: digital-cash/experiments/myalternating.py
```python
import socket, socketserver, sys, logging, os, threading, re
host = "0.0.0.0"
port = 10000
address = (host, port)
current = 0
ID = int(os.environ["ID"])
peer_hostnames= os.environ["PEERS"].split(',')
logging.basicConfig(
level=logging.INFO,
format='%(asctime)-15s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
class TCPHandler(socketserver.BaseRequestHandler):
def handle(self):
# import pdb; pdb.set_trace()
message_bytes = self.request.recv(10).strip()
peer = self.request.getpeername()
logger.info(f'Received {str(message_bytes)} from {peer}')
if message_bytes == b"ping":
self.request.sendall(b"pong")
logger.info(f'Sent b"pong"')
def serve():
schedule_pings()
server = socketserver.TCPServer(address, TCPHandler)
server.serve_forever()
def ping_peers():
for hostname in peer_hostnames:
ping(hostname)
schedule_pings()
def schedule_pings():
global current
current = (current + 1) % 3
if ID == current:
threading.Timer(1, ping_peers).start()
def ping(hostname):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
addr = (hostname, port)
s.connect(addr)
s.sendall(b"ping")
logger.info(f'Sent b"ping"')
data = s.recv(10)
logger.info(f'Received {str(data)}')
if __name__ == "__main__":
command = sys.argv[1]
if command == "serve":
serve()
elif command == "ping":
# ping() needs a target host; take it from the command line.
ping(sys.argv[2])
else:
print("python ping_pong.py <serve|ping>")
```
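The script reads its identity and peer list from the environment, so a node is launched roughly like this (hostnames are placeholders):
```python
# Sketch of launching one of three alternating nodes:
#
#   ID=0 PEERS=node1,node2 python myalternating.py serve
```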
#### File: digital-cash/experiments/thread.py
```python
import sys, time, random
from threading import Thread, Event, Lock
a = 0
event = Event()
lock = Lock()
l = []
def event_demo():
def thread1(threadname):
while True:
if event.is_set():
print(a)
event.clear()
def thread2(threadname):
global a
while 1:
a += 1
event.set()
time.sleep(1)
thread1 = Thread(target=thread1, args=("Thread-1",))
thread2 = Thread(target=thread2, args=("Thread-2",))
thread1.start()
thread2.start()
def no_lock_demo():
def appender():
global l
while True:
length = len(l)
if length >= 100:
# Exit once the list is full, otherwise these threads never
# finish and the join() calls below block forever.
return
time.sleep(random.random() / 1000)
l.append(length)
threads = [Thread(target=appender) for i in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(l == list(range(100)))
print(l)
def lock_demo():
def appender():
global l
while True:
# Note: program halts if we return without releasing
lock.acquire()
length = len(l)
if length < 100:
time.sleep(random.random() / 1000)
l.append(length)
lock.release()
if length >= 100:
return
threads = [Thread(target=appender) for i in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(l == list(range(100)))
print(l)
def lock_cm_demo():
def appender():
global l
while True:
# Note: releasing handle for us
with lock:
length = len(l)
if length >= 100:
return
time.sleep(random.random() / 1000)
l.append(length)
threads = [Thread(target=appender) for i in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(l == list(range(100)))
print(l)
if __name__ == "__main__":
eval(f"{sys.argv[1]}()")
```
#### File: jpthor/digital-cash/pngcoin.py
```python
import io
import pickle
from PIL import Image
###########
# Helpers #
###########
def handle_user_input(user_input):
if user_input.lower() == "y":
return True
elif user_input.lower() == "n":
return False
else:
user_input = input('Please enter "y" or "n"')
return handle_user_input(user_input)
############
# PNG Coin #
############
class PNGCoin:
def __init__(self, transfers):
self.transfers = transfers # PIL.Image instances
def serialize(self):
return pickle.dumps(self)
@classmethod
def deserialize(cls, serialized):
return pickle.loads(serialized)
def to_disk(self, filename):
serialized = self.serialize()
with open(filename, "wb") as f:
f.write(serialized)
@classmethod
def from_disk(cls, filename):
with open(filename, "rb") as f:
serialized = f.read()
return cls.deserialize(serialized)
def validate(self):
for transfer in self.transfers:
transfer.show()
user_input = input("Is this a valid minting signature? (y/n)")
if not handle_user_input(user_input):
return False
return True
```
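A round-trip sketch for the class above; "transfer.png" is a placeholder for a real signature image on disk:
```python
# Hypothetical usage; "transfer.png" must exist on disk.
from PIL import Image

coin = PNGCoin([Image.open("transfer.png")])
coin.to_disk("example.pngcoin")
restored = PNGCoin.from_disk("example.pngcoin")
restored.validate()  # shows each image and asks the user to approve it
```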
#### File: digital-cash/powcoin/powcoin_tests.py
```python
from copy import deepcopy
import pytest
import mypowcoin as p
import identities as ids
###########
# Helpers #
###########
# Set difficulty very low
p.POW_TARGET = 2 ** (256 - 2)
def send_tx(node, sender_private_key, recipient_public_key, amount):
utxos = node.fetch_utxos(sender_private_key.get_verifying_key())
return p.prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount)
def mine_block(node, miner_public_key, prev_block, mempool, nonce=0):
coinbase = p.prepare_coinbase(miner_public_key)
unmined_block = p.Block(
txns=[coinbase] + deepcopy(mempool),
prev_id=prev_block.id,
nonce=nonce,
)
mined_block = p.mine_block(unmined_block)
node.handle_block(mined_block)
return mined_block
#########
# Tests #
#########
def test_duplicate():
node = p.Node(address="")
alice_node = p.Node(address="")
# Bob mines height=0,1
p.mine_genesis_block(node, ids.bob_public_key)
p.mine_genesis_block(alice_node, ids.bob_public_key)
block = mine_block(node, ids.bob_public_key, node.blocks[0], [])
# Assert handling block already in blocks
with pytest.raises(Exception):
node.handle_block(block)
assert alice_node.blocks[0] == node.blocks[0]
block = mine_block(alice_node, ids.alice_public_key, node.blocks[0], [])
node.handle_block(block) # goes into branches
assert len(node.branches) == 1
# Assert handling block already in branches
with pytest.raises(Exception):
node.handle_block(block)
def test_extend_chain():
node = p.Node(address="")
# Bob mines height=0,1
p.mine_genesis_block(node, ids.bob_public_key)
block = mine_block(node, ids.bob_public_key, node.blocks[0], [])
# Alice's balance unchanged, Bob received block subsidy
assert node.fetch_balance(ids.alice_public_key) == 0
assert node.fetch_balance(ids.bob_public_key) == 2*p.BLOCK_SUBSIDY
# Chain extended
assert len(node.blocks) == 2
assert node.blocks[-1] == block
# Branches empty
assert node.branches == []
def test_fork_chain():
node = p.Node(address="")
# Bob mines height=0,1
p.mine_genesis_block(node, ids.bob_public_key)
bob_block = mine_block(node, ids.bob_public_key, node.blocks[0], [])
# Alice mines height=1 too
alice_block = mine_block(node, ids.alice_public_key, node.blocks[0], [])
# UTXO database unchanged
assert node.fetch_balance(ids.alice_public_key) == 0
assert node.fetch_balance(ids.bob_public_key) == 2*p.BLOCK_SUBSIDY
# Chain unchanged
assert len(node.blocks) == 2
assert alice_block not in node.blocks
# One more chain with one block on it
assert len(node.branches) == 1
assert node.branches[0] == [alice_block]
def test_block_extending_fork():
node = p.Node(address="")
# Bob mines height=0,1,2
p.mine_genesis_block(node, ids.bob_public_key)
mine_block(node, ids.bob_public_key, node.blocks[0], [])
bob_block = mine_block(node, ids.bob_public_key, node.blocks[1], [])
# Alice mines height=1
alice_block = mine_block(node, ids.alice_public_key, node.blocks[0], [])
# Alice mines block on top of her branch
alice_block = mine_block(node, ids.alice_public_key, node.branches[0][0], [])
# UTXOs
assert node.fetch_balance(ids.alice_public_key) == 0
assert node.fetch_balance(ids.bob_public_key) == 3*p.BLOCK_SUBSIDY
# Now new branches
assert len(node.blocks) == 3
assert len(node.branches) == 1
assert len(node.branches[0]) == 2
def test_block_forking_fork():
node = p.Node(address="")
# Bob mines height=0,1,2
p.mine_genesis_block(node, ids.bob_public_key)
mine_block(node, ids.bob_public_key, node.blocks[0], [])
bob_block = mine_block(node, ids.bob_public_key, node.blocks[1], [])
# Alice mines height=1
first = mine_block(node, ids.alice_public_key, node.blocks[0], [])
# Alice mines 2 separate blocks top of her branch, each at height 2
second = mine_block(node, ids.alice_public_key, node.branches[0][0], [])
third = mine_block(node, ids.alice_public_key, node.branches[0][0], [],
nonce=second.nonce+1)
# UTXOs and chain unaffected
assert node.fetch_balance(ids.alice_public_key) == 0
assert node.fetch_balance(ids.bob_public_key) == 3*p.BLOCK_SUBSIDY
# One more branch added, which contains alice's first block and this one
assert len(node.blocks) == 3
assert len(node.branches) == 2
assert node.branches[0] == [first, second]
assert node.branches[1] == [first, third]
def test_successful_reorg():
node = p.Node(address="")
alice_node = p.Node(address="")
# Bob mines height=0,1,2
b0 = p.mine_genesis_block(node, ids.bob_public_key)
b1 = mine_block(node, ids.bob_public_key, node.blocks[0], [])
# height=2 contains a bob->alice txn
bob_to_alice = send_tx(node, ids.bob_private_key,
ids.alice_public_key, 10)
b2 = mine_block(node, ids.bob_public_key, node.blocks[1],
[bob_to_alice])
# Alice accepts bob's first two blocks, but not the third
p.mine_genesis_block(alice_node, ids.bob_public_key) # FIXME confusing
alice_node.handle_block(b1)
# FIXME just borrow everything up until this point from another test
# Create and handle two blocks atop Alice's chain
a2 = mine_block(alice_node, ids.alice_public_key, node.blocks[1], [])
node.handle_block(a2)
# Chains
assert len(node.blocks) == 3
print([b0, b1, b2])
assert node.blocks == [b0, b1, b2]
assert len(node.branches) == 1
assert node.branches[0] == [a2]
# Balances
assert (bob_to_alice.id, 0) in node.utxo_set
assert (bob_to_alice.id, 1) in node.utxo_set
assert node.fetch_balance(ids.alice_public_key) == 10
assert node.fetch_balance(ids.bob_public_key) == 3*p.BLOCK_SUBSIDY - 10
# Use alice's node to assemble this txn b/c she doesn't have any utxos in bob's view of world
alice_to_bob = send_tx(alice_node, ids.alice_private_key,
ids.bob_public_key, 20)
a3 = mine_block(node, ids.alice_public_key, node.branches[0][0],
[alice_to_bob])
# Chains
assert len(node.blocks) == 4
assert node.blocks == [b0, b1, a2, a3]
assert len(node.branches) == 1
assert node.branches[0] == [b2]
# Balances
assert (bob_to_alice.id, 0) not in node.utxo_set
assert (bob_to_alice.id, 1) not in node.utxo_set
assert (alice_to_bob.id, 0) in node.utxo_set
assert (alice_to_bob.id, 1) in node.utxo_set
assert node.fetch_balance(ids.alice_public_key) == 2*p.BLOCK_SUBSIDY - 20
assert node.fetch_balance(ids.bob_public_key) == 2*p.BLOCK_SUBSIDY + 20
# Mempool
assert len(node.mempool) == 1
assert bob_to_alice in node.mempool
def test_unsuccessful_reorg():
# FIXME: ideally this would assert that a reorg was attempted ...
# passes even when reorgs were never tried ...
node = p.Node(address="")
alice_node = p.Node(address="")
# Bob mines height=0,1,2
b0 = p.mine_genesis_block(node, ids.bob_public_key)
b1 = mine_block(node, ids.bob_public_key, node.blocks[0], [])
b2 = mine_block(node, ids.bob_public_key, node.blocks[1], [])
# Alice accepts bob's first two blocks, but not the third
p.mine_genesis_block(alice_node, ids.bob_public_key) # FIXME confusing
alice_node.handle_block(b1)
# FIXME just borrow everything up until this point from another test
# Create one valid block for Alice
a2 = mine_block(alice_node, ids.alice_public_key, node.blocks[1], [])
node.handle_block(a2)
# Create one invalid block for Alice
alice_to_bob = send_tx(alice_node, ids.alice_private_key,
ids.bob_public_key, 20)
# txn invalid b/c changing amount arbitrarily after signing ...
alice_to_bob.tx_outs[0].amount = 100000
initial_utxo_set = deepcopy(node.utxo_set)
initial_chain = deepcopy(node.blocks)
initial_branches = deepcopy(node.branches)
# This block shouldn't make it into branches or chain
# b/c it contains an invalid transaction that will only be discovered
# during reorg
a3 = mine_block(node, ids.alice_public_key, node.branches[0][0],
[alice_to_bob])
# UTXO, chain, branches unchanged
assert str(node.utxo_set.keys()) == str(initial_utxo_set.keys()) # FIXME
assert node.blocks == initial_chain
assert node.branches == initial_branches
```
#### File: jpthor/digital-cash/utils.py
```python
import pickle, uuid
def serialize(coin):
return pickle.dumps(coin)
def deserialize(serialized):
return pickle.loads(serialized)
def to_disk(coin, filename):
serialized = serialize(coin)
with open(filename, "wb") as f:
f.write(serialized)
def from_disk(filename):
with open(filename, "rb") as f:
serialized = f.read()
return deserialize(serialized)
def prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount):
from mybankcoin import Tx, TxIn, TxOut
sender_public_key = sender_private_key.get_verifying_key()
# Construct tx.tx_ins
tx_ins = []
tx_in_sum = 0
for tx_out in utxos:
tx_ins.append(TxIn(tx_id=tx_out.tx_id, index=tx_out.index, signature=None))
tx_in_sum += tx_out.amount
if tx_in_sum > amount:
break
# Make sure sender can afford it
assert tx_in_sum >= amount
# Construct tx.tx_outs
tx_id = uuid.uuid4()
change = tx_in_sum - amount
tx_outs = [
TxOut(tx_id=tx_id, index=0, amount=amount, public_key=recipient_public_key),
TxOut(tx_id=tx_id, index=1, amount=change, public_key=sender_public_key),
]
# Construct tx and sign inputs
tx = Tx(id=tx_id, tx_ins=tx_ins, tx_outs=tx_outs)
for i in range(len(tx.tx_ins)):
tx.sign_input(i, sender_private_key)
return tx
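# Usage sketch (the `bank` node and key pairs are assumed to exist, named as
# in the accompanying test files):
#
#     utxos = bank.fetch_utxos(alice_public_key)
#     tx = prepare_simple_tx(utxos, alice_private_key,
#                            bob_public_key, amount=10)
#     bank.handle_tx(tx)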
``` |
{
"source": "JPTIZ/asciimatics",
"score": 3
} |
#### File: asciimatics/asciimatics/sprites.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from asciimatics.effects import Sprite
from asciimatics.renderers import StaticRenderer
import random
# Images for Sam-ple sprite.
from asciimatics.screen import Screen
sam_default = [
"""
______
.` `.
/ - - \\
| __ |
| |
\\ /
'.______.'
""",
"""
______
.` `.
/ o o \\
| __ |
| |
\\ /
'.______.'
"""
]
sam_left = """
______
.` `.
/ o \\
| |
|-- |
\\ /
'.______.'
"""
sam_right = """
______
.` `.
/ o \\
| |
| --|
\\ /
'.______.'
"""
sam_down = """
______
.` `.
/ \\
| |
| ^ ^ |
\\ __ /
'.______.'
"""
sam_up = """
______
.` __ `.
/ v v \\
| |
| |
\\ /
'.______.'
"""
# Images for an arrow Sprite.
left_arrow = """
/____
/
\\ ____
\\
"""
up_arrow = """
/\\
/ \\
/| |\\
| |
"""
right_arrow = """
____\\
\\
____ /
/
"""
down_arrow = """
| |
\\| |/
\\ /
\\/
"""
default_arrow = [
"""
/\\
/ \\
/|><|\\
| |
""",
"""
/\\
/ \\
/|oo|\\
| |
""",
]
# Simple static function to swap between 2 images to make a sprite blink.
def _blink():
if random.random() > 0.9:
return 0
else:
return 1
class Sam(Sprite):
"""
Sam Paul sprite - a simple sample animated character.
"""
def __init__(self, screen, path, start_frame=0, stop_frame=0):
"""
See :py:obj:`.Sprite` for details.
"""
super(Sam, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=sam_default, animation=_blink),
"left": StaticRenderer(images=[sam_left]),
"right": StaticRenderer(images=[sam_right]),
"down": StaticRenderer(images=[sam_down]),
"up": StaticRenderer(images=[sam_up]),
},
path=path,
start_frame=start_frame,
stop_frame=stop_frame)
class Arrow(Sprite):
"""
Sample arrow sprite - points where it is going.
"""
def __init__(self, screen, path, colour=Screen.COLOUR_WHITE, start_frame=0,
stop_frame=0):
"""
See :py:obj:`.Sprite` for details.
"""
super(Arrow, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=default_arrow,
animation=_blink),
"left": StaticRenderer(images=[left_arrow]),
"right": StaticRenderer(images=[right_arrow]),
"down": StaticRenderer(images=[down_arrow]),
"up": StaticRenderer(images=[up_arrow]),
},
path=path,
colour=colour,
start_frame=start_frame,
stop_frame=stop_frame)
class Plot(Sprite):
"""
Sample Sprite that simply plots an "X" for each step in the path. Useful
for plotting a path to the screen.
"""
def __init__(self, screen, path, colour=Screen.COLOUR_WHITE, start_frame=0,
stop_frame=0):
"""
See :py:obj:`.Sprite` for details.
"""
super(Plot, self).__init__(
screen,
renderer_dict={
"default": StaticRenderer(images=["X"])
},
path=path,
colour=colour,
clear=False,
start_frame=start_frame,
stop_frame=stop_frame)
```
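A minimal scene that drives one of these sprites along a path, using the standard asciimatics Path/Scene APIs:
```python
# Minimal sketch: walk Sam from the screen centre towards the top-left.
from asciimatics.paths import Path
from asciimatics.scene import Scene

def demo(screen):
    path = Path()
    path.jump_to(screen.width // 2, screen.height // 2)
    path.move_straight_to(5, 5, 20)
    screen.play([Scene([Sam(screen, path)], duration=40)])

Screen.wrapper(demo)
```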
#### File: asciimatics/widgets/checkbox.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from asciimatics.event import KeyboardEvent, MouseEvent
from asciimatics.widgets.widget import Widget
class CheckBox(Widget):
"""
A CheckBox widget is used to ask for Boolean (i.e. yes/no) input.
It consists of an optional label (typically used for the first in a group of CheckBoxes),
the box and a field name.
"""
__slots__ = ["_text", "_label", "_on_change"]
def __init__(self, text, label=None, name=None, on_change=None, **kwargs):
"""
:param text: The text to explain this specific field to the user.
:param label: An optional label for the widget.
:param name: The internal name for the widget.
:param on_change: Optional function to call when text changes.
Also see the common keyword arguments in :py:obj:`.Widget`.
"""
super(CheckBox, self).__init__(name, **kwargs)
self._text = text
self._label = label
self._on_change = on_change
def update(self, frame_no):
self._draw_label()
# Render this checkbox.
check_char = u"✓" if self._frame.canvas.unicode_aware else "X"
(colour, attr, bg) = self._pick_colours("control", self._has_focus)
self._frame.canvas.print_at(
"[{}] ".format(check_char if self._value else " "),
self._x + self._offset,
self._y,
colour, attr, bg)
(colour, attr, bg) = self._pick_colours("field", self._has_focus)
self._frame.canvas.print_at(
self._text,
self._x + self._offset + 4,
self._y,
colour, attr, bg)
def reset(self):
pass
def process_event(self, event):
if isinstance(event, KeyboardEvent):
if event.key_code in [ord(" "), 10, 13]:
# Use property to trigger events.
self.value = not self._value
else:
# Ignore any other key press.
return event
elif isinstance(event, MouseEvent):
# Mouse event - rebase coordinates to Frame context.
if event.buttons != 0:
if self.is_mouse_over(event, include_label=False):
# Use property to trigger events.
self.value = not self._value
return None
# Ignore other mouse events.
return event
else:
# Ignore other events
return event
# If we got here, we processed the event - swallow it.
return None
def required_height(self, offset, width):
return 1
@property
def value(self):
return self._value
@value.setter
def value(self, new_value):
# Only trigger the notification after we've changed the value.
old_value = self._value
self._value = new_value if new_value else False
if old_value != self._value and self._on_change:
self._on_change()
```
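Typical placement inside a Frame layout (a sketch; the surrounding frame and layout setup is omitted):
```python
# Sketch: add a CheckBox to an existing Layout inside a Frame.
layout.add_widget(
    CheckBox("Enable notifications",
             label="Options:",
             name="notify",
             on_change=lambda: None))
```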
#### File: asciimatics/widgets/filebrowser.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from re import compile as re_compile
import os
import unicodedata
from collections import namedtuple
from asciimatics.utilities import readable_timestamp, readable_mem
from asciimatics.widgets.multicolumnlistbox import MultiColumnListBox
class FileBrowser(MultiColumnListBox):
"""
A FileBrowser is a widget for finding a file on the local disk.
"""
def __init__(self, height, root, name=None, on_select=None, on_change=None, file_filter=None):
r"""
:param height: The desired height for this widget.
:param root: The starting root directory to display in the widget.
:param name: The name of this widget.
:param on_select: Optional function that gets called when user selects a file (by pressing
enter or double-clicking).
:param on_change: Optional function that gets called on any movement of the selection.
:param file_filter: Optional RegEx string that can be passed in to filter the files to be displayed.
Most people will want to use a filter to finx files with a particular extension. In this case,
you must use a regex that matches to the end of the line - e.g. use ".*\.txt$" to find files ending
with ".txt". This ensures that you don't accidentally pick up files containing the filter.
"""
super(FileBrowser, self).__init__(
height,
[0, ">8", ">14"],
[],
titles=["Filename", "Size", "Last modified"],
name=name,
on_select=self._on_selection,
on_change=on_change)
# Remember the on_select handler for external notification. This allows us to wrap the
# normal on_select notification with a function that will open new sub-directories as
# needed.
self._external_notification = on_select
self._root = root
self._in_update = False
self._initialized = False
self._file_filter = None if file_filter is None else re_compile(file_filter)
def update(self, frame_no):
# Defer initial population until we first display the widget in order to avoid race
# conditions in the Frame that may be using this widget.
if not self._initialized:
self._populate_list(self._root)
self._initialized = True
super(FileBrowser, self).update(frame_no)
def _on_selection(self):
"""
Internal function to handle directory traversal or bubble notifications up to user of the
Widget as needed.
"""
if self.value and os.path.isdir(self.value):
self._populate_list(self.value)
elif self._external_notification:
self._external_notification()
def clone(self, new_widget):
# Copy the data into the new widget. Notes:
# 1) I don't really want to expose these methods, so am living with the protected access.
# 2) I need to populate the list and then assign the values to ensure that we get the
# right selection on re-sizing.
new_widget._populate_list(self._root)
new_widget._start_line = self._start_line
new_widget._root = self._root
new_widget.value = self.value
def _populate_list(self, value):
"""
Populate the current multi-column list with the contents of the selected directory.
:param value: The new value to use.
"""
# Nothing to do if the value is rubbish.
if value is None:
return
# Stop any recursion - no more returns from here to end of fn please!
if self._in_update:
return
self._in_update = True
# We need to update the tree view.
self._root = os.path.abspath(value if os.path.isdir(value) else os.path.dirname(value))
# The absolute expansion of "/" or "\" is the root of the disk, so is a cross-platform
# way of spotting when to insert ".." or not.
tree_view = []
if len(self._root) > len(os.path.abspath(os.sep)):
tree_view.append((["|-+ .."], os.path.abspath(os.path.join(self._root, ".."))))
tree_dirs = []
tree_files = []
try:
files = os.listdir(self._root)
except OSError:
# Can fail on Windows due to access permissions
files = []
for my_file in files:
full_path = os.path.join(self._root, my_file)
try:
details = os.lstat(full_path)
except OSError:
# Can happen on Windows due to access permissions
details = namedtuple("stat_type", "st_size st_mtime")
details.st_size = 0
details.st_mtime = 0
name = "|-- {}".format(my_file)
tree = tree_files
if os.path.isdir(full_path):
tree = tree_dirs
if os.path.islink(full_path):
# Show links separately for directories
real_path = os.path.realpath(full_path)
name = "|-+ {} -> {}".format(my_file, real_path)
else:
name = "|-+ {}".format(my_file)
elif self._file_filter and not self._file_filter.match(my_file):
# Skip files that don't match the filter (if present)
continue
elif os.path.islink(full_path):
# Check if link target exists and if it does, show statistics of the
# linked file, otherwise just display the link
try:
real_path = os.path.realpath(full_path)
except OSError:
# Can fail on the Linux proc file system.
real_path = None
if real_path and os.path.exists(real_path):
details = os.stat(real_path)
name = "|-- {} -> {}".format(my_file, real_path)
else:
# Both broken directory and file links fall to this case.
# Actually using the files will cause a FileNotFound exception
name = "|-- {} -> {}".format(my_file, real_path)
# Normalize names for MacOS and then add to the list.
tree.append(([unicodedata.normalize("NFC", name),
readable_mem(details.st_size),
readable_timestamp(details.st_mtime)], full_path))
tree_view.extend(sorted(tree_dirs))
tree_view.extend(sorted(tree_files))
self.options = tree_view
self._titles[0] = self._root
# We're out of the function - unset recursion flag.
self._in_update = False
```
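A usage sketch; `Widget.FILL_FRAME` and the callback follow the usual widget conventions, and the filter shows the end-anchored regex recommended in the docstring:
```python
# Sketch: browse from the home directory, listing only .txt files.
import os
from asciimatics.widgets import Widget

browser = FileBrowser(Widget.FILL_FRAME,
                      os.path.expanduser("~"),
                      name="files",
                      on_select=lambda: print(browser.value),
                      file_filter=r".*\.txt$")
```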
#### File: asciimatics/widgets/utilities.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from logging import getLogger
from math import sqrt
from builtins import str
from collections import defaultdict
from wcwidth import wcswidth, wcwidth
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from asciimatics.screen import Screen
# Logging
logger = getLogger(__name__)
THEMES = {
"default": {
"background": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"shadow": (Screen.COLOUR_BLACK, None, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"invalid": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLUE),
"borders": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"scroll": (Screen.COLOUR_CYAN, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLUE),
"edit_text": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
"readonly": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_readonly": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_CYAN),
"button": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
"control": (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_control": (Screen.COLOUR_YELLOW, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_CYAN),
"field": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_field": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLUE),
"focus_field": (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLUE),
"selected_focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_CYAN),
},
"monochrome": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"green": defaultdict(
lambda: (Screen.COLOUR_GREEN, Screen.A_NORMAL, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"title": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"bright": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_BLACK),
{
"invalid": (Screen.COLOUR_BLACK, Screen.A_NORMAL, Screen.COLOUR_RED),
"label": (Screen.COLOUR_GREEN, Screen.A_BOLD, Screen.COLOUR_BLACK),
"control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_control": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"selected_focus_field": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_button": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"focus_edit_text": (Screen.COLOUR_YELLOW, Screen.A_BOLD, Screen.COLOUR_BLACK),
"disabled": (Screen.COLOUR_BLACK, Screen.A_BOLD, Screen.COLOUR_BLACK),
}
),
"tlj256": defaultdict(
lambda: (16, 0, 15),
{
"invalid": (0, 0, 196),
"label": (88, 0, 15),
"title": (88, 0, 15),
"selected_focus_field": (15, 0, 88),
"focus_edit_text": (15, 0, 88),
"focus_button": (15, 0, 88),
"selected_focus_control": (15, 0, 88),
"disabled": (8, 0, 15),
}
),
"warning": defaultdict(
lambda: (Screen.COLOUR_WHITE, Screen.A_NORMAL, Screen.COLOUR_RED),
{
"label": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"title": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_edit_text": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_field": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"focus_button": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_YELLOW),
"focus_control": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"disabled": (Screen.COLOUR_WHITE, Screen.A_BOLD, Screen.COLOUR_RED),
"shadow": (Screen.COLOUR_BLACK, None, Screen.COLOUR_BLACK),
}
),
}
def _enforce_width(text, width, unicode_aware=True):
"""
Enforce a displayed piece of text to be a certain number of cells wide. This takes into
account double-width characters used in CJK languages.
:param text: The text to be truncated
:param width: The screen cell width to enforce
:return: The resulting truncated text
"""
# Double-width strings cannot be more than twice the string length, so no need to try
# expensive truncation if this upper bound isn't an issue.
if (2 * len(text) < width) or (len(text) < width and not unicode_aware):
return text
# Can still optimize performance if we are not handling unicode characters.
if unicode_aware:
size = 0
for i, char in enumerate(str(text)):
c_width = wcwidth(char) if ord(char) >= 256 else 1
if size + c_width > width:
return text[0:i]
size += c_width
elif len(text) + 1 > width:
return text[0:width]
return text
def _find_min_start(text, max_width, unicode_aware=True, at_end=False):
"""
Find the starting point in the string that will reduce it to be less than or equal to the
specified width when displayed on screen.
:param text: The text to analyze.
:param max_width: The required maximum width
:param at_end: At the end of the editable line, so allow spaced for cursor.
:return: The offset within `text` to start at to reduce it to the required length.
"""
# Is the solution trivial? Worth optimizing for text heavy UIs...
if 2 * len(text) < max_width:
return 0
# OK - do it the hard way...
result = 0
string_len = wcswidth if unicode_aware else len
char_len = wcwidth if unicode_aware else lambda x: 1
display_end = string_len(text)
while display_end > max_width:
result += 1
display_end -= char_len(text[0])
text = text[1:]
if at_end and display_end == max_width:
result += 1
return result
def _get_offset(text, visible_width, unicode_aware=True):
"""
Find the character offset within some text for a given visible offset (taking into account the
fact that some character glyphs are double width).
:param text: The text to analyze
:param visible_width: The required location within that text (as seen on screen).
:return: The offset within text (as a character offset within the string).
"""
result = 0
width = 0
if unicode_aware:
for char in text:
if visible_width - width <= 0:
break
result += 1
width += wcwidth(char)
if visible_width - width < 0:
result -= 1
else:
result = min(len(text), visible_width)
return result
@lru_cache(256)
def _split_text(text, width, height, unicode_aware=True):
"""
Split text to required dimensions.
This will first try to split the text into multiple lines, then put a "..." on the last
3 characters of the last line if this still doesn't fit.
:param text: The text to split.
:param width: The maximum width for any line.
:param height: The maximum height for the resulting text.
:return: A list of strings of the broken up text.
"""
# At a high level, just try to split on whitespace for the best results.
tokens = text.split(" ")
result = []
current_line = ""
string_len = wcswidth if unicode_aware else len
for token in tokens:
for i, line_token in enumerate(token.split("\n")):
if string_len(current_line + line_token) > width or i > 0:
# Don't bother inserting completely blank lines
# which should only happen on the very first
# line (as the rest will inject whitespace/newlines)
if len(current_line) > 0:
result.append(current_line.rstrip())
current_line = line_token + " "
else:
current_line += line_token + " "
# At this point we've either split nicely or have a hugely long unbroken string
# (e.g. because the language doesn't use whitespace).
# Either way, break this last line up as best we can.
current_line = current_line.rstrip()
while string_len(current_line) > 0:
new_line = _enforce_width(current_line, width, unicode_aware)
result.append(new_line)
current_line = current_line[len(new_line):]
# Check for a height overrun and truncate.
if len(result) > height:
result = result[:height]
result[height - 1] = result[height - 1][:width - 3] + "..."
# Very small columns could be shorter than individual words - truncate
# each line if necessary.
for i, line in enumerate(result):
if len(line) > width:
result[i] = line[:width - 3] + "..."
return result
def _euclidian_distance(widget1, widget2):
"""
Find the Euclidian distance between 2 widgets.
:param widget1: first widget
:param widget2: second widget
"""
point1 = widget1.get_location()
point2 = widget2.get_location()
return sqrt((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2)
```
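For concreteness, the wrapping helper behaves like this (it is module-private, so the call is illustrative only):
```python
# _split_text wraps on spaces, then truncates with "..." to fit the height.
print(_split_text("hello wide world", 7, 2))
# -> ['hello', 'wide...']
```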
#### File: asciimatics/samples/kaleidoscope.py
```python
from math import sqrt
from asciimatics.renderers import Kaleidoscope, FigletText, Rainbow, RotatedDuplicate, \
StaticRenderer
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.effects import Print
from asciimatics.exceptions import ResizeScreenError
import sys
def demo(screen):
scenes = []
cell1 = Rainbow(screen,
RotatedDuplicate(screen.width // 2,
max(screen.width // 2, screen.height),
FigletText("ASCII" if screen.width < 80 else "ASCII rules",
font="banner",
width=screen.width // 2)))
cell2 = ""
size = int(sqrt(screen.height ** 2 + screen.width ** 2 // 4))
for _ in range(size):
for x in range(size):
c = x * screen.colours // size
cell2 += "${%d,2,%d}:" % (c, c)
cell2 += "\n"
for i in range(8):
scenes.append(
Scene([Print(screen,
Kaleidoscope(screen.height, screen.width, cell1, i),
0,
speed=1,
transparent=False),
Print(screen,
FigletText(str(i)), screen.height - 6, x=screen.width - 8, speed=1)],
duration=360))
scenes.append(
Scene([Print(screen,
Kaleidoscope(screen.height, screen.width, StaticRenderer([cell2]), i),
0,
speed=1,
transparent=False)],
duration=360))
screen.play(scenes, stop_on_resize=True)
if __name__ == "__main__":
while True:
try:
Screen.wrapper(demo)
sys.exit(0)
except ResizeScreenError:
pass
```
#### File: asciimatics/samples/maps.py
```python
from __future__ import division
from __future__ import print_function
import traceback
from math import pi, exp, atan, log, tan, sqrt
import sys
import os
import json
import threading
from ast import literal_eval
from collections import OrderedDict
from asciimatics.event import KeyboardEvent
from asciimatics.renderers import ColourImageFile
from asciimatics.widgets import Effect, Button, Text, Layout, Frame, Divider, PopUpDialog
from asciimatics.scene import Scene
from asciimatics.screen import Screen
from asciimatics.exceptions import ResizeScreenError, StopApplication, InvalidFields
try:
import mapbox_vector_tile
import requests
from google.protobuf.message import DecodeError
except ImportError:
print("Run `pip install mapbox-vector-tile protobuf requests` to fix your dependencies.")
print("See https://github.com/Toblerity/Shapely#installing-shapely-16b2 for Shapely install.")
sys.exit(0)
# Global constants for the applications
# Replace `_KEY` with the free one that you get from signing up with www.mapbox.com
_KEY = ""
_VECTOR_URL = \
"http://a.tiles.mapbox.com/v4/mapbox.mapbox-streets-v7/{}/{}/{}.mvt?access_token={}"
_IMAGE_URL = \
"https://api.mapbox.com/styles/v1/mapbox/satellite-v9/tiles/256/{}/{}/{}?access_token={}"
_START_SIZE = 64
_ZOOM_IN_SIZE = _START_SIZE * 2
_ZOOM_OUT_SIZE = _START_SIZE // 2
_ZOOM_ANIMATION_STEPS = 6
_ZOOM_STEP = exp(log(2) / _ZOOM_ANIMATION_STEPS)
_CACHE_SIZE = 180
_HELP = """
You can move around using the cursor keys. To jump to any location in the world, press Enter and \
then fill in the longitude and latitude of the location and press 'OK'.
To zoom in and out use '+'/'-'. To zoom all the way in/out, press '9'/'0'.
To swap between satellite and vector views, press 'T'. To quit, press 'Q'.
"""
class EnterLocation(Frame):
"""Form to enter a new desired location to display on the map."""
def __init__(self, screen, longitude, latitude, on_ok):
super(EnterLocation, self).__init__(
screen, 7, 40, data={"long": str(longitude), "lat": str(latitude)}, name="loc",
title="Enter New Location", is_modal=True)
self._on_ok = on_ok
layout = Layout([1, 18, 1])
self.add_layout(layout)
layout.add_widget(Divider(draw_line=False), 1)
layout.add_widget(Text(label="Longitude:", name="long", validator=r"^[-]?\d+?\.\d+?$"), 1)
layout.add_widget(Text(label="Latitude:", name="lat", validator=r"^[-]?\d+?\.\d+?$"), 1)
layout.add_widget(Divider(draw_line=False), 1)
layout2 = Layout([1, 1, 1])
self.add_layout(layout2)
layout2.add_widget(Button("OK", self._ok), 1)
layout2.add_widget(Button("Cancel", self._cancel), 2)
self.fix()
def _ok(self):
try:
self.save(validate=True)
except InvalidFields:
return
self._on_ok(self)
self._scene.remove_effect(self)
def _cancel(self):
self._scene.remove_effect(self)
class Map(Effect):
"""Effect to display a satellite image or vector map of the world."""
# Colour palettes
_256_PALETTE = {
"landuse": 193,
"water": 153,
"waterway": 153,
"marine_label": 12,
"admin": 7,
"country_label": 9,
"state_label": 1,
"place_label": 0,
"building": 252,
"road": 15,
"poi_label": 8
}
_16_PALETTE = {
"landuse": Screen.COLOUR_GREEN,
"water": Screen.COLOUR_BLUE,
"waterway": Screen.COLOUR_BLUE,
"marine_label": Screen.COLOUR_BLUE,
"admin": Screen.COLOUR_WHITE,
"country_label": Screen.COLOUR_RED,
"state_label": Screen.COLOUR_RED,
"place_label": Screen.COLOUR_YELLOW,
"building": Screen.COLOUR_WHITE,
"road": Screen.COLOUR_WHITE,
"poi_label": Screen.COLOUR_RED
}
def __init__(self, screen):
super(Map, self).__init__(screen)
# Current state of the map
self._screen = screen
self._zoom = 0
self._latitude = 51.4778
self._longitude = -0.0015
self._tiles = OrderedDict()
self._size = _START_SIZE
self._satellite = False
# Desired viewing location and animation flags
self._desired_zoom = self._zoom
self._desired_latitude = self._latitude
self._desired_longitude = self._longitude
self._next_update = 100000
# State for the background thread which reads in the tiles
self._running = True
self._updated = threading.Event()
self._updated.set()
self._oops = None
self._thread = threading.Thread(target=self._get_tiles)
self._thread.daemon = True
self._thread.start()
def _scale_coords(self, x, y, extent, xo, yo):
"""Convert from tile coordinates to "pixels" - i.e. text characters."""
return xo + (x * self._size * 2 / extent), yo + ((extent - y) * self._size / extent)
def _convert_longitude(self, longitude):
"""Convert from longitude to the x position in overall map."""
return int((180 + longitude) * (2 ** self._zoom) * self._size / 360)
def _convert_latitude(self, latitude):
"""Convert from latitude to the y position in overall map."""
return int((180 - (180 / pi * log(tan(
pi / 4 + latitude * pi / 360)))) * (2 ** self._zoom) * self._size / 360)
def _inc_lat(self, latitude, delta):
"""Shift the latitude by the required number of pixels (i.e. text lines)."""
y = self._convert_latitude(latitude)
y += delta
return 360 / pi * atan(
exp((180 - y * 360 / (2 ** self._zoom) / self._size) * pi / 180)) - 90
def _get_satellite_tile(self, x_tile, y_tile, z_tile):
"""Load up a single satellite image tile."""
cache_file = "mapscache/{}.{}.{}.jpg".format(z_tile, x_tile, y_tile)
if cache_file not in self._tiles:
if not os.path.isfile(cache_file):
url = _IMAGE_URL.format(z_tile, x_tile, y_tile, _KEY)
data = requests.get(url).content
with open(cache_file, 'wb') as f:
f.write(data)
self._tiles[cache_file] = [
x_tile, y_tile, z_tile,
ColourImageFile(self._screen, cache_file, height=_START_SIZE, dither=True,
uni=self._screen.unicode_aware),
True]
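            # The OrderedDict doubles as a FIFO cache: drop the oldest tile once the limit is hit.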
if len(self._tiles) > _CACHE_SIZE:
self._tiles.popitem(False)
self._screen.force_update()
def _get_vector_tile(self, x_tile, y_tile, z_tile):
"""Load up a single vector tile."""
cache_file = "mapscache/{}.{}.{}.json".format(z_tile, x_tile, y_tile)
if cache_file not in self._tiles:
if os.path.isfile(cache_file):
with open(cache_file, 'rb') as f:
tile = json.loads(f.read().decode('utf-8'))
else:
url = _VECTOR_URL.format(z_tile, x_tile, y_tile, _KEY)
data = requests.get(url).content
try:
tile = mapbox_vector_tile.decode(data)
with open(cache_file, mode='w') as f:
json.dump(literal_eval(repr(tile)), f)
except DecodeError:
tile = None
if tile:
self._tiles[cache_file] = [x_tile, y_tile, z_tile, tile, False]
if len(self._tiles) > _CACHE_SIZE:
self._tiles.popitem(False)
self._screen.force_update()
def _get_tiles(self):
"""Background thread to download map tiles as required."""
while self._running:
self._updated.wait()
self._updated.clear()
# Save off current view and find the nearest tile.
satellite = self._satellite
zoom = self._zoom
size = self._size
n = 2 ** zoom
x_offset = self._convert_longitude(self._longitude)
y_offset = self._convert_latitude(self._latitude)
# Get the visible tiles around that location - getting most relevant first
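            # (dx, dy, dz) offsets: the centre tile first, then its four neighbours,
            # the two adjacent zoom levels, and finally the diagonals.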
for x, y, z in [(0, 0, 0), (1, 0, 0), (0, 1, 0), (-1, 0, 0), (0, -1, 0),
(0, 0, -1), (0, 0, 1),
(1, 1, 0), (1, -1, 0), (-1, -1, 0), (-1, 1, 0)]:
# Restart if we've already zoomed to another level
if self._zoom != zoom:
break
# Don't get tile if it falls off the grid
x_tile = int(x_offset // size) + x
y_tile = int(y_offset // size) + y
z_tile = zoom + z
if (x_tile < 0 or x_tile >= n or y_tile < 0 or y_tile >= n or
z_tile < 0 or z_tile > 20):
continue
# noinspection PyBroadException
try:
# Don't bother rendering if the tile is not visible
top = y_tile * size - y_offset + self._screen.height // 2
left = (x_tile * size - x_offset + self._screen.width // 4) * 2
if z == 0 and (left > self._screen.width or left + self._size * 2 < 0 or
top > self._screen.height or top + self._size < 0):
continue
if satellite:
self._get_satellite_tile(x_tile, y_tile, z_tile)
else:
self._get_vector_tile(x_tile, y_tile, z_tile)
# pylint: disable=broad-except
except Exception:
self._oops = "{} - tile loc: {} {} {}".format(
traceback.format_exc(), x_tile, y_tile, z_tile)
# Generally refresh screen after we've downloaded everything
self._screen.force_update()
def _get_features(self):
"""Decide which layers to render based on current zoom level and view type."""
if self._satellite:
return [("water", [], [])]
elif self._zoom <= 2:
return [
("water", [], []),
("marine_label", [], [1]),
]
elif self._zoom <= 7:
return [
("admin", [], []),
("water", [], []),
("road", ["motorway"], []),
("country_label", [], []),
("marine_label", [], [1]),
("state_label", [], []),
("place_label", [], ["city", "town"]),
]
elif self._zoom <= 10:
return [
("admin", [], []),
("water", [], []),
("road", ["motorway", "motorway_link", "trunk"], []),
("country_label", [], []),
("marine_label", [], [1]),
("state_label", [], []),
("place_label", [], ["city", "town"]),
]
else:
return [
("landuse", ["agriculture", "grass", "park"], []),
("water", [], []),
("waterway", ["river", "canal"], []),
("building", [], []),
("road",
["motorway", "motorway_link", "trunk", "primary", "secondary"]
if self._zoom <= 14 else
["motorway", "motorway_link", "trunk", "primary", "secondary", "tertiary",
"link", "street", "tunnel"],
[]),
("poi_label", [], []),
]
def _draw_lines_internal(self, coords, colour, bg):
"""Helper to draw lines connecting a set of nodes that are scaled for the Screen."""
for i, (x, y) in enumerate(coords):
if i == 0:
self._screen.move(x, y)
else:
self._screen.draw(x, y, colour=colour, bg=bg, thin=True)
def _draw_polygons(self, feature, bg, colour, extent, polygons, xo, yo):
"""Draw a set of polygons from a vector tile."""
coords = []
for polygon in polygons:
coords.append([self._scale_coords(x, y, extent, xo, yo) for x, y in polygon])
# Polygons are expensive to draw and the buildings layer is huge - so we convert to
# lines in order to process updates fast enough to animate.
if "type" in feature["properties"] and "building" in feature["properties"]["type"]:
for line in coords:
self._draw_lines_internal(line, colour, bg)
else:
self._screen.fill_polygon(coords, colour=colour, bg=bg)
def _draw_lines(self, bg, colour, extent, line, xo, yo):
"""Draw a set of lines from a vector tile."""
coords = [self._scale_coords(x, y, extent, xo, yo) for x, y in line]
self._draw_lines_internal(coords, colour, bg)
def _draw_feature(self, feature, extent, colour, bg, xo, yo):
"""Draw a single feature from a layer in a vector tile."""
geometry = feature["geometry"]
if geometry["type"] == "Polygon":
self._draw_polygons(feature, bg, colour, extent, geometry["coordinates"], xo, yo)
elif feature["geometry"]["type"] == "MultiPolygon":
for multi_polygon in geometry["coordinates"]:
self._draw_polygons(feature, bg, colour, extent, multi_polygon, xo, yo)
elif feature["geometry"]["type"] == "LineString":
self._draw_lines(bg, colour, extent, geometry["coordinates"], xo, yo)
elif feature["geometry"]["type"] == "MultiLineString":
for line in geometry["coordinates"]:
self._draw_lines(bg, colour, extent, line, xo, yo)
elif feature["geometry"]["type"] == "Point":
x, y = self._scale_coords(
geometry["coordinates"][0], geometry["coordinates"][1], extent, xo, yo)
text = u" {} ".format(feature["properties"]["name_en"])
self._screen.print_at(text, int(x - len(text) / 2), int(y), colour=colour, bg=bg)
def _draw_tile_layer(self, tile, layer_name, c_filters, colour, t_filters, x, y, bg):
"""Draw the visible geometry in the specified map tile."""
# Don't bother rendering if the tile is not visible
left = (x + self._screen.width // 4) * 2
top = y + self._screen.height // 2
if (left > self._screen.width or left + self._size * 2 < 0 or
top > self._screen.height or top + self._size < 0):
return 0
# Not all layers are available in every tile.
try:
_layer = tile[layer_name]
_extent = float(_layer["extent"])
except KeyError:
return 0
for _feature in _layer["features"]:
try:
if c_filters and _feature["properties"]["class"] not in c_filters:
continue
if (t_filters and _feature["type"] not in t_filters and
_feature["properties"]["type"] not in t_filters):
continue
self._draw_feature(
_feature, _extent, colour, bg,
(x + self._screen.width // 4) * 2, y + self._screen.height // 2)
except KeyError:
pass
return 1
def _draw_satellite_tile(self, tile, x, y):
"""Draw a satellite image tile to screen."""
image, colours = tile.rendered_text
for (i, line) in enumerate(image):
self._screen.paint(line, x, y + i, colour_map=colours[i])
return 1
def _draw_tiles(self, x_offset, y_offset, bg):
"""Render all visible tiles a layer at a time."""
count = 0
for layer_name, c_filters, t_filters in self._get_features():
colour = (self._256_PALETTE[layer_name]
if self._screen.colours >= 256 else self._16_PALETTE[layer_name])
for x, y, z, tile, satellite in sorted(self._tiles.values(), key=lambda k: k[0]):
# Don't draw the wrong type or zoom of tile.
if satellite != self._satellite or z != self._zoom:
continue
# Convert tile location into pixels and draw the tile.
x *= self._size
y *= self._size
if satellite:
count += self._draw_satellite_tile(
tile,
int((x-x_offset + self._screen.width // 4) * 2),
int(y-y_offset + self._screen.height // 2))
else:
count += self._draw_tile_layer(tile, layer_name, c_filters, colour, t_filters,
x - x_offset, y - y_offset, bg)
return count
def _zoom_map(self, zoom_out=True):
"""Animate the zoom in/out as appropriate for the displayed map tile."""
size_step = 1 / _ZOOM_STEP if zoom_out else _ZOOM_STEP
self._next_update = 1
if self._satellite:
size_step **= _ZOOM_ANIMATION_STEPS
self._size *= size_step
if self._size <= _ZOOM_OUT_SIZE:
if self._zoom > 0:
self._zoom -= 1
self._size = _START_SIZE
else:
self._size = _ZOOM_OUT_SIZE
elif self._size >= _ZOOM_IN_SIZE:
if self._zoom < 20:
self._zoom += 1
self._size = _START_SIZE
else:
self._size = _ZOOM_IN_SIZE
def _move_to_desired_location(self):
"""Animate movement to desired location on map."""
self._next_update = 100000
x_start = self._convert_longitude(self._longitude)
y_start = self._convert_latitude(self._latitude)
x_end = self._convert_longitude(self._desired_longitude)
y_end = self._convert_latitude(self._desired_latitude)
if sqrt((x_end - x_start) ** 2 + (y_end - y_start) ** 2) > _START_SIZE // 4:
self._zoom_map(True)
elif self._zoom != self._desired_zoom:
self._zoom_map(self._desired_zoom < self._zoom)
if self._longitude != self._desired_longitude:
self._next_update = 1
if self._desired_longitude < self._longitude:
self._longitude = max(self._longitude - 360 / 2 ** self._zoom / self._size * 2,
self._desired_longitude)
else:
self._longitude = min(self._longitude + 360 / 2 ** self._zoom / self._size * 2,
self._desired_longitude)
if self._latitude != self._desired_latitude:
self._next_update = 1
if self._desired_latitude < self._latitude:
self._latitude = max(self._inc_lat(self._latitude, 2), self._desired_latitude)
else:
self._latitude = min(self._inc_lat(self._latitude, -2), self._desired_latitude)
if self._next_update == 1:
self._updated.set()
def _update(self, frame_no):
"""Draw the latest set of tiles to the Screen."""
# Check for any fatal errors from the background thread and quit if we hit anything.
if self._oops:
raise RuntimeError(self._oops)
# Calculate new positions for animated movement.
self._move_to_desired_location()
# Re-draw the tiles - if we have any suitable ones downloaded.
count = 0
x_offset = self._convert_longitude(self._longitude)
y_offset = self._convert_latitude(self._latitude)
if self._tiles:
# Clear the area first.
bg = 253 if self._screen.unicode_aware and self._screen.colours >= 256 else 0
for y in range(self._screen.height):
self._screen.print_at("." * self._screen.width, 0, y, colour=bg, bg=bg)
# Now draw all the available tiles.
count = self._draw_tiles(x_offset, y_offset, bg)
# Just a few pointers on what the user should do...
if count == 0:
self._screen.centre(" Loading - please wait... ", self._screen.height // 2, 1)
self._screen.centre("Press '?' for help.", 0, 1)
if _KEY == "":
footer = "Using local cached data - go to https://www.mapbox.com/ and get a free key."
else:
footer = u"Zoom: {} Location: {:.6}, {:.6} Maps: © Mapbox, © OpenStreetMap".format(
self._zoom, self._longitude, self._latitude)
self._screen.centre(footer, self._screen.height - 1, 1)
return count
def process_event(self, event):
"""User input for the main map view."""
if isinstance(event, KeyboardEvent):
if event.key_code in [Screen.ctrl("m"), Screen.ctrl("j")]:
self._scene.add_effect(
EnterLocation(
self._screen, self._longitude, self._latitude, self._on_new_location))
elif event.key_code in [ord('q'), ord('Q'), Screen.ctrl("c")]:
raise StopApplication("User quit")
elif event.key_code in [ord('t'), ord('T')]:
self._satellite = not self._satellite
if self._satellite:
self._size = _START_SIZE
elif event.key_code == ord("?"):
self._scene.add_effect(PopUpDialog(self._screen, _HELP, ["OK"]))
elif event.key_code == ord("+") and self._zoom <= 20:
if self._desired_zoom < 20:
self._desired_zoom += 1
elif event.key_code == ord("-") and self._zoom >= 0:
if self._desired_zoom > 0:
self._desired_zoom -= 1
elif event.key_code == ord("0"):
self._desired_zoom = 0
elif event.key_code == ord("9"):
self._desired_zoom = 20
elif event.key_code == Screen.KEY_LEFT:
self._desired_longitude -= 360 / 2 ** self._zoom / self._size * 10
elif event.key_code == Screen.KEY_RIGHT:
self._desired_longitude += 360 / 2 ** self._zoom / self._size * 10
elif event.key_code == Screen.KEY_UP:
self._desired_latitude = self._inc_lat(self._desired_latitude, -self._size / 10)
elif event.key_code == Screen.KEY_DOWN:
self._desired_latitude = self._inc_lat(self._desired_latitude, self._size / 10)
else:
return
# Trigger a reload of the tiles and redraw map
self._updated.set()
self._screen.force_update()
def _on_new_location(self, form):
"""Set a new desired location entered in the pop-up form."""
self._desired_longitude = float(form.data["long"])
self._desired_latitude = float(form.data["lat"])
self._desired_zoom = 13
self._screen.force_update()
# noinspection PyUnusedLocal
# pylint: disable=unused-argument
def clone(self, new_screen, new_scene):
# On resize, there will be a new Map - kill the thread in this one.
self._running = False
self._updated.set()
@property
def frame_update_count(self):
# Only redraw if required - as determined by the update logic.
return self._next_update
@property
def stop_frame(self):
# No specific end point for this Effect. Carry on running forever.
return 0
def reset(self):
# Nothing special to do. Just need this to satisfy the ABC.
pass
def demo(screen, scene):
screen.play([Scene([Map(screen)], -1)], stop_on_resize=True, start_scene=scene)
if __name__ == "__main__":
last_scene = None
while True:
try:
Screen.wrapper(demo, catch_interrupt=False, arguments=[last_scene])
sys.exit(0)
except ResizeScreenError as e:
last_scene = e.scene
```
#### File: asciimatics/tests/test_exceptions.py
```python
import unittest
from asciimatics.exceptions import ResizeScreenError, StopApplication
from asciimatics.scene import Scene
from tests.mock_objects import MockEffect
class TestExceptions(unittest.TestCase):
def test_resize(self):
"""
Check that we can create a ResizeScreenError
"""
scene = Scene([MockEffect()])
message = "Test message"
error = ResizeScreenError(message, scene)
self.assertEqual(error.scene, scene)
self.assertEqual(str(error), message)
def test_stop_app(self):
"""
Check that we can create a StopApplication.
"""
message = "Test message"
error = StopApplication(message)
self.assertEqual(str(error), message)
if __name__ == '__main__':
unittest.main()
```
#### File: asciimatics/tests/test_parsers.py
```python
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from asciimatics.parsers import AsciimaticsParser, AnsiTerminalParser, ControlCodeParser, Parser
import asciimatics.constants as constants
class TestParsers(unittest.TestCase):
def test_controlcode_parser(self):
"""
Check ControlCodeParser works as expected
"""
parser = ControlCodeParser()
parser.reset("\0\b\ra[", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "^@"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "^H"))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "^M"))
self.assertEquals(next(tokens), (3, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "["))
def test_asciimatics_parser(self):
"""
Check AsciimaticsParser works as expected.
"""
parser = AsciimaticsParser()
parser.reset("a${1}b${2,1}c${3,2,4}de${7}", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.CHANGE_COLOURS, (1, 0, None)))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
self.assertEquals(next(tokens), (6, Parser.CHANGE_COLOURS, (2, 1, None)))
self.assertEquals(next(tokens), (6, Parser.DISPLAY_TEXT, "c"))
self.assertEquals(next(tokens), (13, Parser.CHANGE_COLOURS, (3, 2, 4)))
self.assertEquals(next(tokens), (13, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (22, Parser.DISPLAY_TEXT, "e"))
self.assertEquals(next(tokens), (23, Parser.CHANGE_COLOURS, (7, 0, None)))
with self.assertRaises(StopIteration):
next(tokens)
def test_ansi_terminal_parser_colours(self):
"""
Check AnsiTerminalParser basic colours work as expected.
"""
parser = AnsiTerminalParser()
parser.reset("a\x1B[23ab\x1B[0mc\x1B[1md\x1B[2me\x1B[7mf\x1B[27mg\x1B[31;42mh\x1B[m", None)
tokens = parser.parse()
# Normal text
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
# Unknown escape code
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
# Reset
self.assertEquals(next(tokens), (7, Parser.CHANGE_COLOURS, (7, constants.A_NORMAL, 0)))
self.assertEquals(next(tokens), (7, Parser.DISPLAY_TEXT, "c"))
# Bold
self.assertEquals(next(tokens), (12, Parser.CHANGE_COLOURS, (7, constants.A_BOLD, 0)))
self.assertEquals(next(tokens), (12, Parser.DISPLAY_TEXT, "d"))
# Normal
self.assertEquals(next(tokens), (17, Parser.CHANGE_COLOURS, (7, constants.A_NORMAL, 0)))
self.assertEquals(next(tokens), (17, Parser.DISPLAY_TEXT, "e"))
# Inverse
self.assertEquals(next(tokens), (22, Parser.CHANGE_COLOURS, (7, constants.A_REVERSE, 0)))
self.assertEquals(next(tokens), (22, Parser.DISPLAY_TEXT, "f"))
# Unset inverse
self.assertEquals(next(tokens), (27, Parser.CHANGE_COLOURS, (7, constants.A_NORMAL, 0)))
self.assertEquals(next(tokens), (27, Parser.DISPLAY_TEXT, "g"))
# Standard colours, using multiple parameters
self.assertEquals(next(tokens), (33, Parser.CHANGE_COLOURS, (constants.COLOUR_RED, constants.A_NORMAL, constants.COLOUR_GREEN)))
self.assertEquals(next(tokens), (33, Parser.DISPLAY_TEXT, "h"))
# Final escape sequence with no visible text is returned with no text.
self.assertEquals(next(tokens), (42, Parser.CHANGE_COLOURS, (constants.COLOUR_WHITE, constants.A_NORMAL, constants.COLOUR_BLACK)))
with self.assertRaises(StopIteration):
next(tokens)
def test_ansi_terminal_parser_palette(self):
"""
Check AnsiTerminalParser colour palettes work as expected.
"""
parser = AnsiTerminalParser()
parser.reset(
"\x1B[38;1ma\x1B[38;5;17mb\x1B[48;2;1;2;3mc\x1B[48;5;54md\x1B[999me\x1B[93m\x1B[104m", None)
tokens = parser.parse()
# Bad colour scheme - ignore
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
# Standard colour palette
self.assertEquals(next(tokens), (8, Parser.CHANGE_COLOURS, (17, None, None)))
self.assertEquals(next(tokens), (8, Parser.DISPLAY_TEXT, "b"))
# RGB colour scheme - ignore
self.assertEquals(next(tokens), (19, Parser.DISPLAY_TEXT, "c"))
# Standard colour palette
self.assertEquals(next(tokens), (33, Parser.CHANGE_COLOURS, (17, None, 54)))
self.assertEquals(next(tokens), (33, Parser.DISPLAY_TEXT, "d"))
# Unknown parameter
self.assertEquals(next(tokens), (44, Parser.DISPLAY_TEXT, "e"))
# Intense colour palette
self.assertEquals(next(tokens), (51, Parser.CHANGE_COLOURS, (11, None, 54)))
self.assertEquals(next(tokens), (51, Parser.CHANGE_COLOURS, (11, None, 12)))
def test_ansi_terminal_parser_cursor(self):
"""
Check AnsiTerminalParser cursor movement work as expected.
"""
parser = AnsiTerminalParser()
parser.reset("aa\x08b\rc\x1B[Cdd\x1B[De\x1B[A\x1B[B\x1B[1;2H\x1B[?25h\x1B[?25l\r", None)
tokens = parser.parse()
# Normal text...
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "a"))
# Backspace and overwrite.
self.assertEquals(next(tokens), (2, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "b"))
# Carriage return and overwrite
self.assertEquals(next(tokens), (4, Parser.MOVE_ABSOLUTE, (0, None)))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "c"))
# Move cursor forwards and append.
self.assertEquals(next(tokens), (6, Parser.MOVE_RELATIVE, (1, 0)))
self.assertEquals(next(tokens), (6, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (10, Parser.DISPLAY_TEXT, "d"))
# Move cursor backwards and overwrite.
self.assertEquals(next(tokens), (11, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (11, Parser.DISPLAY_TEXT, "e"))
# Move cursor up and down.
self.assertEquals(next(tokens), (15, Parser.MOVE_RELATIVE, (0, -1)))
self.assertEquals(next(tokens), (15, Parser.MOVE_RELATIVE, (0, 1)))
# Move cursor to location
self.assertEquals(next(tokens), (15, Parser.MOVE_ABSOLUTE, (1, 0)))
# Show/hide cursor
self.assertEquals(next(tokens), (15, Parser.SHOW_CURSOR, True))
self.assertEquals(next(tokens), (15, Parser.SHOW_CURSOR, False))
# Trailing Carriage return
self.assertEquals(next(tokens), (15, Parser.MOVE_ABSOLUTE, (0, None)))
def test_ansi_terminal_parser_delete(self):
"""
Check AnsiTerminalParser delete operations work as expected.
"""
parser = AnsiTerminalParser()
# Delete to end of line
parser.reset("abcde\x08\x08\x08\x1B[K", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "c"))
self.assertEquals(next(tokens), (3, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "e"))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.DELETE_LINE, 0))
with self.assertRaises(StopIteration):
next(tokens)
# Delete to start of line
parser.reset("abcde\x1B[1K", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "c"))
self.assertEquals(next(tokens), (3, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "e"))
self.assertEquals(next(tokens), (5, Parser.DELETE_LINE, 1))
with self.assertRaises(StopIteration):
next(tokens)
# Delete line
parser.reset("abcde\x1B[2K", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "c"))
self.assertEquals(next(tokens), (3, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "e"))
self.assertEquals(next(tokens), (5, Parser.DELETE_LINE, 2))
with self.assertRaises(StopIteration):
next(tokens)
# Delete char
parser.reset("abcde\x08\x08\x08\x1B[P", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
self.assertEquals(next(tokens), (2, Parser.DISPLAY_TEXT, "c"))
self.assertEquals(next(tokens), (3, Parser.DISPLAY_TEXT, "d"))
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "e"))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.MOVE_RELATIVE, (-1, 0)))
self.assertEquals(next(tokens), (5, Parser.DELETE_CHARS, 1))
with self.assertRaises(StopIteration):
next(tokens)
def test_ansi_terminal_parser_errors(self):
"""
        Check AnsiTerminalParser handles unsupported escape sequences and control codes gracefully.
"""
parser = AnsiTerminalParser()
parser.reset("a\x1BZb\x07c", None)
tokens = parser.parse()
# Ignore unknown escape and next letter
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
# Ignore unknown control char
self.assertEquals(next(tokens), (4, Parser.DISPLAY_TEXT, "c"))
def test_ansi_terminal_parser_tab(self):
"""
Check AnsiTerminalParser handles tabs.
"""
parser = AnsiTerminalParser()
parser.reset("\x09", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.NEXT_TAB, None))
def test_ansi_terminal_parser_clear(self):
"""
Check AnsiTerminalParser clears screen.
"""
parser = AnsiTerminalParser()
parser.reset("\x1B[2J", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.CLEAR_SCREEN, None))
def test_ansi_terminal_parser_os_cmd(self):
"""
Check AnsiTerminalParser removes OS commands.
"""
parser = AnsiTerminalParser()
parser.reset("a\x1B]do something;stuff:to^ignore\x07b", None)
tokens = parser.parse()
self.assertEquals(next(tokens), (0, Parser.DISPLAY_TEXT, "a"))
self.assertEquals(next(tokens), (1, Parser.DISPLAY_TEXT, "b"))
```
#### File: asciimatics/tests/test_particles.py
```python
import unittest
from mock.mock import MagicMock
from asciimatics.particles import ShootScreen, DropScreen, Explosion, Rain, \
StarFirework, PalmFirework, RingFirework, SerpentFirework
from asciimatics.screen import Screen, Canvas
class TestParticles(unittest.TestCase):
def check_effect(self, canvas, effect, assert_fn,
is_blank=True, iterations=40, warm_up=0):
"""
Basic checks for all effects. Since they are all randomised to a
certain extent, just check the overall content for expected values.
"""
# Asciimatics always calls reset on entering a new Scene.
effect.reset()
# Check canvas is in desired starting state.
if is_blank:
for x in range(canvas.width):
for y in range(canvas.height):
self.assertEqual(canvas.get_from(x, y), (32, 7, 0, 0))
# Set up blank my_buffer - OK this should copy the existing screen, but
# we just lose one iteration on the checks (when not really blank).
my_buffer = [[(32, 7, 0, 0) for _ in range(40)] for _ in range(10)]
# Re-draw comparing the my_buffer with what's on the canvas
for i in range(iterations):
effect.update(i)
changed = False
if i >= warm_up:
view = ""
for y in range(canvas.height):
for x in range(canvas.width):
value = canvas.get_from(x, y)
assert_fn(value)
if value != my_buffer[y][x]:
changed = True
my_buffer[y][x] = value
view += chr(value[0])
view += "\n"
self.assertTrue(changed, "failed at step %d %s" % (i, view))
# Check there is no stop frame by default.
self.assertEqual(effect.stop_frame, 0)
def test_shoot_screen(self):
"""
Test that ShootScreen works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
canvas.centre("Hello World!", 5)
effect = ShootScreen(canvas, canvas.width // 2, canvas.height // 2, 100)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]),
'HeloWrd! '),
is_blank=False,
iterations=4)
def test_drop_screen(self):
"""
Test that DropScreen works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
canvas.centre("Hello World!", 0)
effect = DropScreen(canvas, 100)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]),
'HeloWrd! '),
is_blank=False,
warm_up=3,
iterations=10)
def test_explosion(self):
"""
Test that Explosion works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = Explosion(canvas, 4, 4, 25)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), ' #'),
iterations=25)
def test_rain(self):
"""
Test that Rain works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = Rain(canvas, 200)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), ' `\\v'))
def test_star_firework(self):
"""
Test that StarFirework works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = StarFirework(canvas, 4, 4, 25)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), '|+:,. '),
iterations=25)
def test_palm_firework(self):
"""
Test that PalmFirework works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = PalmFirework(canvas, 4, 4, 25)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), '|*+:,. '),
iterations=26)
def test_ring_firework(self):
"""
Test that RingFirework works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = RingFirework(canvas, 4, 4, 25)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), '|*:. '),
iterations=15)
def test_serpent_firework(self):
"""
Test that SerpentFirework works as expected.
"""
screen = MagicMock(spec=Screen, colours=8)
canvas = Canvas(screen, 10, 40, 0, 0)
effect = SerpentFirework(canvas, 4, 4, 25)
self.check_effect(canvas,
effect,
lambda value: self.assertIn(chr(value[0]), '|+- '),
iterations=20)
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jptomo/boilerplate-setup.py",
"score": 2
} |
#### File: boilerplate-setup.py/{{cookiecutter.module_name}}/setup.py
```python
import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def _read(fname):
here = os.path.dirname(os.path.abspath(__file__))
return open(os.path.join(here, fname)).read()
class PyTest(TestCommand):
user_options = [("pytest-args=", "a", "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
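        # A bare `setup.py test` invocation (argv == ['setup.py', 'test']) defaults
        # to running the package's own test suite.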
if len(sys.argv) == 2:
self.pytest_args = ["{{ cookiecutter.module_name }}"]
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
sys.exit(pytest.main(self.pytest_args))
setup(
name="{{ cookiecutter.package_name }}",
version=__import__("{{ cookiecutter.module_name }}").__version__,
author="{{ cookiecutter.author }}",
author_email="{{ cookiecutter.author_email }}",
url="{{ cookiecutter.url }}",
description="",
long_description=_read("README.rst"),
packages=find_packages(),
# install_requires=[""],
tests_require=["pytest", "testfixtures"],
cmdclass={"test": PyTest},
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
],
license="MIT License",
)
``` |
{
"source": "jptomo/example-django-change-settings",
"score": 2
} |
#### File: example-django-change-settings/apps/main.py
```python
from django.conf import settings
from django.conf.urls import url
from django.http import HttpResponse
def index(request):
return HttpResponse(getattr(settings, 'MESSAGE', '!!! EMPTY !!!'))
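# The trailing comma below makes urlpatterns a one-element tuple of url() patterns.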
urlpatterns = url(r'^$', index),
``` |
{
"source": "jptomo/ex-zca",
"score": 2
} |
#### File: ex-zca/src/app.py
```python
from zope import component
from zope.interface import Interface
from zope.interface.interface import Method
class IOutputMessage(Interface):
output = Method(''' output message to some devices. ''')
class Greeter:
def __init__(self):
self.out = component.getUtility(IOutputMessage, 'output')
def greet(self):
self.out.output('hello, world.')
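# Hypothetical wiring sketch (not part of the original file): before a Greeter
# can be built, an IOutputMessage utility must be registered under the name
# 'output', e.g.:
#
#   from zope.interface import implementer
#
#   @implementer(IOutputMessage)
#   class ConsoleOutput:
#       def output(self, message):
#           print(message)
#
#   component.provideUtility(ConsoleOutput(), IOutputMessage, 'output')
#   Greeter().greet()  # -> 'hello, world.'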
``` |
{
"source": "jptomo/pypy-lang-scheme",
"score": 2
} |
#### File: annotator/test/test_annrpython.py
```python
from __future__ import with_statement
import py.test
import sys
from rpython.conftest import option
from rpython.annotator import model as annmodel
from rpython.annotator.model import AnnotatorError, UnionError
from rpython.annotator.annrpython import RPythonAnnotator as _RPythonAnnotator
from rpython.annotator.classdef import NoSuchAttrError
from rpython.translator.translator import graphof as tgraphof
from rpython.annotator.policy import AnnotatorPolicy
from rpython.annotator.signature import Sig, SignatureError
from rpython.annotator.listdef import ListDef, ListChangeUnallowed
from rpython.annotator.dictdef import DictDef
from rpython.flowspace.model import *
from rpython.rlib.rarithmetic import r_uint, base_int, r_longlong, r_ulonglong
from rpython.rlib.rarithmetic import r_singlefloat
from rpython.rlib import objectmodel
from rpython.flowspace.flowcontext import FlowingError
from rpython.flowspace.operation import op
from rpython.translator.test import snippet
def graphof(a, func):
return tgraphof(a.translator, func)
def listitem(s_list):
assert isinstance(s_list, annmodel.SomeList)
return s_list.listdef.listitem.s_value
def somelist(s_type):
return annmodel.SomeList(ListDef(None, s_type))
def dictkey(s_dict):
assert isinstance(s_dict, annmodel.SomeDict)
return s_dict.dictdef.dictkey.s_value
def dictvalue(s_dict):
assert isinstance(s_dict, annmodel.SomeDict)
return s_dict.dictdef.dictvalue.s_value
def somedict(s_key, s_value):
return annmodel.SomeDict(DictDef(None, s_key, s_value))
class TestAnnotateTestCase:
def teardown_method(self, meth):
assert annmodel.s_Bool == annmodel.SomeBool()
class RPythonAnnotator(_RPythonAnnotator):
def build_types(self, *args):
s = _RPythonAnnotator.build_types(self, *args)
self.validate()
if option.view:
self.translator.view()
return s
def test_simple_func(self):
"""
one test source:
def f(x):
return x+1
"""
x = Variable("x")
oper = op.add(x, Constant(1))
block = Block([x])
fun = FunctionGraph("f", block)
block.operations.append(oper)
block.closeblock(Link([oper.result], fun.returnblock))
a = self.RPythonAnnotator()
a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()])
a.complete()
assert a.gettype(fun.getreturnvar()) == int
def test_while(self):
"""
one test source:
def f(i):
while i > 0:
i = i - 1
return i
"""
i1 = Variable("i1")
i2 = Variable("i2")
conditionop = op.gt(i1, Constant(0))
decop = op.add(i2, Constant(-1))
headerblock = Block([i1])
whileblock = Block([i2])
fun = FunctionGraph("f", headerblock)
headerblock.operations.append(conditionop)
headerblock.exitswitch = conditionop.result
headerblock.closeblock(Link([i1], fun.returnblock, False),
Link([i1], whileblock, True))
whileblock.operations.append(decop)
whileblock.closeblock(Link([decop.result], headerblock))
a = self.RPythonAnnotator()
a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()])
a.complete()
assert a.gettype(fun.getreturnvar()) == int
def test_while_sum(self):
"""
one test source:
def f(i):
sum = 0
while i > 0:
sum = sum + i
i = i - 1
return sum
"""
i1 = Variable("i1")
i2 = Variable("i2")
i3 = Variable("i3")
sum2 = Variable("sum2")
sum3 = Variable("sum3")
conditionop = op.gt(i2, Constant(0))
decop = op.add(i3, Constant(-1))
addop = op.add(i3, sum3)
startblock = Block([i1])
headerblock = Block([i2, sum2])
whileblock = Block([i3, sum3])
fun = FunctionGraph("f", startblock)
startblock.closeblock(Link([i1, Constant(0)], headerblock))
headerblock.operations.append(conditionop)
headerblock.exitswitch = conditionop.result
headerblock.closeblock(Link([sum2], fun.returnblock, False),
Link([i2, sum2], whileblock, True))
whileblock.operations.append(addop)
whileblock.operations.append(decop)
whileblock.closeblock(Link([decop.result, addop.result], headerblock))
a = self.RPythonAnnotator()
a.addpendingblock(fun, fun.startblock, [annmodel.SomeInteger()])
a.complete()
assert a.gettype(fun.getreturnvar()) == int
def test_f_calls_g(self):
a = self.RPythonAnnotator()
s = a.build_types(f_calls_g, [int])
# result should be an integer
assert s.knowntype == int
def test_lists(self):
a = self.RPythonAnnotator()
end_cell = a.build_types(snippet.poor_man_rev_range, [int])
# result should be a list of integers
assert listitem(end_cell).knowntype == int
def test_factorial(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.factorial, [int])
# result should be an integer
assert s.knowntype == int
def test_factorial2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.factorial2, [int])
# result should be an integer
assert s.knowntype == int
def test_build_instance(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.build_instance, [])
# result should be a snippet.C instance
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(snippet.C)
def test_set_attr(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.set_attr, [])
# result should be an integer
assert s.knowntype == int
def test_merge_setattr(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.merge_setattr, [int])
# result should be an integer
assert s.knowntype == int
def test_inheritance1(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.inheritance1, [])
# result should be exactly:
assert s == annmodel.SomeTuple([
a.bookkeeper.immutablevalue(()),
annmodel.SomeInteger()
])
def test_poor_man_range(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.poor_man_range, [int])
# result should be a list of integers
assert listitem(s).knowntype == int
def test_staticmethod(self):
class X(object):
@staticmethod
def stat(value):
return value + 4
def f(v):
return X().stat(v)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_classmethod(self):
class X(object):
@classmethod
def meth(cls):
return None
def f():
return X().meth()
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [])
def test_methodcall1(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet._methodcall1, [int])
# result should be a tuple of (C, positive_int)
assert s.knowntype == tuple
assert len(s.items) == 2
s0 = s.items[0]
assert isinstance(s0, annmodel.SomeInstance)
assert s0.classdef == a.bookkeeper.getuniqueclassdef(snippet.C)
assert s.items[1].knowntype == int
assert s.items[1].nonneg == True
def test_classes_methodcall1(self):
a = self.RPythonAnnotator()
a.build_types(snippet._methodcall1, [int])
# the user classes should have the following attributes:
getcdef = a.bookkeeper.getuniqueclassdef
assert getcdef(snippet.F).attrs.keys() == ['m']
assert getcdef(snippet.G).attrs.keys() == ['m2']
assert getcdef(snippet.H).attrs.keys() == ['attr']
assert getcdef(snippet.H).about_attribute('attr') == (
a.bookkeeper.immutablevalue(1))
def test_generaldict(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.generaldict, [str, int, str, int])
# result should be an integer
assert s.knowntype == int
def test_somebug1(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet._somebug1, [int])
# result should be a built-in method
assert isinstance(s, annmodel.SomeBuiltin)
def test_with_init(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.with_init, [int])
# result should be an integer
assert s.knowntype == int
def test_with_more_init(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.with_more_init, [int, bool])
# the user classes should have the following attributes:
getcdef = a.bookkeeper.getuniqueclassdef
# XXX on which class should the attribute 'a' appear? We only
# ever flow WithInit.__init__ with a self which is an instance
# of WithMoreInit, so currently it appears on WithMoreInit.
assert getcdef(snippet.WithMoreInit).about_attribute('a') == (
annmodel.SomeInteger())
assert getcdef(snippet.WithMoreInit).about_attribute('b') == (
annmodel.SomeBool())
def test_global_instance(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.global_instance, [])
# currently this returns the constant 42.
# XXX not sure this is the best behavior...
assert s == a.bookkeeper.immutablevalue(42)
def test_call_five(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.call_five, [])
# returns should be a list of constants (= 5)
assert listitem(s) == a.bookkeeper.immutablevalue(5)
def test_call_five_six(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.call_five_six, [])
# returns should be a list of positive integers
assert listitem(s) == annmodel.SomeInteger(nonneg=True)
def test_constant_result(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.constant_result, [])
#a.translator.simplify()
# must return "yadda"
assert s == a.bookkeeper.immutablevalue("yadda")
graphs = a.translator.graphs
assert len(graphs) == 2
assert graphs[0].func is snippet.constant_result
assert graphs[1].func is snippet.forty_two
a.simplify()
#a.translator.view()
def test_flow_type_info(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_type_info, [int])
a.simplify()
assert s.knowntype == int
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_type_info, [str])
a.simplify()
assert s.knowntype == int
def test_flow_type_info_2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_type_info,
[annmodel.SomeInteger(nonneg=True)])
# this checks that isinstance(i, int) didn't lose the
# actually more precise information that i is non-negative
assert s == annmodel.SomeInteger(nonneg=True)
def test_flow_usertype_info(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_usertype_info, [snippet.WithInit])
#a.translator.view()
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(snippet.WithInit)
def test_flow_usertype_info2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_usertype_info, [snippet.WithMoreInit])
#a.translator.view()
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(snippet.WithMoreInit)
def test_mergefunctions(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.mergefunctions, [int])
# the test is mostly that the above line hasn't blown up
# but let's at least check *something*
assert isinstance(s, annmodel.SomePBC)
def test_func_calls_func_which_just_raises(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.funccallsex, [])
# the test is mostly that the above line hasn't blown up
# but let's at least check *something*
#self.assert_(isinstance(s, SomeCallable))
def test_tuple_unpack_from_const_tuple_with_different_types(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.func_arg_unpack, [])
assert isinstance(s, annmodel.SomeInteger)
assert s.const == 3
def test_star_unpack_list(self):
def g():
pass
def f(l):
return g(*l)
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError):
a.build_types(f, [[int]])
def test_star_unpack_and_keywords(self):
def g(a, b, c=0, d=0):
return a + b + c + d
def f(a, b):
return g(a, *(b,), d=5)
a = self.RPythonAnnotator()
s_result = a.build_types(f, [int, int])
assert isinstance(s_result, annmodel.SomeInteger)
def test_pbc_attr_preserved_on_instance(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.preserve_pbc_attr_on_instance, [bool])
#a.simplify()
#a.translator.view()
assert s == annmodel.SomeInteger(nonneg=True)
#self.assertEquals(s.__class__, annmodel.SomeInteger)
def test_pbc_attr_preserved_on_instance_with_slots(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.preserve_pbc_attr_on_instance_with_slots,
[bool])
assert s == annmodel.SomeInteger(nonneg=True)
def test_is_and_knowntype_data(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.is_and_knowntype, [str])
#a.simplify()
#a.translator.view()
assert s == a.bookkeeper.immutablevalue(None)
def test_isinstance_and_knowntype_data(self):
a = self.RPythonAnnotator()
x = a.bookkeeper.immutablevalue(snippet.apbc)
s = a.build_types(snippet.isinstance_and_knowntype, [x])
#a.simplify()
#a.translator.view()
assert s == x
def test_somepbc_simplify(self):
a = self.RPythonAnnotator()
# this example used to trigger an AssertionError
a.build_types(snippet.somepbc_simplify, [])
def test_builtin_methods(self):
a = self.RPythonAnnotator()
iv = a.bookkeeper.immutablevalue
# this checks that some built-in methods are really supported by
        # the annotator (it doesn't check that they operate properly, though)
for example, methname, s_example in [
('', 'join', annmodel.SomeString()),
([], 'append', somelist(annmodel.s_Int)),
([], 'extend', somelist(annmodel.s_Int)),
([], 'reverse', somelist(annmodel.s_Int)),
([], 'insert', somelist(annmodel.s_Int)),
([], 'pop', somelist(annmodel.s_Int)),
]:
constmeth = getattr(example, methname)
s_constmeth = iv(constmeth)
assert isinstance(s_constmeth, annmodel.SomeBuiltin)
s_meth = s_example.getattr(iv(methname))
assert isinstance(s_constmeth, annmodel.SomeBuiltin)
def test_str_join(self):
a = self.RPythonAnnotator()
def g(n):
if n:
return ["foo", "bar"]
def f(n):
g(0)
return ''.join(g(n))
s = a.build_types(f, [int])
assert s.knowntype == str
assert s.no_nul
def test_unicode_join(self):
a = self.RPythonAnnotator()
def g(n):
if n:
return [u"foo", u"bar"]
def f(n):
g(0)
return u''.join(g(n))
s = a.build_types(f, [int])
assert s.knowntype == unicode
assert s.no_nul
def test_str_split(self):
a = self.RPythonAnnotator()
def g(n):
if n:
return "test string"
def f(n):
if n:
return g(n).split(' ')
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeList)
s_item = s.listdef.listitem.s_value
assert s_item.no_nul
def test_unicode_split(self):
a = self.RPythonAnnotator()
def g(n):
if n:
return u"test string"
def f(n):
if n:
return g(n).split(u' ')
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeList)
s_item = s.listdef.listitem.s_value
assert s_item.no_nul
def test_str_split_nul(self):
def f(n):
return n.split('\0')[0]
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeString(no_nul=False, can_be_None=False)])
assert isinstance(s, annmodel.SomeString)
assert not s.can_be_None
assert s.no_nul
def g(n):
return n.split('\0', 1)[0]
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(g, [annmodel.SomeString(no_nul=False, can_be_None=False)])
assert isinstance(s, annmodel.SomeString)
assert not s.can_be_None
assert not s.no_nul
def test_unicode_split_nul(self):
def f(n):
return n.split(u'\0')[0]
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeUnicodeString(
no_nul=False, can_be_None=False)])
assert isinstance(s, annmodel.SomeUnicodeString)
assert not s.can_be_None
assert s.no_nul
def g(n):
return n.split(u'\0', 1)[0]
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(g, [annmodel.SomeUnicodeString(
no_nul=False, can_be_None=False)])
assert isinstance(s, annmodel.SomeUnicodeString)
assert not s.can_be_None
assert not s.no_nul
def test_str_splitlines(self):
a = self.RPythonAnnotator()
def f(a_str):
return a_str.splitlines()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeList)
assert s.listdef.listitem.resized
def test_str_strip(self):
a = self.RPythonAnnotator()
def f(n, a_str):
if n == 0:
return a_str.strip(' ')
elif n == 1:
return a_str.rstrip(' ')
else:
return a_str.lstrip(' ')
s = a.build_types(f, [int, annmodel.SomeString(no_nul=True)])
assert s.no_nul
def test_unicode_strip(self):
a = self.RPythonAnnotator()
def f(n, a_str):
if n == 0:
return a_str.strip(u' ')
elif n == 1:
return a_str.rstrip(u' ')
else:
return a_str.lstrip(u' ')
s = a.build_types(f, [int, annmodel.SomeUnicodeString(no_nul=True)])
assert s.no_nul
def test_str_mul(self):
a = self.RPythonAnnotator()
def f(a_str):
return a_str * 3
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeString)
def test_str_isalpha(self):
def f(s):
return s.isalpha()
a = self.RPythonAnnotator()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeBool)
def test_simple_slicing(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.simple_slice, [somelist(annmodel.s_Int)])
assert isinstance(s, annmodel.SomeList)
def test_simple_iter_list(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.simple_iter, [somelist(annmodel.s_Int)])
assert isinstance(s, annmodel.SomeIterator)
def test_simple_iter_next(self):
def f(x):
i = iter(range(x))
return i.next()
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_simple_iter_dict(self):
a = self.RPythonAnnotator()
t = somedict(annmodel.SomeInteger(), annmodel.SomeInteger())
s = a.build_types(snippet.simple_iter, [t])
assert isinstance(s, annmodel.SomeIterator)
def test_simple_zip(self):
a = self.RPythonAnnotator()
x = somelist(annmodel.SomeInteger())
y = somelist(annmodel.SomeString())
s = a.build_types(snippet.simple_zip, [x,y])
assert s.knowntype == list
assert listitem(s).knowntype == tuple
assert listitem(s).items[0].knowntype == int
assert listitem(s).items[1].knowntype == str
def test_dict_copy(self):
a = self.RPythonAnnotator()
t = somedict(annmodel.SomeInteger(), annmodel.SomeInteger())
s = a.build_types(snippet.dict_copy, [t])
assert isinstance(dictkey(s), annmodel.SomeInteger)
assert isinstance(dictvalue(s), annmodel.SomeInteger)
def test_dict_update(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_update, [int])
assert isinstance(dictkey(s), annmodel.SomeInteger)
assert isinstance(dictvalue(s), annmodel.SomeInteger)
def test_dict_update_2(self):
a = self.RPythonAnnotator()
def g(n):
if n:
return {3: 4}
def f(n):
g(0)
d = {}
d.update(g(n))
return d
s = a.build_types(f, [int])
assert dictkey(s).knowntype == int
def test_dict_keys(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_keys, [])
assert isinstance(listitem(s), annmodel.SomeString)
def test_dict_keys2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_keys2, [])
assert type(listitem(s)) is annmodel.SomeString
def test_dict_values(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_values, [])
assert isinstance(listitem(s), annmodel.SomeString)
def test_dict_values2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_values2, [])
assert type(listitem(s)) is annmodel.SomeString
def test_dict_items(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.dict_items, [])
assert isinstance(listitem(s), annmodel.SomeTuple)
s_key, s_value = listitem(s).items
assert isinstance(s_key, annmodel.SomeString)
assert isinstance(s_value, annmodel.SomeInteger)
def test_dict_setdefault(self):
a = self.RPythonAnnotator()
def f():
d = {}
d.setdefault('a', 2)
d.setdefault('a', -3)
return d
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeDict)
assert isinstance(dictkey(s), annmodel.SomeString)
assert isinstance(dictvalue(s), annmodel.SomeInteger)
assert not dictvalue(s).nonneg
def test_exception_deduction(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exception_deduction, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
def test_exception_deduction_we_are_dumb(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exception_deduction_we_are_dumb, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
def test_nested_exception_deduction(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.nested_exception_deduction, [])
assert isinstance(s, annmodel.SomeTuple)
assert isinstance(s.items[0], annmodel.SomeInstance)
assert isinstance(s.items[1], annmodel.SomeInstance)
assert s.items[0].classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
assert s.items[1].classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc2)
def test_exc_deduction_our_exc_plus_others(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exc_deduction_our_exc_plus_others, [])
assert isinstance(s, annmodel.SomeInteger)
def test_exc_deduction_our_excs_plus_others(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exc_deduction_our_excs_plus_others, [])
assert isinstance(s, annmodel.SomeInteger)
def test_operation_always_raising(self):
def operation_always_raising(n):
lst = []
try:
return lst[n]
except IndexError:
return 24
a = self.RPythonAnnotator()
s = a.build_types(operation_always_raising, [int])
assert s == a.bookkeeper.immutablevalue(24)
def test_propagation_of_fresh_instances_through_attrs(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.propagation_of_fresh_instances_through_attrs, [int])
assert s is not None
def test_propagation_of_fresh_instances_through_attrs_rec_0(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.make_r, [int])
Rdef = a.bookkeeper.getuniqueclassdef(snippet.R)
assert s.classdef == Rdef
assert Rdef.attrs['r'].s_value.classdef == Rdef
assert Rdef.attrs['n'].s_value.knowntype == int
assert Rdef.attrs['m'].s_value.knowntype == int
def test_propagation_of_fresh_instances_through_attrs_rec_eo(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.make_eo, [int])
assert s.classdef == a.bookkeeper.getuniqueclassdef(snippet.B)
Even_def = a.bookkeeper.getuniqueclassdef(snippet.Even)
Odd_def = a.bookkeeper.getuniqueclassdef(snippet.Odd)
assert listitem(Even_def.attrs['x'].s_value).classdef == Odd_def
assert listitem(Even_def.attrs['y'].s_value).classdef == Even_def
assert listitem(Odd_def.attrs['x'].s_value).classdef == Even_def
assert listitem(Odd_def.attrs['y'].s_value).classdef == Odd_def
def test_flow_rev_numbers(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.flow_rev_numbers, [int])
assert s.knowntype == int
assert not s.is_constant() # !
def test_methodcall_is_precise(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.methodcall_is_precise, [bool])
getcdef = a.bookkeeper.getuniqueclassdef
assert 'x' not in getcdef(snippet.CBase).attrs
assert (getcdef(snippet.CSub1).attrs['x'].s_value ==
a.bookkeeper.immutablevalue(42))
assert (getcdef(snippet.CSub2).attrs['x'].s_value ==
a.bookkeeper.immutablevalue('world'))
assert s == a.bookkeeper.immutablevalue(42)
def test_call_star_args(self):
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(snippet.call_star_args, [int])
assert s.knowntype == int
def test_call_star_args_multiple(self):
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(snippet.call_star_args_multiple, [int])
assert s.knowntype == int
def test_class_spec(self):
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(snippet.class_spec, [])
assert s.items[0].knowntype == int
assert s.items[1].knowntype == str
def test_class_spec_confused(self):
x = snippet.PolyStk()
def f():
return x
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
with py.test.raises(Exception):
a.build_types(f, [])
def test_exception_deduction_with_raise1(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exception_deduction_with_raise1, [bool])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
def test_exception_deduction_with_raise2(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exception_deduction_with_raise2, [bool])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
def test_exception_deduction_with_raise3(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.exception_deduction_with_raise3, [bool])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(snippet.Exc)
def test_type_is(self):
class B(object):
pass
class C(B):
pass
def f(x):
assert type(x) is C
return x
a = self.RPythonAnnotator()
s = a.build_types(f, [B])
assert s.classdef is a.bookkeeper.getuniqueclassdef(C)
def test_union_type_some_pbc(self):
py.test.skip("is there a point? f() can return self.__class__ instead")
class A(object):
name = "A"
def f(self):
return type(self)
class B(A):
name = "B"
def f(tp):
return tp
def main(n):
if n:
if n == 1:
inst = A()
else:
inst = B()
arg = inst.f()
else:
arg = B
return f(arg).name
a = self.RPythonAnnotator()
s = a.build_types(main, [int])
assert isinstance(s, annmodel.SomeString)
def test_ann_assert(self):
def assert_(x):
assert x,"XXX"
a = self.RPythonAnnotator()
s = a.build_types(assert_, [int])
assert s.const is None
def test_string_and_none(self):
def f(n):
if n:
return 'y'
else:
return 'n'
def g(n):
if n:
return 'y'
else:
return None
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == str
assert not s.can_be_None
s = a.build_types(g, [bool])
assert s.knowntype == str
assert s.can_be_None
def test_implicit_exc(self):
def f(l):
try:
l[0]
except (KeyError, IndexError),e:
return e
return None
a = self.RPythonAnnotator()
s = a.build_types(f, [somelist(annmodel.s_Int)])
assert s.classdef is a.bookkeeper.getuniqueclassdef(IndexError) # KeyError ignored because l is a list
def test_freeze_protocol(self):
class Stuff:
def __init__(self):
self.called = False
def _freeze_(self):
self.called = True
return True
myobj = Stuff()
a = self.RPythonAnnotator()
s = a.build_types(lambda: myobj, [])
assert myobj.called
assert isinstance(s, annmodel.SomePBC)
assert s.const == myobj
def test_cleanup_protocol(self):
class Stuff:
def __init__(self):
self.called = False
def _cleanup_(self):
self.called = True
myobj = Stuff()
a = self.RPythonAnnotator()
s = a.build_types(lambda: myobj, [])
assert myobj.called
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef is a.bookkeeper.getuniqueclassdef(Stuff)
def test_circular_mutable_getattr(self):
class C:
pass
c = C()
c.x = c
def f():
return c.x
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(C)
def test_circular_list_type(self):
def f(n):
lst = []
for i in range(n):
lst = [lst]
return lst
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert listitem(s) == s
def test_harmonic(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.harmonic, [int])
assert s.knowntype == float
# check that the list produced by range() is not mutated or resized
graph = graphof(a, snippet.harmonic)
all_vars = set().union(*[block.getvariables() for block in graph.iterblocks()])
print all_vars
for var in all_vars:
s_value = var.annotation
if isinstance(s_value, annmodel.SomeList):
assert not s_value.listdef.listitem.resized
assert not s_value.listdef.listitem.mutated
assert s_value.listdef.listitem.range_step
def test_bool(self):
def f(a,b):
return bool(a) or bool(b)
a = self.RPythonAnnotator()
s = a.build_types(f, [int, somelist(annmodel.s_Int)])
assert s.knowntype == bool
def test_float(self):
def f(n):
return float(n)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == float
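    # The next few tests exercise the machine-level integer types from
    # rpython.rlib.rarithmetic.  r_uint is a machine-word-sized unsigned
    # integer; the annotator refuses to union a signed and an unsigned
    # annotation (UnionError) unless the signed side is provably
    # non-negative, e.g. the constant 0 or an assert-guarded value.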
def test_r_uint(self):
def f(n):
return n + constant_unsigned_five
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_large_unsigned(self):
large_constant = sys.maxint * 2 + 1 # 0xFFFFFFFF on 32-bit platforms
def f():
return large_constant
a = self.RPythonAnnotator()
with py.test.raises(ValueError):
a.build_types(f, [])
# if you want to get a r_uint, you have to be explicit about it
def test_add_different_ints(self):
def f(a, b):
return a + b
a = self.RPythonAnnotator()
with py.test.raises(UnionError):
a.build_types(f, [r_uint, int])
def test_merge_different_ints(self):
def f(a, b):
if a:
c = a
else:
c = b
return c
a = self.RPythonAnnotator()
with py.test.raises(UnionError):
a.build_types(f, [r_uint, int])
def test_merge_ruint_zero(self):
def f(a):
if a:
c = a
else:
c = 0
return c
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_merge_ruint_nonneg_signed(self):
def f(a, b):
if a:
c = a
else:
assert b >= 0
c = b
return c
a = self.RPythonAnnotator()
s = a.build_types(f, [r_uint, int])
assert s == annmodel.SomeInteger(nonneg = True, unsigned = True)
def test_prebuilt_long_that_is_not_too_long(self):
small_constant = 12L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 12
assert s.nonneg
assert not s.unsigned
#
small_constant = -23L
def f():
return small_constant
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == -23
assert not s.nonneg
assert not s.unsigned
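    # A prebuilt instance whose class defines _freeze_() returning True is
    # a frozen prebuilt constant (PBC): its attributes are read as
    # immutable values, and all PBCs whose attributes are accessed at the
    # same getattr sites are grouped into a common attribute family.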
def test_pbc_getattr(self):
class C:
def __init__(self, v1, v2):
self.v2 = v2
self.v1 = v1
def _freeze_(self):
return True
c1 = C(1,'a')
c2 = C(2,'b')
c3 = C(3,'c')
def f1(l, c):
l.append(c.v1)
def f2(l, c):
l.append(c.v2)
def g():
l1 = []
l2 = []
f1(l1, c1)
f1(l1, c2)
f2(l2, c2)
f2(l2, c3)
return l1,l2
a = self.RPythonAnnotator()
s = a.build_types(g,[])
l1, l2 = s.items
assert listitem(l1).knowntype == int
assert listitem(l2).knowntype == str
acc1 = a.bookkeeper.getdesc(c1).getattrfamily()
acc2 = a.bookkeeper.getdesc(c2).getattrfamily()
acc3 = a.bookkeeper.getdesc(c3).getattrfamily()
assert acc1 is acc2 is acc3
assert len(acc1.descs) == 3
assert dict.fromkeys(acc1.attrs) == {'v1': None, 'v2': None}
def test_single_pbc_getattr(self):
class C:
def __init__(self, v1, v2):
self.v1 = v1
self.v2 = v2
def _freeze_(self):
return True
c1 = C(11, "hello")
c2 = C(22, 623)
def f1(l, c):
l.append(c.v1)
def f2(c):
return c.v2
def f3(c):
return c.v2
def g():
l = []
f1(l, c1)
f1(l, c2)
return l, f2(c1), f3(c2)
a = self.RPythonAnnotator()
s = a.build_types(g,[])
s_l, s_c1v2, s_c2v2 = s.items
assert listitem(s_l).knowntype == int
assert s_c1v2.const == "hello"
assert s_c2v2.const == 623
acc1 = a.bookkeeper.getdesc(c1).getattrfamily()
acc2 = a.bookkeeper.getdesc(c2).getattrfamily()
assert acc1 is acc2
assert acc1.attrs.keys() == ['v1']
def test_isinstance_unsigned_1(self):
def f(x):
return isinstance(x, r_uint)
def g():
v = r_uint(1)
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.const == True
def test_isinstance_unsigned_2(self):
class Foo:
pass
def f(x):
return isinstance(x, r_uint)
def g():
v = Foo()
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.const == False
def test_isinstance_base_int(self):
def f(x):
return isinstance(x, base_int)
def g(n):
v = r_uint(n)
return f(v)
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert s.const == True
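    # _annspecialcase_ = "specialize:arg(N)" asks the annotator to build a
    # separate specialized graph per constant value seen in argument
    # position N (here, one graph per class passed to alloc()).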
def test_alloc_like(self):
class Base(object):
pass
class C1(Base):
pass
class C2(Base):
pass
def inst(cls):
return cls()
def alloc(cls):
i = inst(cls)
assert isinstance(i, cls)
return i
alloc._annspecialcase_ = "specialize:arg(0)"
def f():
c1 = alloc(C1)
c2 = alloc(C2)
return c1,c2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
C1df = a.bookkeeper.getuniqueclassdef(C1)
C2df = a.bookkeeper.getuniqueclassdef(C2)
assert s.items[0].classdef == C1df
assert s.items[1].classdef == C2df
allocdesc = a.bookkeeper.getdesc(alloc)
s_C1 = a.bookkeeper.immutablevalue(C1)
s_C2 = a.bookkeeper.immutablevalue(C2)
graph1 = allocdesc.specialize([s_C1], None)
graph2 = allocdesc.specialize([s_C2], None)
assert a.binding(graph1.getreturnvar()).classdef == C1df
assert a.binding(graph2.getreturnvar()).classdef == C2df
assert graph1 in a.translator.graphs
assert graph2 in a.translator.graphs
def test_specialcase_args(self):
class C1(object):
pass
class C2(object):
pass
def alloc(cls, cls2):
i = cls()
assert isinstance(i, cls)
j = cls2()
assert isinstance(j, cls2)
return i
def f():
alloc(C1, C1)
alloc(C1, C2)
alloc(C2, C1)
alloc(C2, C2)
alloc._annspecialcase_ = "specialize:arg(0,1)"
a = self.RPythonAnnotator()
C1df = a.bookkeeper.getuniqueclassdef(C1)
C2df = a.bookkeeper.getuniqueclassdef(C2)
s = a.build_types(f, [])
allocdesc = a.bookkeeper.getdesc(alloc)
s_C1 = a.bookkeeper.immutablevalue(C1)
s_C2 = a.bookkeeper.immutablevalue(C2)
graph1 = allocdesc.specialize([s_C1, s_C2], None)
graph2 = allocdesc.specialize([s_C2, s_C2], None)
assert a.binding(graph1.getreturnvar()).classdef == C1df
assert a.binding(graph2.getreturnvar()).classdef == C2df
assert graph1 in a.translator.graphs
assert graph2 in a.translator.graphs
def test_specialize_arg_bound_method(self):
class GC(object):
def trace(self, callback, *args):
return callback(*args)
trace._annspecialcase_ = "specialize:arg(1)"
def callback1(self, arg1):
self.x = arg1
return "hello"
def callback2(self, arg2, arg3):
self.y = arg2
self.z = arg3
return 6
def f():
gc = GC()
s1 = gc.trace(gc.callback1, "foo")
n2 = gc.trace(gc.callback2, 7, 2)
return (s1, n2, gc.x, gc.y, gc.z)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.items[0].const == "hello"
assert s.items[1].const == 6
assert s.items[2].const == "foo"
assert s.items[3].const == 7
assert s.items[4].const == 2
def test_specialize_and_star_args(self):
class I(object):
def execute(self, op, *args):
if op == 0:
return args[0]+args[1]
if op == 1:
return args[0] * args[1] + args[2]
execute._annspecialcase_ = "specialize:arg(1)"
def f(x, y):
i = I()
a = i.execute(0, x, y)
b = i.execute(1, y, y, 5)
return a+b
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
executedesc = a.bookkeeper.getdesc(I.execute.im_func)
assert len(executedesc._cache) == 2
assert len(executedesc._cache[(0, 'star', 2)].startblock.inputargs) == 4
assert len(executedesc._cache[(1, 'star', 3)].startblock.inputargs) == 5
def test_specialize_arg_or_var(self):
def f(a):
return 1
f._annspecialcase_ = 'specialize:arg_or_var(0)'
def fn(a):
return f(3) + f(a)
a = self.RPythonAnnotator()
a.build_types(fn, [int])
executedesc = a.bookkeeper.getdesc(f)
assert sorted(executedesc._cache.keys()) == [None, (3,)]
        # we got two different specializations: one for the constant
        # argument 3 (cache key (3,)) and one generic entry for variable
        # arguments (cache key None)
def test_specialize_call_location(self):
def g(a):
return a
g._annspecialcase_ = "specialize:call_location"
def f(x):
return g(x)
f._annspecialcase_ = "specialize:argtype(0)"
def h(y):
w = f(y)
return int(f(str(y))) + w
a = self.RPythonAnnotator()
assert a.build_types(h, [int]) == annmodel.SomeInteger()
def test_assert_list_doesnt_lose_info(self):
class T(object):
pass
def g(l):
assert isinstance(l, list)
return l
def f():
l = [T()]
return g(l)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
s_item = listitem(s)
assert isinstance(s_item, annmodel.SomeInstance)
assert s_item.classdef is a.bookkeeper.getuniqueclassdef(T)
def test_int_str_mul(self):
def f(x,a,b):
return a*x+x*b
a = self.RPythonAnnotator()
s = a.build_types(f, [str,int,int])
assert s.knowntype == str
def test_list_tuple(self):
def g0(x):
return list(x)
def g1(x):
return list(x)
def f(n):
l1 = g0(())
l2 = g1((1,))
if n:
t = (1,)
else:
t = (2,)
l3 = g1(t)
return l1, l2, l3
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert listitem(s.items[0]) == annmodel.SomeImpossibleValue()
assert listitem(s.items[1]).knowntype == int
assert listitem(s.items[2]).knowntype == int
def test_empty_list(self):
def f():
l = []
return bool(l)
def g():
l = []
x = bool(l)
l.append(1)
return x, bool(l)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == False
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.items[0].knowntype == bool and not s.items[0].is_constant()
assert s.items[1].knowntype == bool and not s.items[1].is_constant()
def test_empty_dict(self):
def f():
d = {}
return bool(d)
def g():
d = {}
x = bool(d)
d['a'] = 1
return x, bool(d)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == False
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.items[0].knowntype == bool and not s.items[0].is_constant()
assert s.items[1].knowntype == bool and not s.items[1].is_constant()
def test_call_two_funcs_but_one_can_only_raise(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.call_two_funcs_but_one_can_only_raise,
[int])
assert s == a.bookkeeper.immutablevalue(None)
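    # In the exception tests below, the graph's exceptblock takes two input
    # variables: 'et' is bound to a SomeType for the exception class and
    # 'ev' to a SomeInstance for the exception value; et.is_type_of records
    # that 'et' is the type of 'ev'.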
def test_reraiseKeyError(self):
def f(dic):
try:
dic[5]
except KeyError:
raise
a = self.RPythonAnnotator()
a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.const = KeyError
t.is_type_of = [ev]
assert a.binding(et) == t
        assert isinstance(a.binding(ev), annmodel.SomeInstance)
        assert a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
def test_reraiseAnything(self):
def f(dic):
try:
dic[5]
except:
raise
a = self.RPythonAnnotator()
a.build_types(f, [somedict(annmodel.s_Int, annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.is_type_of = [ev]
t.const = KeyError # IndexError ignored because 'dic' is a dict
assert a.binding(et) == t
        assert isinstance(a.binding(ev), annmodel.SomeInstance)
        assert a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(KeyError)
def test_exception_mixing(self):
def h():
pass
def g():
pass
class X(Exception):
def __init__(self, x=0):
self.x = x
def f(a, l):
if a==1:
raise X
elif a==2:
raise X(1)
elif a==3:
raise X(4)
else:
try:
l[0]
x,y = l
g()
finally:
h()
a = self.RPythonAnnotator()
a.build_types(f, [int, somelist(annmodel.s_Int)])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.is_type_of = [ev]
assert a.binding(et) == t
        assert isinstance(a.binding(ev), annmodel.SomeInstance)
        assert a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception)
def test_try_except_raise_finally1(self):
def h(): pass
def g(): pass
class X(Exception): pass
def f():
try:
try:
g()
except X:
h()
raise
finally:
h()
a = self.RPythonAnnotator()
a.build_types(f, [])
fg = graphof(a, f)
et, ev = fg.exceptblock.inputargs
t = annmodel.SomeType()
t.is_type_of = [ev]
assert a.binding(et) == t
        assert isinstance(a.binding(ev), annmodel.SomeInstance)
        assert a.binding(ev).classdef == a.bookkeeper.getuniqueclassdef(Exception)
def test_inplace_div(self):
def f(n):
n /= 2
return n / 2
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == int
def test_prime(self):
a = self.RPythonAnnotator()
s = a.build_types(snippet.prime, [int])
assert s.knowntype == bool
def test_and_bool_coalesce(self):
def f(a,b,c,d,e):
x = a and b
if x:
return d,c
return e,c
a = self.RPythonAnnotator()
        s = a.build_types(f, [int, str, a.bookkeeper.immutablevalue(1.0),
                              a.bookkeeper.immutablevalue('d'),
                              a.bookkeeper.immutablevalue('e')])
        assert s == annmodel.SomeTuple([annmodel.SomeChar(),
                                        a.bookkeeper.immutablevalue(1.0)])
def test_bool_coalesce2(self):
def f(a,b,a1,b1,c,d,e):
x = (a or b) and (a1 or b1)
if x:
return d,c
return e,c
a = self.RPythonAnnotator()
s = a.build_types(f, [int, str, float, somelist(annmodel.s_Int),
a.bookkeeper.immutablevalue(1.0),
a.bookkeeper.immutablevalue('d'),
a.bookkeeper.immutablevalue('e')])
assert s == annmodel.SomeTuple([annmodel.SomeChar(),
a.bookkeeper.immutablevalue(1.0)])
def test_bool_coalesce_sanity(self):
def f(a):
while a:
pass
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s == a.bookkeeper.immutablevalue(None)
def test_non_None_path(self):
class C:
pass
def g(c):
if c is None:
return C()
return c
def f(x):
if x:
c = None
else:
c = C()
return g(c)
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.can_be_none() == False
def test_can_be_None_path(self):
class C:
pass
def f(x):
if x:
c = None
else:
c = C()
return isinstance(c, C)
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert not s.is_constant()
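    # Comparisons against constants narrow integer annotations along each
    # branch: after 'if a < 0: a = 0' the merged annotation of 'a' becomes
    # SomeInteger(nonneg=True).  The "general" variants check that the same
    # reasoning applies to wider types such as r_longlong.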
def test_nonneg_cleverness(self):
def f(a, b, c, d, e, f, g, h):
if a < 0: a = 0
if b <= 0: b = 0
if c >= 0:
pass
else:
c = 0
if d < a: d = a
if e <= b: e = 1
if c > f: f = 2
if d >= g: g = 3
if h != a: h = 0
return a, b, c, d, e, f, g, h
a = self.RPythonAnnotator()
s = a.build_types(f, [int]*8)
assert s == annmodel.SomeTuple([annmodel.SomeInteger(nonneg=True)] * 8)
def test_general_nonneg_cleverness(self):
def f(a, b, c, d, e, f, g, h):
if a < 0: a = 0
if b <= 0: b = 0
if c >= 0:
pass
else:
c = 0
if d < a: d = a
if e <= b: e = 1
if c > f: f = 2
if d >= g: g = 3
if h != a: h = 0
return a, b, c, d, e, f, g, h
a = self.RPythonAnnotator()
s = a.build_types(f, [r_longlong]*8)
assert s == annmodel.SomeTuple([annmodel.SomeInteger(nonneg=True, knowntype=r_longlong)] * 8)
def test_more_nonneg_cleverness(self):
def f(start, stop):
assert 0 <= start <= stop
return start, stop
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
assert s == annmodel.SomeTuple([annmodel.SomeInteger(nonneg=True)] * 2)
def test_more_general_nonneg_cleverness(self):
def f(start, stop):
assert 0 <= start <= stop
return start, stop
a = self.RPythonAnnotator()
s = a.build_types(f, [r_longlong, r_longlong])
assert s == annmodel.SomeTuple([annmodel.SomeInteger(nonneg=True, knowntype=r_longlong)] * 2)
def test_nonneg_cleverness_is_gentle_with_unsigned(self):
def witness1(x):
pass
def witness2(x):
pass
def f(x):
if 0 < x:
witness1(x)
if x > 0:
witness2(x)
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeInteger(unsigned=True)])
wg1 = graphof(a, witness1)
wg2 = graphof(a, witness2)
assert a.binding(wg1.getargs()[0]).unsigned is True
assert a.binding(wg2.getargs()[0]).unsigned is True
def test_general_nonneg_cleverness_is_gentle_with_unsigned(self):
def witness1(x):
pass
def witness2(x):
pass
def f(x):
if 0 < x:
witness1(x)
if x > 0:
witness2(x)
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeInteger(knowntype=r_ulonglong)])
wg1 = graphof(a, witness1)
wg2 = graphof(a, witness2)
assert a.binding(wg1.getargs()[0]).knowntype is r_ulonglong
assert a.binding(wg2.getargs()[0]).knowntype is r_ulonglong
def test_nonneg_cleverness_in_max(self):
def f(x):
return max(x, 0) + max(0, x)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.nonneg
def test_attr_moving_into_parent(self):
class A: pass
class B(A): pass
a1 = A()
b1 = B()
b1.stuff = a1
a1.stuff = None
def f():
return b1.stuff
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert not s.can_be_None
assert s.classdef is a.bookkeeper.getuniqueclassdef(A)
def test_class_attribute(self):
class A:
stuff = 42
class B(A):
pass
def f():
b = B()
return b.stuff
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s == a.bookkeeper.immutablevalue(42)
def test_attr_recursive_getvalue(self):
class A: pass
a2 = A()
a2.stuff = None
a1 = A()
a1.stuff = a2
def f():
return a1.stuff
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.can_be_None
assert s.classdef is a.bookkeeper.getuniqueclassdef(A)
def test_long_list_recursive_getvalue(self):
class A: pass
lst = []
for i in range(500):
a1 = A()
a1.stuff = lst
lst.append(a1)
def f():
A().stuff = None
return (A().stuff, lst)[1]
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeList)
s_item = s.listdef.listitem.s_value
assert isinstance(s_item, annmodel.SomeInstance)
def test_immutable_dict(self):
d = {4: "hello",
5: "world"}
def f(n):
return d[n]
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeString)
def test_immutable_recursive_list(self):
l = []
l.append(l)
def f():
return l
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeList)
s_item = s.listdef.listitem.s_value
assert isinstance(s_item, annmodel.SomeList)
assert s_item.listdef.same_as(s.listdef)
def test_defaults_with_list_or_dict(self):
def fn1(a=[]):
return a
def fn2(a={}):
return a
def f():
fn1()
fn2()
return fn1([6, 7]), fn2({2: 3, 4: 5})
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeTuple)
s1, s2 = s.items
assert not s1.is_constant()
assert not s2.is_constant()
assert isinstance(s1.listdef.listitem. s_value, annmodel.SomeInteger)
assert isinstance(s2.dictdef.dictkey. s_value, annmodel.SomeInteger)
assert isinstance(s2.dictdef.dictvalue.s_value, annmodel.SomeInteger)
def test_pbc_union(self):
class A:
def meth(self):
return 12
class B(A):
pass
class C(B):
pass
def f(i):
if i:
f(0)
x = B()
else:
x = C()
return x.meth()
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s == a.bookkeeper.immutablevalue(12)
def test_int(self):
def f(x, s):
return int(x) + int(s) + int(s, 16)
a = self.RPythonAnnotator()
s = a.build_types(f, [int, str])
assert s.knowntype == int
def test_int_nonneg(self):
def f(x, y):
assert x >= 0
return int(x) + int(y == 3)
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
assert isinstance(s, annmodel.SomeInteger)
assert s.nonneg
def test_listitem_merge_asymmetry_bug(self):
class K:
pass
def mutr(k, x, i):
            k.l2 = [x] + k.l2   # this involves a side-effectful union and
                                # unification; with this order of arguments,
                                # some reflowing was missed
k.l2[i] = x
def witness(i):
pass
def trouble(k):
l = k.l1 + k.l2
for i in range(len(l)):
witness(l[i])
def f(flag, k, x, i):
if flag:
k = K()
k.l1 = []
k.l2 = []
trouble(k)
mutr(k, x, i)
a = self.RPythonAnnotator()
a.build_types(f, [bool, K, int, int])
g = graphof(a, witness)
assert a.binding(g.getargs()[0]).knowntype == int
# check RPython static semantics of isinstance(x,bool|int) as needed for wrap
def test_isinstance_int_bool(self):
def f(x):
if isinstance(x, int):
if isinstance(x, bool):
return "bool"
return "int"
return "dontknow"
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.const == "bool"
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.const == "int"
a = self.RPythonAnnotator()
s = a.build_types(f, [float])
assert s.const == "dontknow"
def test_hidden_method(self):
class Base:
def method(self):
return ["should be hidden"]
def indirect(self):
return self.method()
class A(Base):
def method(self):
return "visible"
class B(A): # note: it's a chain of subclasses
def method(self):
return None
def f(flag):
if flag:
obj = A()
else:
obj = B()
return obj.indirect()
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert annmodel.SomeString(can_be_None=True).contains(s)
def test_dont_see_AttributeError_clause(self):
class Stuff:
def _freeze_(self):
return True
def createcompiler(self):
try:
return self.default_compiler
except AttributeError:
compiler = "yadda"
self.default_compiler = compiler
return compiler
stuff = Stuff()
stuff.default_compiler = 123
def f():
return stuff.createcompiler()
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s == a.bookkeeper.immutablevalue(123)
def test_class_attribute_is_an_instance_of_itself(self):
class Base:
hello = None
class A(Base):
pass
A.hello = globalA = A()
def f():
return (Base().hello, globalA)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeTuple)
assert isinstance(s.items[0], annmodel.SomeInstance)
assert s.items[0].classdef is a.bookkeeper.getuniqueclassdef(A)
assert s.items[0].can_be_None
assert s.items[1] == a.bookkeeper.immutablevalue(A.hello)
def test_dict_and_none(self):
def f(i):
if i:
return {}
else:
return None
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == annmodel.SomeOrderedDict.knowntype
def test_const_list_and_none(self):
def g(l=None):
return l is None
L = [1,2]
def f():
g()
return g(L)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.knowntype == bool
assert not s.is_constant()
def test_const_dict_and_none(self):
def g(d=None):
return d is None
D = {1:2}
def f():
g(D)
return g()
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.knowntype == bool
assert not s.is_constant()
def test_issubtype_and_const(self):
class A(object):
pass
class B(object):
pass
class C(A):
pass
b = B()
c = C()
def g(f):
if f == 1:
x = b
elif f == 2:
x = c
else:
x = C()
t = type(x)
return issubclass(t, A)
a = self.RPythonAnnotator()
x = annmodel.SomeInteger()
x.const = 1
s = a.build_types(g, [x])
assert s.const == False
a = self.RPythonAnnotator()
x = annmodel.SomeInteger()
x.const = 2
s = a.build_types(g, [x])
assert s.const == True
def test_reading_also_generalizes(self):
def f1(i):
d = {'c': i}
return d['not-a-char'], d
a = self.RPythonAnnotator()
s = a.build_types(f1, [int])
assert dictkey(s.items[1]).__class__ == annmodel.SomeString
def f2(i):
d = {'c': i}
return d.get('not-a-char', i+1), d
a = self.RPythonAnnotator()
s = a.build_types(f2, [int])
assert dictkey(s.items[1]).__class__ == annmodel.SomeString
def f3(i):
d = {'c': i}
return 'not-a-char' in d, d
a = self.RPythonAnnotator()
s = a.build_types(f3, [int])
assert dictkey(s.items[1]).__class__ == annmodel.SomeString
def f4():
lst = ['a', 'b', 'c']
return 'not-a-char' in lst, lst
a = self.RPythonAnnotator()
s = a.build_types(f4, [])
assert listitem(s.items[1]).__class__ == annmodel.SomeString
def f5():
lst = ['a', 'b', 'c']
return lst.index('not-a-char'), lst
a = self.RPythonAnnotator()
s = a.build_types(f5, [])
assert listitem(s.items[1]).__class__ == annmodel.SomeString
def test_true_str_is_not_none(self):
def f(s):
if s:
return s
else:
return ''
def g(i):
if i:
return f(None)
else:
return f('')
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert s.knowntype == str
assert not s.can_be_None
def test_true_func_is_not_none(self):
def a1():
pass
def a2():
pass
def f(a):
if a:
return a
else:
return a2
def g(i):
if i:
return f(None)
else:
return f(a1)
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert not s.can_be_None
def test_string_noNUL_canbeNone(self):
def f(a):
if a:
return "abc"
else:
return None
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.can_be_None
assert s.no_nul
def test_unicode_noNUL_canbeNone(self):
def f(a):
if a:
return u"abc"
else:
return None
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.can_be_None
assert s.no_nul
def test_str_or_None(self):
def f(a):
if a:
return "abc"
else:
return None
def g(a):
x = f(a)
if x is None:
return "abcd"
return x
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.can_be_None
assert s.no_nul
def test_unicode_or_None(self):
def f(a):
if a:
return u"abc"
else:
return None
def g(a):
x = f(a)
if x is None:
return u"abcd"
return x
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.can_be_None
assert s.no_nul
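    # bookkeeper.emulate_pbc_call() simulates a call to a prebuilt callable
    # with explicitly chosen argument annotations, forcing the function to
    # be annotated even though no annotated graph actually calls it.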
def test_emulated_pbc_call_simple(self):
def f(a,b):
return a + b
from rpython.annotator import annrpython
a = annrpython.RPythonAnnotator()
from rpython.annotator import model as annmodel
s_f = a.bookkeeper.immutablevalue(f)
a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()])
a.complete()
a.simplify()
assert a.binding(graphof(a, f).getreturnvar()).knowntype == int
fdesc = a.bookkeeper.getdesc(f)
someint = annmodel.SomeInteger()
assert (fdesc.get_s_signatures((2, (), False))
== [([someint,someint],someint)])
def test_emulated_pbc_call_callback(self):
def f(a,b):
return a + b
from rpython.annotator import annrpython
a = annrpython.RPythonAnnotator()
from rpython.annotator import model as annmodel
memo = []
def callb(ann, graph):
memo.append(annmodel.SomeInteger() == ann.binding(graph.getreturnvar()))
s_f = a.bookkeeper.immutablevalue(f)
s = a.bookkeeper.emulate_pbc_call('f', s_f, [annmodel.SomeInteger(), annmodel.SomeInteger()],
callback=callb)
assert s == annmodel.SomeImpossibleValue()
a.complete()
assert a.binding(graphof(a, f).getreturnvar()).knowntype == int
assert len(memo) >= 1
for t in memo:
assert t
def test_iterator_union(self):
def it(d):
return d.iteritems()
d0 = {1:2}
def f():
it(d0)
return it({1:2})
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeIterator)
assert s.variant == ('items',)
def test_iteritems_str0(self):
def it(d):
return d.iteritems()
def f():
d0 = {'1a': '2a', '3': '4'}
for item in it(d0):
return "%s=%s" % item
raise ValueError
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
def test_iteritems_unicode0(self):
def it(d):
return d.iteritems()
def f():
d0 = {u'1a': u'2a', u'3': u'4'}
for item in it(d0):
return u"%s=%s" % item
raise ValueError
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
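    # SomeString and SomeUnicodeString carry a 'no_nul' flag meaning the
    # string provably contains no NUL character; the tests below check
    # that formatting, repetition and slicing preserve the flag.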
def test_no_nul_mod(self):
def f(x):
s = "%d" % x
return s
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
def test_no_nul_mod_unicode(self):
def f(x):
s = u"%d" % x
return s
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
def test_mul_str0(self):
def f(s):
return s*10
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeString(no_nul=True)])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True)])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
def test_reverse_mul_str0(self):
def f(s):
return 10*s
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeString(no_nul=True)])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True)])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
def test_getitem_str0(self):
def f(s, n):
if n == 1:
return s[0]
elif n == 2:
return s[1]
elif n == 3:
return s[1:]
return s
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeString(no_nul=True),
annmodel.SomeInteger()])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=True),
annmodel.SomeInteger()])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
def test_non_none_and_none_with_isinstance(self):
class A(object):
pass
class B(A):
pass
def g(x):
if isinstance(x, A):
return x
return None
def f():
g(B())
return g(None)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(B)
def test_type_is_no_improvement(self):
class B(object):
pass
class C(B):
pass
class D(B):
pass
def f(x):
if type(x) is C:
return x
raise Exception
a = self.RPythonAnnotator()
s = a.build_types(f, [D])
assert s == annmodel.SomeImpossibleValue()
def test_is_constant_instance(self):
class A(object):
pass
prebuilt_instance = A()
def f(x):
if x is prebuilt_instance:
return x
raise Exception
a = self.RPythonAnnotator()
s = a.build_types(f, [A])
assert s.is_constant()
assert s.const is prebuilt_instance
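    # "specialize:memo" functions are not translated: they are executed
    # during annotation, once per combination of constant arguments
    # (frozen PBCs and bools), and only their results appear in the final
    # program.  That is why the bodies below may use eval(), which is not
    # otherwise RPython.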
def test_call_memoized_function(self):
fr1 = Freezing()
fr2 = Freezing()
def getorbuild(key):
a = 1
if key is fr1:
result = eval("a+2")
else:
result = eval("a+6")
return result
getorbuild._annspecialcase_ = "specialize:memo"
def f1(i):
if i > 0:
fr = fr1
else:
fr = fr2
return getorbuild(fr)
a = self.RPythonAnnotator()
s = a.build_types(f1, [int])
assert s.knowntype == int
def test_call_memoized_function_with_bools(self):
fr1 = Freezing()
fr2 = Freezing()
def getorbuild(key, flag1, flag2):
a = 1
if key is fr1:
result = eval("a+2")
else:
result = eval("a+6")
if flag1:
result += 100
if flag2:
result += 1000
return result
getorbuild._annspecialcase_ = "specialize:memo"
def f1(i):
if i > 0:
fr = fr1
else:
fr = fr2
return getorbuild(fr, i % 2 == 0, i % 3 == 0)
a = self.RPythonAnnotator()
s = a.build_types(f1, [int])
assert s.knowntype == int
def test_stored_bound_method(self):
# issue 129
class H:
def h(self):
return 42
class C:
def __init__(self, func):
self.f = func
def do(self):
return self.f()
def g():
h = H()
c = C(h.h)
return c.do()
a = self.RPythonAnnotator()
s = a.build_types(g, [])
assert s.is_constant()
assert s.const == 42
def test_stored_bound_method_2(self):
# issue 129
class H:
pass
class H1(H):
def h(self):
return 42
class H2(H):
def h(self):
return 17
class C:
def __init__(self, func):
self.f = func
def do(self):
return self.f()
def g(flag):
if flag:
h = H1()
else:
h = H2()
c = C(h.h)
return c.do()
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert s.knowntype == int
assert not s.is_constant()
def test_getorbuild_as_attr(self):
from rpython.rlib.cache import Cache
class SpaceCache(Cache):
def _build(self, callable):
return callable()
class CacheX(Cache):
def _build(self, key):
return key.x
class CacheY(Cache):
def _build(self, key):
return key.y
class X:
def __init__(self, x):
self.x = x
def _freeze_(self):
return True
class Y:
def __init__(self, y):
self.y = y
def _freeze_(self):
return True
X1 = X(1)
Y2 = Y("hello")
fromcache = SpaceCache().getorbuild
def f():
return (fromcache(CacheX).getorbuild(X1),
fromcache(CacheY).getorbuild(Y2))
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.items[0].knowntype == int
assert s.items[1].knowntype == str
def test_constant_bound_method(self):
class C:
def __init__(self, value):
self.value = value
def meth(self):
return self.value
meth = C(1).meth
def f():
return meth()
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.knowntype == int
def test_annotate__del__(self):
class A(object):
def __init__(self):
self.a = 2
def __del__(self):
self.a = 1
def f():
return A().a
a = self.RPythonAnnotator()
t = a.translator
s = a.build_types(f, [])
assert s.knowntype == int
graph = tgraphof(t, A.__del__.im_func)
assert graph.startblock in a.annotated
def test_annotate__del__baseclass(self):
class A(object):
def __init__(self):
self.a = 2
def __del__(self):
self.a = 1
class B(A):
def __init__(self):
self.a = 3
def f():
return B().a
a = self.RPythonAnnotator()
t = a.translator
s = a.build_types(f, [])
assert s.knowntype == int
graph = tgraphof(t, A.__del__.im_func)
assert graph.startblock in a.annotated
def test_annotate_type(self):
class A:
pass
x = [A(), A()]
def witness(t):
return type(t)
def get(i):
return x[i]
def f(i):
witness(None)
return witness(get(i))
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeType)
def test_annotate_iter_empty_container(self):
def f():
n = 0
d = {}
for x in []: n += x
for y in d: n += y
for z in d.iterkeys(): n += z
for s in d.itervalues(): n += s
for t, u in d.items(): n += t * u
for t, u in d.iteritems(): n += t * u
return n
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.is_constant()
assert s.const == 0
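    # A class with _mixin_ = True gets no classdef of its own: its methods
    # and attributes are duplicated into every concrete subclass, so the
    # same mixin method can be annotated differently per subclass (int in
    # A, char in B below).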
def test_mixin(self):
class Mixin(object):
_mixin_ = True
def m(self, v):
return v
class Base(object):
pass
class A(Base, Mixin):
pass
class B(Base, Mixin):
pass
class C(B):
pass
def f():
a = A()
v0 = a.m(2)
b = B()
v1 = b.m('x')
c = C()
v2 = c.m('y')
return v0, v1, v2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s.items[0], annmodel.SomeInteger)
assert isinstance(s.items[1], annmodel.SomeChar)
assert isinstance(s.items[2], annmodel.SomeChar)
def test_mixin_staticmethod(self):
class Mixin(object):
_mixin_ = True
@staticmethod
def m(v):
return v
class Base(object):
pass
class A(Base, Mixin):
pass
class B(Base, Mixin):
pass
class C(B):
pass
def f():
a = A()
v0 = a.m(2)
b = B()
v1 = b.m('x')
c = C()
v2 = c.m('y')
return v0, v1, v2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s.items[0], annmodel.SomeInteger)
assert isinstance(s.items[1], annmodel.SomeChar)
assert isinstance(s.items[2], annmodel.SomeChar)
def test_mixin_first(self):
class Mixin(object):
_mixin_ = True
def foo(self): return 4
class Base(object):
def foo(self): return 5
class Concrete(Mixin, Base):
pass
def f():
return Concrete().foo()
assert f() == 4
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 4
def test_mixin_last(self):
class Mixin(object):
_mixin_ = True
def foo(self): return 4
class Base(object):
def foo(self): return 5
class Concrete(Base, Mixin):
pass
def f():
return Concrete().foo()
assert f() == 5
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 5
def test_mixin_concrete(self):
class Mixin(object):
_mixin_ = True
def foo(self): return 4
class Concrete(Mixin):
def foo(self): return 5
def f():
return Concrete().foo()
assert f() == 5
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 5
def test_multiple_mixins_mro(self):
# an obscure situation, but it occurred in module/micronumpy/types.py
class A(object):
_mixin_ = True
def foo(self): return 1
class B(A):
_mixin_ = True
def foo(self): return 2
class C(A):
_mixin_ = True
class D(B, C):
_mixin_ = True
class Concrete(D):
pass
def f():
return Concrete().foo()
assert f() == 2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 2
def test_multiple_mixins_mro_2(self):
class A(object):
_mixin_ = True
def foo(self): return 1
class B(A):
_mixin_ = True
def foo(self): return 2
class C(A):
_mixin_ = True
class Concrete(C, B):
pass
def f():
return Concrete().foo()
assert f() == 2
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.const == 2
def test_cannot_use_directly_mixin(self):
class A(object):
_mixin_ = True
#
def f():
return A()
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [])
#
class B(object):
pass
x = B()
def g():
return isinstance(x, A)
py.test.raises(AnnotatorError, a.build_types, g, [])
def test_import_from_mixin(self):
class M(object):
def f(self):
return self.a
class I(object):
objectmodel.import_from_mixin(M)
def __init__(self, i):
self.a = i
class S(object):
objectmodel.import_from_mixin(M)
def __init__(self, s):
self.a = s
def f(n):
return (I(n).f(), S("a" * n).f())
assert f(3) == (3, "aaa")
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s.items[0], annmodel.SomeInteger)
assert isinstance(s.items[1], annmodel.SomeString)
def test___class___attribute(self):
class Base(object): pass
class A(Base): pass
class B(Base): pass
class C(A): pass
def seelater():
C()
def f(n):
if n == 1:
x = A()
else:
x = B()
y = B()
result = x.__class__, y.__class__
seelater()
return result
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s.items[0], annmodel.SomePBC)
assert len(s.items[0].descriptions) == 4
assert isinstance(s.items[1], annmodel.SomePBC)
assert len(s.items[1].descriptions) == 1
def test_slots(self):
# check that the annotator ignores slots instead of being
# confused by them showing up as 'member' objects in the class
class A(object):
__slots__ = ('a', 'b')
def f(x):
a = A()
a.b = x
return a.b
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == int
def test_slots_reads(self):
class A(object):
__slots__ = ()
class B(A):
def __init__(self, x):
self.x = x
def f(x):
if x:
a = A()
else:
a = B(x)
return a.x # should explode here
a = self.RPythonAnnotator()
with py.test.raises(NoSuchAttrError) as excinfo:
a.build_types(f, [int])
# this should explode on reading the attribute 'a.x', but it can
# sometimes explode on 'self.x = x', which does not make much sense.
# But it looks hard to fix in general: we don't know yet during 'a.x'
# if the attribute x will be read-only or read-write.
def test_unboxed_value(self):
class A(object):
__slots__ = ()
class C(A, objectmodel.UnboxedValue):
__slots__ = unboxedattrname = 'smallint'
def f(n):
return C(n).smallint
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype == int
def test_annotate_bool(self):
def f(x):
return ~x
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == int
def f(x):
return -x
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == int
def f(x):
return +x
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == int
def f(x):
return abs(x)
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == int
def f(x):
return int(x)
a = self.RPythonAnnotator()
s = a.build_types(f, [bool])
assert s.knowntype == int
def f(x, y):
return x + y
a = self.RPythonAnnotator()
s = a.build_types(f, [bool, int])
assert s.knowntype == int
a = self.RPythonAnnotator()
s = a.build_types(f, [int, bool])
assert s.knowntype == int
def test_annotate_rarith(self):
inttypes = [int, r_uint, r_longlong, r_ulonglong]
for inttype in inttypes:
c = inttype()
def f():
return c
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInteger)
assert s.knowntype == inttype
assert s.unsigned == (inttype(-1) > 0)
for inttype in inttypes:
def f():
return inttype(0)
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInteger)
assert s.knowntype == inttype
assert s.unsigned == (inttype(-1) > 0)
for inttype in inttypes:
def f(x):
return x
a = self.RPythonAnnotator()
s = a.build_types(f, [inttype])
assert isinstance(s, annmodel.SomeInteger)
assert s.knowntype == inttype
assert s.unsigned == (inttype(-1) > 0)
def test_annotate_rshift(self):
def f(x):
return x >> 2
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeInteger(nonneg=True)])
assert isinstance(s, annmodel.SomeInteger)
assert s.nonneg
def test_prebuilt_mutables(self):
class A:
pass
class B:
pass
a1 = A()
a2 = A()
a1.d = {} # this tests confusion between the two '{}', which
a2.d = {} # compare equal
a1.l = []
a2.l = []
b = B()
b.d1 = a1.d
b.d2 = a2.d
b.l1 = a1.l
b.l2 = a2.l
def dmutate(d):
d[123] = 321
def lmutate(l):
l.append(42)
def readout(d, l):
return len(d) + len(l)
def f():
dmutate(b.d1)
dmutate(b.d2)
dmutate(a1.d)
dmutate(a2.d)
lmutate(b.l1)
lmutate(b.l2)
lmutate(a1.l)
lmutate(a2.l)
return readout(a1.d, a1.l) + readout(a2.d, a2.l)
a = self.RPythonAnnotator()
a.build_types(f, [])
v1, v2 = graphof(a, readout).getargs()
assert not a.binding(v1).is_constant()
assert not a.binding(v2).is_constant()
def test_prebuilt_mutables_dont_use_eq(self):
# test that __eq__ is not called during annotation, at least
# when we know that the classes differ anyway
class Base(object):
def __eq__(self, other):
if self is other:
return True
raise ValueError
def __hash__(self):
return 42
class A(Base):
pass
class B(Base):
pass
a1 = A()
a2 = B()
a1.x = 5
a2.x = 6
def f():
return a1.x + a2.x
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert s.knowntype == int
def test_chr_out_of_bounds(self):
def g(n, max):
if n < max:
return chr(n)
else:
return '?'
def fun(max):
v = g(1000, max)
return g(ord(v), max)
a = self.RPythonAnnotator()
s = a.build_types(fun, [int])
assert isinstance(s, annmodel.SomeChar)
def test_range_nonneg(self):
def fun(n, k):
for i in range(n):
if k == 17:
return i
return 0
a = self.RPythonAnnotator()
s = a.build_types(fun, [int, int])
assert isinstance(s, annmodel.SomeInteger)
assert s.nonneg
def test_range_nonneg_variablestep(self):
def get_step(n):
if n == 1:
return 2
else:
return 3
def fun(n, k):
step = get_step(n)
for i in range(0, n, step):
if k == 17:
return i
return 0
a = self.RPythonAnnotator()
s = a.build_types(fun, [int, int])
assert isinstance(s, annmodel.SomeInteger)
assert s.nonneg
def test_reverse_range_nonneg(self):
def fun(n, k):
for i in range(n-1, -1, -1):
if k == 17:
return i
return 0
a = self.RPythonAnnotator()
s = a.build_types(fun, [int, int])
assert isinstance(s, annmodel.SomeInteger)
assert s.nonneg
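    # _annenforceargs_ (a Sig() object or a plain tuple) forces the input
    # annotations of a function; a SignatureError is raised when an actual
    # argument does not fit the declared annotation.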
def test_sig(self):
def fun(x, y):
return x+y
s_nonneg = annmodel.SomeInteger(nonneg=True)
fun._annenforceargs_ = Sig(int, s_nonneg)
a = self.RPythonAnnotator()
s = a.build_types(fun, [s_nonneg, s_nonneg])
assert isinstance(s, annmodel.SomeInteger)
assert not s.nonneg
with py.test.raises(SignatureError):
a.build_types(fun, [int, int])
def test_sig_simpler(self):
def fun(x, y):
return x+y
s_nonneg = annmodel.SomeInteger(nonneg=True)
fun._annenforceargs_ = (int, s_nonneg)
a = self.RPythonAnnotator()
s = a.build_types(fun, [s_nonneg, s_nonneg])
assert isinstance(s, annmodel.SomeInteger)
assert not s.nonneg
with py.test.raises(SignatureError):
a.build_types(fun, [int, int])
def test_sig_lambda(self):
def fun(x, y):
return y
s_nonneg = annmodel.SomeInteger(nonneg=True)
fun._annenforceargs_ = Sig(lambda s1,s2: s1, lambda s1,s2: s1)
# means: the 2nd argument's annotation becomes the 1st argument's
# input annotation
a = self.RPythonAnnotator()
s = a.build_types(fun, [int, s_nonneg])
assert isinstance(s, annmodel.SomeInteger)
assert not s.nonneg
with py.test.raises(SignatureError):
a.build_types(fun, [s_nonneg, int])
def test_sig_bug(self):
def g(x, y=5):
return y == 5
g._annenforceargs_ = (int, int)
def fun(x):
return g(x)
a = self.RPythonAnnotator()
s = a.build_types(fun, [int])
assert s.knowntype is bool
assert s.is_constant()
def test_sig_list(self):
def g(buf):
buf.append(5)
g._annenforceargs_ = ([int],)
def fun():
lst = []
g(lst)
return lst[0]
a = self.RPythonAnnotator()
s = a.build_types(fun, [])
assert s.knowntype is int
assert not s.is_constant()
def test_slots_check(self):
class Base(object):
__slots__ = 'x'
class A(Base):
__slots__ = 'y'
def m(self):
return 65
class C(Base):
__slots__ = 'z'
def m(self):
return 67
for attrname, works in [('x', True),
('y', False),
('z', False),
('t', False)]:
def fun(n):
if n: o = A()
else: o = C()
setattr(o, attrname, 12)
return o.m()
a = self.RPythonAnnotator()
if works:
a.build_types(fun, [int])
else:
with py.test.raises(NoSuchAttrError):
a.build_types(fun, [int])
def test_slots_enforce_attrs(self):
class Superbase(object):
__slots__ = 'x'
class Base(Superbase):
pass
class A(Base):
pass
class B(Base):
pass
def fun(s):
if s is None: # known not to be None in this test
o = B()
o.x = 12
elif len(s) > 5:
o = A()
else:
o = Base()
return o.x
a = self.RPythonAnnotator()
s = a.build_types(fun, [str])
        assert s == annmodel.s_ImpossibleValue # but not via blocked blocks
def test_enforced_attrs_check(self):
class Base(object):
_attrs_ = 'x'
class A(Base):
_attrs_ = 'y'
def m(self):
return 65
class C(Base):
_attrs_ = 'z'
def m(self):
return 67
for attrname, works in [('x', True),
('y', False),
('z', False),
('t', False)]:
def fun(n):
if n: o = A()
else: o = C()
setattr(o, attrname, 12)
return o.m()
a = self.RPythonAnnotator()
if works:
a.build_types(fun, [int])
else:
from rpython.annotator.classdef import NoSuchAttrError
py.test.raises(NoSuchAttrError, a.build_types, fun, [int])
def test_attrs_enforce_attrs(self):
class Superbase(object):
_attrs_ = 'x'
class Base(Superbase):
pass
class A(Base):
pass
class B(Base):
pass
def fun(s):
if s is None: # known not to be None in this test
o = B()
o.x = 12
elif len(s) > 5:
o = A()
else:
o = Base()
return o.x
a = self.RPythonAnnotator()
s = a.build_types(fun, [str])
        assert s == annmodel.s_ImpossibleValue # but not via blocked blocks
def test_pbc_enforce_attrs(self):
class F(object):
_attrs_ = ['foo',]
def _freeze_(self):
return True
p1 = F()
p2 = F()
def g(): pass
def f(x):
if x:
p = p1
else:
p = p2
g()
return p.foo
a = self.RPythonAnnotator()
a.build_types(f, [bool])
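    # A class marked _settled_ = True promises that annotation only needs
    # the methods it declares itself: calling m(), declared on A, works
    # across the A/B union, while calling n(), defined only on the
    # subclass B, is an AnnotatorError.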
def test_enforce_settled(self):
class A(object):
_settled_ = True
def m(self):
raise NotImplementedError
class B(A):
def m(self):
return 1
def n(self):
return 1
def fun(x):
if x:
a = A()
else:
a = B()
return a.m()
a = self.RPythonAnnotator()
s = a.build_types(fun, [bool])
assert s.knowntype == int
def fun(x):
if x:
a = A()
else:
a = B()
return a.n()
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError):
a.build_types(fun, [bool])
def test_float_cmp(self):
def fun(x, y):
return (x < y,
x <= y,
x == y,
x != y,
x > y,
x >= y)
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(fun, [float, float])
assert [s_item.knowntype for s_item in s.items] == [bool] * 6
def test_empty_range(self):
def g(lst):
total = 0
for i in range(len(lst)):
total += lst[i]
return total
def fun():
return g([])
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(fun, [])
assert s.const == 0
def test_compare_int_bool(self):
def fun(x):
return 50 < x
a = self.RPythonAnnotator(policy=AnnotatorPolicy())
s = a.build_types(fun, [bool])
assert isinstance(s, annmodel.SomeBool)
def test_long_as_intermediate_value(self):
from sys import maxint
from rpython.rlib.rarithmetic import intmask
def fun(x):
if x > 0:
v = maxint
else:
v = -maxint
return intmask(v * 10)
P = AnnotatorPolicy()
a = self.RPythonAnnotator(policy=P)
s = a.build_types(fun, [bool])
assert isinstance(s, annmodel.SomeInteger)
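    # hint(x, access_directly=True) attaches that flag to the instance
    # annotation; flagged and unflagged instances are tracked separately,
    # and the following (xfail) test documents that flagged instances
    # should not escape into heap structures.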
def test_instance_with_flags(self):
from rpython.rlib.jit import hint
class A:
_virtualizable_ = []
class B(A):
def meth(self):
return self
class C(A):
def meth(self):
return self
def f(n):
x = B()
x = hint(x, access_directly=True)
m = x.meth
for i in range(n):
x = C()
m = x.meth
return x, m, m()
a = self.RPythonAnnotator()
s = a.build_types(f, [a.bookkeeper.immutablevalue(0)])
assert isinstance(s.items[0], annmodel.SomeInstance)
assert s.items[0].flags == {'access_directly': True}
assert isinstance(s.items[1], annmodel.SomePBC)
assert len(s.items[1].descriptions) == 1
assert s.items[1].any_description().flags == {'access_directly':
True}
assert isinstance(s.items[2], annmodel.SomeInstance)
assert s.items[2].flags == {'access_directly': True}
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s.items[0], annmodel.SomeInstance)
assert s.items[0].flags == {}
assert isinstance(s.items[1], annmodel.SomePBC)
assert isinstance(s.items[2], annmodel.SomeInstance)
assert s.items[2].flags == {}
@py.test.mark.xfail
def test_no_access_directly_on_heap(self):
from rpython.rlib.jit import hint
class A:
_virtualizable_ = []
class I:
pass
def f():
x = A()
x = hint(x, access_directly=True)
i = I()
i.x = x
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError):
a.build_types(f, [])
class M:
def __init__(self):
self.l = []
self.d = {}
class C:
def _freeze_(self):
return True
def __init__(self):
self.m = M()
self.l2 = []
c = C()
def f():
x = A()
x = hint(x, access_directly=True)
c.m.l.append(x)
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [])
def f():
x = A()
x = hint(x, access_directly=True)
c.m.d[None] = x
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [])
def f():
x = A()
x = hint(x, access_directly=True)
c.m.d[x] = None
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [])
def test_ctr_location(self):
class A:
_annspecialcase_ = 'specialize:ctr_location'
def __init__(self, x):
self.x = x
def f(n):
a = A(2 * n)
a.x = n
b = A("")
b.x = str(n)
return len(b.x) + a.x
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_weakref(self):
import weakref
class A:
pass
class B(A):
pass
class C(A):
pass
def f(n):
if n:
b = B()
b.hello = 42
r = weakref.ref(b)
else:
c = C()
c.hello = 64
r = weakref.ref(c)
return r().hello
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
assert not s.is_constant()
def test_float_pow_unsupported(self):
def f(x, y):
x **= y
return x ** y
a = self.RPythonAnnotator()
py.test.raises(FlowingError, a.build_types, f, [int, int])
a = self.RPythonAnnotator()
py.test.raises(FlowingError, a.build_types, f, [float, float])
def test_intcmp_bug(self):
def g(x, y):
return x <= y
def f(x, y):
if g(x, y):
g(x, r_uint(y))
a = self.RPythonAnnotator()
with py.test.raises(UnionError):
a.build_types(f, [int, int])
def test_compare_with_zero(self):
def g():
should_not_see_this
def f(n):
assert n >= 0
if n < 0:
g()
if not (n >= 0):
g()
a = self.RPythonAnnotator()
a.build_types(f, [int])
def test_r_singlefloat(self):
z = r_singlefloat(0.4)
def g(n):
if n > 0:
return r_singlefloat(n * 0.1)
else:
return z
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert isinstance(s, annmodel.SomeSingleFloat)
def test_unicode_simple(self):
def f():
return u'xxx'
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_unicode(self):
def g(n):
if n > 0:
return unichr(1234)
else:
return u"x\xe4x"
def f(n):
x = g(0)
return x[n]
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert isinstance(s, annmodel.SomeUnicodeString)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeUnicodeCodePoint)
def test_unicode_from_string(self):
def f(x):
return unicode(x)
a = self.RPythonAnnotator()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_unicode_add(self):
def f(x):
return unicode(x) + unichr(1234)
def g(x):
return unichr(x) + unichr(2)
a = self.RPythonAnnotator()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeUnicodeString)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_unicode_startswith(self):
def f(x):
return u'xxxx'.replace(x, u'z')
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_unicode_buildtypes(self):
def f(x):
return x
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_replace_annotations(self):
def f(x):
return 'a'.replace(x, 'b')
a = self.RPythonAnnotator()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
def f(x):
return u'a'.replace(x, u'b')
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.no_nul
def test_unicode_char(self):
def f(x, i):
for c in x:
if c == i:
return c
return 'x'
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode, str])
assert isinstance(s, annmodel.SomeUnicodeCodePoint)
def test_strformatting_unicode(self):
def f(x):
return '%s' % unichr(x)
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [int])
def f(x):
return '%s' % (unichr(x) * 3)
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [int])
def f(x):
return '%s%s' % (1, unichr(x))
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [int])
def f(x):
return '%s%s' % (1, unichr(x) * 15)
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, a.build_types, f, [int])
def test_strformatting_tuple(self):
"""
A function which returns the result of interpolating a tuple of a
single str into a str format string should be annotated as returning
SomeString.
"""
def f(x):
return '%s' % (x,)
a = self.RPythonAnnotator()
s = a.build_types(f, [str])
assert isinstance(s, annmodel.SomeString)
def test_unicodeformatting(self):
def f(x):
return u'%s' % x
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_unicodeformatting_tuple(self):
def f(x):
return u'%s' % (x,)
a = self.RPythonAnnotator()
s = a.build_types(f, [unicode])
assert isinstance(s, annmodel.SomeUnicodeString)
def test_negative_slice(self):
def f(s, e):
return [1, 2, 3][s:e]
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, "a.build_types(f, [int, int])")
a.build_types(f, [annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True)])
def f(x):
return x[:-1]
a.build_types(f, [str])
def test_negative_number_find(self):
def f(s, e):
return "xyz".find("x", s, e)
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError, "a.build_types(f, [int, int])")
a.build_types(f, [annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True)])
def f(s, e):
return "xyz".rfind("x", s, e)
py.test.raises(AnnotatorError, "a.build_types(f, [int, int])")
a.build_types(f, [annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True)])
def f(s, e):
return "xyz".count("x", s, e)
py.test.raises(AnnotatorError, "a.build_types(f, [int, int])")
a.build_types(f, [annmodel.SomeInteger(nonneg=True),
annmodel.SomeInteger(nonneg=True)])
def test_setslice(self):
def f():
lst = [2, 5, 7]
lst[1:2] = [4]
return lst
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeList)
assert not s.listdef.listitem.resized
assert not s.listdef.listitem.immutable
assert s.listdef.listitem.mutated
def test_delslice(self):
def f():
lst = [2, 5, 7]
del lst[1:2]
return lst
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeList)
assert s.listdef.listitem.resized
def test_varargs(self):
def f(*args):
return args[0] + 42
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
assert isinstance(s, annmodel.SomeInteger)
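    # rpython.rlib.debug provides list hints: check_annotation() runs a
    # callback on a value's annotation, and make_sure_not_resized() (via
    # listdef.never_resize()) marks a list fixed-size, after which any
    # resizing operation anywhere raises ListChangeUnallowed.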
def test_listitem_no_mutating(self):
from rpython.rlib.debug import check_annotation
called = []
def checker(ann, bk):
called.append(True)
assert not ann.listdef.listitem.mutated
ann.listdef.never_resize()
def f():
l = [1,2,3]
check_annotation(l, checker)
return l
def g():
l = f()
l.append(4)
a = self.RPythonAnnotator()
py.test.raises(ListChangeUnallowed, a.build_types, g, [])
assert called
def test_listitem_no_mutating2(self):
from rpython.rlib.debug import make_sure_not_resized
def f():
return make_sure_not_resized([1,2,3])
def g():
l = [1,2,3]
l.append(4)
return l
def fn(i):
if i:
func = f
else:
func = g
return func()
a = self.RPythonAnnotator()
a.translator.config.translation.list_comprehension_operations = True
py.test.raises(ListChangeUnallowed, a.build_types, fn, [int])
def test_listitem_never_resize(self):
from rpython.rlib.debug import check_annotation
def checker(ann, bk):
ann.listdef.never_resize()
def f():
l = [1,2,3]
l.append(4)
check_annotation(l, checker)
a = self.RPythonAnnotator()
py.test.raises(ListChangeUnallowed, a.build_types, f, [])
def test_len_of_empty_list(self):
class X:
pass
def f(n):
x = X()
x.lst = None
if n < 0: # to showcase a failure of the famous "assert contains"
return len(x.lst)
x.lst = []
return len(x.lst)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.const == 0
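    # Using a prebuilt instance inside a prebuilt dict key forces the
    # annotator to precompute its identity hash, which is cached on the
    # instance as '__precomputed_identity_hash'.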
def test_hash_sideeffect(self):
class X:
pass
x1 = X()
x2 = X()
x3 = X()
d = {(2, x1): 5, (3, x2): 7}
def f(n, m):
if m == 1: x = x1
elif m == 2: x = x2
else: x = x3
return d[n, x]
a = self.RPythonAnnotator()
s = a.build_types(f, [int, int])
assert s.knowntype == int
assert hasattr(x1, '__precomputed_identity_hash')
assert hasattr(x2, '__precomputed_identity_hash')
assert not hasattr(x3, '__precomputed_identity_hash')
def test_contains_of_empty_dict(self):
class A(object):
def meth(self):
return 1
def g(x, y):
d1 = {}
for i in range(y):
if x in d1:
return d1[x].meth()
d1[i+1] = A()
return 0
a = self.RPythonAnnotator()
s = a.build_types(g, [int, int])
assert s.knowntype is int
def f(x):
d0 = {}
if x in d0:
d0[x].meth()
return x+1
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.knowntype is int
def test_relax(self):
def f(*args):
return args[0] + args[1]
f.relax_sig_check = True
def g(x):
return f(x, x - x)
a = self.RPythonAnnotator()
s = a.build_types(g, [int])
assert a.bookkeeper.getdesc(f).getuniquegraph()
def test_cannot_raise_ll_exception(self):
from rpython.rtyper.annlowlevel import cast_instance_to_base_ptr
#
def f():
e = OverflowError()
lle = cast_instance_to_base_ptr(e)
raise Exception(lle)
# ^^^ instead, must cast back from a base ptr to an instance
a = self.RPythonAnnotator()
with py.test.raises(AssertionError):
a.build_types(f, [])
def test_enumerate(self):
def f():
for i, x in enumerate(['a', 'b', 'c', 'd']):
if i == 2:
return x
return '?'
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeChar)
def test_context_manager(self):
class C:
def __init__(self):
pass
def __enter__(self):
self.x = 1
def __exit__(self, *args):
self.x = 3
def f():
c = C()
with c:
pass
return c.x
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInteger)
# not a constant: both __enter__ and __exit__ have been annotated
assert not s.is_constant()
def test_make_sure_not_resized(self):
from rpython.rlib.debug import make_sure_not_resized
def pycode(consts):
make_sure_not_resized(consts)
def build1():
return pycode(consts=[1])
def build2():
return pycode(consts=[0])
def fn():
build1()
build2()
a = self.RPythonAnnotator()
a.translator.config.translation.list_comprehension_operations = True
a.build_types(fn, [])
# assert did not raise ListChangeUnallowed
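    # For the tests below: _immutable_fields_ = 'lst[*]' marks both the
    # field and the items of the stored list immutable, so the annotator
    # must reject any list that might still be resized afterwards.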
def test_return_immutable_list(self):
class A:
_immutable_fields_ = 'lst[*]'
def f(n):
a = A()
l1 = [n, 0]
l1[1] = n+1
a.lst = l1
return a.lst
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.listdef.listitem.immutable
def test_return_immutable_list_quasiimmut_field(self):
class A:
_immutable_fields_ = 'lst?[*]'
def f(n):
a = A()
l1 = [n, 0]
l1[1] = n+1
a.lst = l1
return a.lst
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.listdef.listitem.immutable
def test_immutable_list_is_actually_resized(self):
class A:
_immutable_fields_ = 'lst[*]'
def f(n):
a = A()
l1 = [n]
l1.append(n+1)
a.lst = l1
return a.lst
a = self.RPythonAnnotator()
py.test.raises(ListChangeUnallowed, a.build_types, f, [int])
def test_immutable_list_is_assigned_a_resizable_list(self):
class A:
_immutable_fields_ = 'lst[*]'
def f(n):
a = A()
foo = []
foo.append(n)
a.lst = foo
a = self.RPythonAnnotator()
py.test.raises(ListChangeUnallowed, a.build_types, f, [int])
def test_can_merge_immutable_list_with_regular_list(self):
class A:
_immutable_fields_ = 'lst[*]'
def foo(lst):
pass
def f(n):
a = A()
l1 = [n, 0]
l1[1] = n+1
a.lst = l1
if n > 0:
foo(a.lst)
else:
lst = [0]
lst[0] = n
foo(lst)
a = self.RPythonAnnotator()
a.build_types(f, [int])
def f(n):
a = A()
l1 = [n, 0]
l1[1] = n+1
a.lst = l1
if n > 0:
lst = [0]
lst[0] = n
foo(lst)
else:
foo(a.lst)
a = self.RPythonAnnotator()
a.build_types(f, [int])
def test_immutable_field_subclass(self):
class Root:
pass
class A(Root):
_immutable_fields_ = '_my_lst[*]'
def __init__(self, lst):
self._my_lst = lst
def foo(x):
return len(x._my_lst)
def f(n):
foo(A([2, n]))
foo(Root())
a = self.RPythonAnnotator()
e = py.test.raises(Exception, a.build_types, f, [int])
assert "field '_my_lst' was migrated" in str(e.value)
def test_range_variable_step(self):
def g(n):
return range(0, 10, n)
def f(n):
r = g(1) # constant step, at first
s = g(n) # but it becomes a variable step
return r
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert s.listdef.listitem.range_step == 0
def test_specialize_arg_memo(self):
@objectmodel.specialize.memo()
def g(n):
return n
@objectmodel.specialize.arg(0)
def f(i):
return g(i)
def main(i):
if i == 2:
return f(2)
elif i == 3:
return f(3)
else:
raise NotImplementedError
a = self.RPythonAnnotator()
s = a.build_types(main, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_join_none_and_nonnull(self):
from rpython.rlib.rstring import assert_str0
def f(i):
a = str(i)
a = assert_str0(a)
return a.join([None])
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeString)
assert not s.can_be_None
def test_contains_no_nul(self):
def f(i):
if "\0" in i:
return None
else:
return i
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeString(no_nul=False)])
assert isinstance(s, annmodel.SomeString)
assert s.can_be_None
assert s.no_nul
def test_contains_no_nul_unicode(self):
def f(i):
if u"\0" in i:
return None
else:
return i
a = self.RPythonAnnotator()
a.translator.config.translation.check_str_without_nul = True
s = a.build_types(f, [annmodel.SomeUnicodeString(no_nul=False)])
assert isinstance(s, annmodel.SomeUnicodeString)
assert s.can_be_None
assert s.no_nul
def test_no___call__(self):
class X(object):
def __call__(self):
xxx
x = X()
def f():
return x
a = self.RPythonAnnotator()
e = py.test.raises(Exception, a.build_types, f, [])
assert 'object with a __call__ is not RPython' in str(e.value)
def test_os_getcwd(self):
import os
def fn():
return os.getcwd()
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
def test_os_getenv(self):
import os
def fn():
return os.environ.get('PATH')
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeString)
assert s.no_nul
def test_base_iter(self):
class A(object):
def __iter__(self):
return self
def fn():
return iter(A())
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef.name.endswith('.A')
def test_iter_next(self):
class A(object):
def __iter__(self):
return self
def next(self):
return 1
def fn():
s = 0
for x in A():
s += x
return s
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert len(a.translator.graphs) == 3 # fn, __iter__, next
assert isinstance(s, annmodel.SomeInteger)
def test_next_function(self):
def fn(n):
x = [0, 1, n]
i = iter(x)
return next(i) + next(i)
a = self.RPythonAnnotator()
s = a.build_types(fn, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_instance_getitem(self):
class A(object):
def __getitem__(self, i):
return i * i
def fn(i):
a = A()
return a[i]
a = self.RPythonAnnotator()
s = a.build_types(fn, [int])
assert len(a.translator.graphs) == 2 # fn, __getitem__
assert isinstance(s, annmodel.SomeInteger)
def test_instance_setitem(self):
class A(object):
def __setitem__(self, i, v):
self.value = i * v
def fn(i, v):
a = A()
a[i] = v
return a.value
a = self.RPythonAnnotator()
s = a.build_types(fn, [int, int])
assert len(a.translator.graphs) == 2 # fn, __setitem__
assert isinstance(s, annmodel.SomeInteger)
def test_instance_getslice(self):
class A(object):
            def __getslice__(self, start, stop):
                return "Test"[start:stop]
def fn():
a = A()
return a[0:2]
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert len(a.translator.graphs) == 2 # fn, __getslice__
assert isinstance(s, annmodel.SomeString)
def test_instance_setslice(self):
class A(object):
            def __setslice__(self, start, stop, value):
self.value = value
def fn():
a = A()
a[0:2] = '00'
return a.value
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert len(a.translator.graphs) == 2 # fn, __setslice__
assert isinstance(s, annmodel.SomeString)
def test_instance_len(self):
class A(object):
def __len__(self):
return 0
def fn():
a = A()
return len(a)
a = self.RPythonAnnotator()
s = a.build_types(fn, [])
assert len(a.translator.graphs) == 2 # fn, __len__
assert isinstance(s, annmodel.SomeInteger)
def test_reversed(self):
def fn(n):
for elem in reversed([1, 2, 3, 4, 5]):
return elem
return n
a = self.RPythonAnnotator()
s = a.build_types(fn, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_no_attr_on_common_exception_classes(self):
for cls in [ValueError, Exception]:
def fn():
e = cls()
e.foo = "bar"
a = self.RPythonAnnotator()
with py.test.raises(NoSuchAttrError):
a.build_types(fn, [])
def test_lower_char(self):
def fn(c):
return c.lower()
a = self.RPythonAnnotator()
s = a.build_types(fn, [annmodel.SomeChar()])
assert s == annmodel.SomeChar()
def test_isinstance_double_const(self):
class X(object):
def _freeze_(self):
return True
x = X()
def f(i):
if i:
x1 = x
else:
x1 = None
print "hello" # this is to force the merge of blocks
return isinstance(x1, X)
a = self.RPythonAnnotator()
s = a.build_types(f, [annmodel.SomeInteger()])
assert isinstance(s, annmodel.SomeBool)
def test_object_init(self):
class A(object):
pass
class B(A):
def __init__(self):
A.__init__(self)
def f():
B()
a = self.RPythonAnnotator()
a.build_types(f, []) # assert did not explode
def test_bytearray(self):
def f():
return bytearray("xyz")
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeByteArray)
assert not s.is_constant() # never a constant!
def test_bytearray_add(self):
def f(a):
return a + bytearray("xyz")
a = self.RPythonAnnotator()
assert isinstance(a.build_types(f, [annmodel.SomeByteArray()]),
annmodel.SomeByteArray)
a = self.RPythonAnnotator()
assert isinstance(a.build_types(f, [str]),
annmodel.SomeByteArray)
a = self.RPythonAnnotator()
assert isinstance(a.build_types(f, [annmodel.SomeChar()]),
annmodel.SomeByteArray)
def test_bytearray_setitem_getitem(self):
def f(b, i, c):
b[i] = c
return b[i + 1]
a = self.RPythonAnnotator()
assert isinstance(a.build_types(f, [annmodel.SomeByteArray(),
int, int]),
annmodel.SomeInteger)
def test_constant_startswith_endswith(self):
def f():
return "abc".startswith("ab") and "abc".endswith("bc")
a = self.RPythonAnnotator()
assert a.build_types(f, []).const is True
def test_specific_attributes(self):
class A(object):
pass
class B(A):
def __init__(self, x):
assert x >= 0
self.x = x
def fn(i):
if i % 2:
a = A()
else:
a = B(3)
if i % 3:
a.x = -3
if isinstance(a, B):
return a.x
return 0
a = self.RPythonAnnotator()
assert not a.build_types(fn, [int]).nonneg
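    # The UnionError tests below check that annotation failures caused
    # by merging incompatible annotations come with readable messages.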
def test_unionerror_attrs(self):
def f(x):
if x < 10:
return 1
else:
return "bbb"
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f, [int])
the_exc = exc.value
s_objs = set([type(the_exc.s_obj1), type(the_exc.s_obj2)])
assert s_objs == set([annmodel.SomeInteger, annmodel.SomeString])
def test_unionerror_tuple_size(self):
def f(x):
if x < 10:
return (1, )
else:
return (1, 2)
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f, [int])
assert "RPython cannot unify tuples of different length: 2 versus 1" in exc.value.msg
def test_unionerror_signedness(self):
def f(x):
if x < 10:
return r_uint(99)
else:
return -1
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f, [int])
assert ("RPython cannot prove that these integers are of the "
"same signedness" in exc.value.msg)
def test_unionerror_instance(self):
class A(object): pass
class B(object): pass
def f(x):
if x < 10:
return A()
else:
return B()
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f, [int])
assert ("RPython cannot unify instances with no common base class"
in exc.value.msg)
def test_unionerror_iters(self):
def f(x):
d = { 1 : "a", 2 : "b" }
if x < 10:
return d.iterkeys()
else:
return d.itervalues()
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f, [int])
assert ("RPython cannot unify incompatible iterator variants" in
exc.value.msg)
def test_variable_getattr(self):
class A(object): pass
def f(y):
a = A()
return getattr(a, y)
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError) as exc:
a.build_types(f, [str])
assert ("variable argument to getattr" in exc.value.msg)
def test_bad_call(self):
def f(x):
return x()
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError) as exc:
a.build_types(f, [str])
assert ("Cannot prove that the object is callable" in exc.value.msg)
def test_UnionError_on_PBC(self):
l = ['a', 1]
def f(x):
l.append(x)
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as excinfo:
a.build_types(f, [int])
assert 'Happened at file' in excinfo.value.source
assert 'Known variable annotations:' in excinfo.value.source
def test_str_format_error(self):
def f(s, x):
return s.format(x)
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError) as exc:
a.build_types(f, [str, str])
assert ("format() is not RPython" in exc.value.msg)
def test_prebuilt_ordered_dict(self):
try:
from collections import OrderedDict
except ImportError:
py.test.skip("Please upgrade to python 2.7")
d = OrderedDict([("aa", 1)])
def f():
return d
a = self.RPythonAnnotator()
assert isinstance(a.build_types(f, []), annmodel.SomeOrderedDict)
def test_enumerate_none(self):
# enumerate(None) can occur as an intermediate step during a full
# annotation, because the None will be generalized later to
# None-or-list for example
def f(flag):
if flag:
x = None
else:
x = [42]
return enumerate(x).next()
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeTuple)
assert s.items[1].const == 42
def test_unpack_none_gets_a_blocked_block(self):
def f(x):
a, b = x
a = self.RPythonAnnotator()
py.test.raises(AnnotatorError,
a.build_types, f, [annmodel.s_None])
def test_class___name__(self):
class Abc(object):
pass
def f():
return Abc().__class__.__name__
a = self.RPythonAnnotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeString)
def test_isinstance_str_1(self):
def g():
pass
def f(n):
if n > 5:
s = "foo"
else:
s = None
g()
return isinstance(s, str)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeBool)
assert not s.is_constant()
def test_isinstance_str_2(self):
def g():
pass
def f(n):
if n > 5:
s = "foo"
else:
s = None
g()
if isinstance(s, str):
return s
return ""
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeString)
assert not s.can_be_none()
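    # Properties are lowered by the annotator to reads of the hidden
    # 'x__getter__'/'x__setter__' attributes followed by a simple_call;
    # these tests inspect the resulting flow-graph operations directly.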
def test_property_getter(self):
class O1(object):
def __init__(self, x):
self._x = x
@property
def x(self):
return self._x
def f(n):
o = O1(n)
return o.x + getattr(o, 'x')
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
op = list(graphof(a, f).iterblocks())[0].operations
i = 0
c = 0
while i < len(op):
if op[i].opname == 'getattr':
c += 1
assert op[i].args[1].value == 'x__getter__'
i += 1
assert i < len(op) and op[i].opname == 'simple_call' and \
op[i].args[0] == op[i - 1].result
i += 1
assert c == 2
def test_property_setter(self):
class O2(object):
def __init__(self):
self._x = 0
def set_x(self, v):
self._x = v
x = property(fset=set_x)
def f(n):
o = O2()
o.x = n
setattr(o, 'x', n)
a = self.RPythonAnnotator()
s = a.build_types(f, [int])
op = list(graphof(a, f).iterblocks())[0].operations
i = 0
c = 0
while i < len(op):
if op[i].opname == 'getattr':
c += 1
assert op[i].args[1].value == 'x__setter__'
i += 1
assert i < len(op) and op[i].opname == 'simple_call' and \
op[i].args[0] == op[i - 1].result and len(op[i].args) == 2
i += 1
assert c == 2
def test_property_unionerr(self):
class O1(object):
def __init__(self, x):
self._x = x
@property
def x(self):
return self._x
class O2(O1):
def set_x(self, v):
self._x = v
x = property(fset=set_x)
def f1(n):
o = O2(n)
return o.x
def f2(n):
o = O2(n)
o.x = 20
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f1, [int])
a = self.RPythonAnnotator()
with py.test.raises(UnionError) as exc:
a.build_types(f2, [int])
def test_property_union_2(self):
py.test.xfail("FIX ME")
class Base(object):
pass
class A(Base):
def __init__(self):
pass
@property
def x(self):
return 42
class B(Base):
def __init__(self, x):
self.x = x
def f(n):
if n < 0:
obj = A()
else:
obj = B(n)
return obj.x
a = self.RPythonAnnotator()
# Ideally, this should translate to something sensible,
# but for now, AnnotatorError is better than silently mistranslating.
with py.test.raises(AnnotatorError):
a.build_types(f, [int])
def test_property_union_3(self):
py.test.xfail("FIX ME")
class Base(object):
pass
class A(Base):
@property
def x(self):
return 42
class B(Base):
x = 43
def f(n):
if n < 0:
obj = A()
else:
obj = B()
return obj.x
a = self.RPythonAnnotator()
with py.test.raises(AnnotatorError):
a.build_types(f, [int])
def test_dict_can_be_none_ordering_issue(self):
def g(d):
return 42 in d
def f(n):
g(None)
g({})
a = self.RPythonAnnotator()
a.build_types(f, [int])
def g(n):
return [0, 1, 2, n]
def f_calls_g(n):
total = 0
lst = g(n)
i = 0
while i < len(lst):
total += i
i += 1
return total
constant_unsigned_five = r_uint(5)
class Freezing:
def _freeze_(self):
return True
```
#### File: rpython/bin/translatorshell.py
```python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))))
from rpython.translator.interactive import Translation
from rpython.rtyper.rtyper import *
from rpython.rlib.rarithmetic import *
def get_c_function(lib, f):
from ctypes import CDLL
name = f.__name__
return getattr(CDLL(lib.strpath), 'pypy_g_' + name)
def setup_readline():
import readline
try:
import rlcompleter2
rlcompleter2.setup()
except ImportError:
import rlcompleter
readline.parse_and_bind("tab: complete")
import os
histfile = os.path.join(os.environ["HOME"], ".pypytrhist")
try:
getattr(readline, "clear_history", lambda: None)()
readline.read_history_file(histfile)
except IOError:
pass
import atexit
atexit.register(readline.write_history_file, histfile)
if __name__ == '__main__':
try:
setup_readline()
except ImportError, err:
print "Disabling readline support (%s)" % err
from rpython.translator.test import snippet
from rpython.rtyper.rtyper import RPythonTyper
if (os.getcwd() not in sys.path and
os.path.curdir not in sys.path):
sys.path.insert(0, os.getcwd())
print __doc__
import os
os.putenv("PYTHONINSPECT", "1")
```
#### File: config/test/test_translationoption.py
```python
import sys
import py
from rpython.config.translationoption import get_combined_translation_config
from rpython.config.translationoption import set_opt_level
from rpython.config.translationoption import get_translation_config
from rpython.config import translationoption
from rpython.config.config import ConflictConfigError, ConfigError
from rpython.translator.platform import platform as compiler
def test_no_gcrootfinder_with_boehm():
config = get_combined_translation_config()
config.translation.gcrootfinder = "shadowstack"
py.test.raises(ConflictConfigError, set_opt_level, config, '0')
if compiler.name == 'msvc' or sys.platform == 'darwin':
    def test_no_asmgcc_on_msvc():
config = get_combined_translation_config()
config.translation.gcrootfinder = "asmgcc"
py.test.raises(ConfigError, set_opt_level, config, 'jit')
def test_get_translation_config():
from rpython.translator.interactive import Translation
from rpython.config import config
def f(x):
config = get_translation_config()
if config is not None:
return config.translating
return False
t = Translation(f, [int])
config = t.config
# do the patching
t.annotate()
retvar = t.context.graphs[0].returnblock.inputargs[0]
assert t.context.annotator.binding(retvar).const
assert get_translation_config() is config # check during import time
```
#### File: backend/arm/support.py
```python
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
from rpython.rlib.rarithmetic import r_uint
from rpython.translator.tool.cbuild import ExternalCompilationInfo
eci = ExternalCompilationInfo(post_include_bits=["""
static int pypy__arm_int_div(int a, int b) {
return a/b;
}
static unsigned int pypy__arm_uint_div(unsigned int a, unsigned int b) {
return a/b;
}
static int pypy__arm_int_mod(int a, int b) {
return a % b;
}
"""])
def arm_int_div_emulator(a, b):
return int(a / float(b))
arm_int_div_sign = lltype.Ptr(
lltype.FuncType([lltype.Signed, lltype.Signed], lltype.Signed))
arm_int_div = rffi.llexternal(
"pypy__arm_int_div", [lltype.Signed, lltype.Signed], lltype.Signed,
_callable=arm_int_div_emulator,
compilation_info=eci,
_nowrapper=True, elidable_function=True)
def arm_uint_div_emulator(a, b):
return r_uint(a) / r_uint(b)
arm_uint_div_sign = lltype.Ptr(
lltype.FuncType([lltype.Unsigned, lltype.Unsigned], lltype.Unsigned))
arm_uint_div = rffi.llexternal(
"pypy__arm_uint_div", [lltype.Unsigned, lltype.Unsigned], lltype.Unsigned,
_callable=arm_uint_div_emulator,
compilation_info=eci,
_nowrapper=True, elidable_function=True)
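# C's % gives the result the sign of the dividend, unlike Python's;
# emulate that by computing on absolute values and restoring the
# dividend's sign afterwards.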
def arm_int_mod_emulator(a, b):
sign = 1
if a < 0:
a = -1 * a
sign = -1
if b < 0:
b = -1 * b
res = a % b
return sign * res
arm_int_mod_sign = arm_int_div_sign
arm_int_mod = rffi.llexternal(
"pypy__arm_int_mod", [lltype.Signed, lltype.Signed], lltype.Signed,
_callable=arm_int_mod_emulator,
compilation_info=eci,
_nowrapper=True, elidable_function=True)
```
#### File: llsupport/test/test_rewrite.py
```python
from rpython.jit.backend.llsupport.descr import get_size_descr,\
get_field_descr, get_array_descr, ArrayDescr, FieldDescr,\
SizeDescr, get_interiorfield_descr
from rpython.jit.backend.llsupport.gc import GcLLDescr_boehm,\
GcLLDescr_framework
from rpython.jit.backend.llsupport import jitframe
from rpython.jit.metainterp.gc import get_description
from rpython.jit.tool.oparser import parse
from rpython.jit.metainterp.optimizeopt.util import equaloplists
from rpython.jit.metainterp.history import JitCellToken, FLOAT
from rpython.jit.metainterp.history import AbstractFailDescr
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper import rclass
from rpython.jit.backend.x86.arch import WORD
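# Lets '%(expr)d' placeholders in the expected-trace templates evaluate
# arbitrary Python expressions against the test namespace.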
class Evaluator(object):
def __init__(self, scope):
self.scope = scope
def __getitem__(self, key):
return eval(key, self.scope)
class FakeLoopToken(object):
pass
o_vtable = lltype.malloc(rclass.OBJECT_VTABLE, immortal=True)
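# Prebuilt vtable shared by the fake CPUs below; check_rewrite() fetches
# it from globals() so parsed traces can reference ConstClass(o_vtable).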
class RewriteTests(object):
def check_rewrite(self, frm_operations, to_operations, **namespace):
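        # Build descrs for a handful of sample GC structs and arrays,
        # parse both traces, run the GC rewriter over 'frm_operations'
        # and assert the result equals 'to_operations'.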
S = lltype.GcStruct('S', ('x', lltype.Signed),
('y', lltype.Signed))
sdescr = get_size_descr(self.gc_ll_descr, S)
sdescr.tid = 1234
#
T = lltype.GcStruct('T', ('y', lltype.Signed),
('z', lltype.Ptr(S)),
('t', lltype.Signed))
tdescr = get_size_descr(self.gc_ll_descr, T)
tdescr.tid = 5678
tzdescr = get_field_descr(self.gc_ll_descr, T, 'z')
#
A = lltype.GcArray(lltype.Signed)
adescr = get_array_descr(self.gc_ll_descr, A)
adescr.tid = 4321
alendescr = adescr.lendescr
#
B = lltype.GcArray(lltype.Char)
bdescr = get_array_descr(self.gc_ll_descr, B)
bdescr.tid = 8765
blendescr = bdescr.lendescr
#
C = lltype.GcArray(lltype.Ptr(S))
cdescr = get_array_descr(self.gc_ll_descr, C)
cdescr.tid = 8111
clendescr = cdescr.lendescr
#
E = lltype.GcStruct('Empty')
edescr = get_size_descr(self.gc_ll_descr, E)
edescr.tid = 9000
#
vtable_descr = self.gc_ll_descr.fielddescr_vtable
O = lltype.GcStruct('O', ('parent', rclass.OBJECT),
('x', lltype.Signed))
o_descr = self.cpu.sizeof(O, True)
o_vtable = globals()['o_vtable']
#
tiddescr = self.gc_ll_descr.fielddescr_tid
wbdescr = self.gc_ll_descr.write_barrier_descr
WORD = globals()['WORD']
#
strdescr = self.gc_ll_descr.str_descr
unicodedescr = self.gc_ll_descr.unicode_descr
strlendescr = strdescr.lendescr
unicodelendescr = unicodedescr.lendescr
strhashdescr = self.gc_ll_descr.str_hash_descr
unicodehashdescr = self.gc_ll_descr.unicode_hash_descr
casmdescr = JitCellToken()
clt = FakeLoopToken()
clt._ll_initial_locs = [0, 8]
frame_info = lltype.malloc(jitframe.JITFRAMEINFO, flavor='raw')
clt.frame_info = frame_info
frame_info.jfi_frame_depth = 13
frame_info.jfi_frame_size = 255
framedescrs = self.gc_ll_descr.getframedescrs(self.cpu)
framelendescr = framedescrs.arraydescr.lendescr
jfi_frame_depth = framedescrs.jfi_frame_depth
jfi_frame_size = framedescrs.jfi_frame_size
jf_frame_info = framedescrs.jf_frame_info
jf_savedata = framedescrs.jf_savedata
jf_force_descr = framedescrs.jf_force_descr
jf_descr = framedescrs.jf_descr
jf_guard_exc = framedescrs.jf_guard_exc
jf_forward = framedescrs.jf_forward
jf_extra_stack_depth = framedescrs.jf_extra_stack_depth
signedframedescr = self.cpu.signedframedescr
floatframedescr = self.cpu.floatframedescr
casmdescr.compiled_loop_token = clt
#
guarddescr = AbstractFailDescr()
#
namespace.update(locals())
#
for funcname in self.gc_ll_descr._generated_functions:
namespace[funcname] = self.gc_ll_descr.get_malloc_fn(funcname)
namespace[funcname + '_descr'] = getattr(self.gc_ll_descr,
'%s_descr' % funcname)
#
ops = parse(frm_operations, namespace=namespace)
expected = parse(to_operations % Evaluator(namespace),
namespace=namespace)
operations = self.gc_ll_descr.rewrite_assembler(self.cpu,
ops.operations,
[])
remap = {}
for a, b in zip(ops.inputargs, expected.inputargs):
remap[b] = a
equaloplists(operations, expected.operations, remap=remap)
lltype.free(frame_info, flavor='raw')
class FakeTracker(object):
pass
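# Minimal stand-in for a real backend: just enough descr-producing
# methods (with caching) for the rewriter to run untranslated.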
class BaseFakeCPU(object):
JITFRAME_FIXED_SIZE = 0
def __init__(self):
self.tracker = FakeTracker()
self._cache = {}
self.signedframedescr = ArrayDescr(3, 8, FieldDescr('len', 0, 0, 0), 0)
self.floatframedescr = ArrayDescr(5, 8, FieldDescr('len', 0, 0, 0), 0)
def getarraydescr_for_frame(self, tp):
if tp == FLOAT:
return self.floatframedescr
return self.signedframedescr
def unpack_arraydescr_size(self, d):
return 0, d.itemsize, 0
def unpack_fielddescr(self, d):
return d.offset
def arraydescrof(self, ARRAY):
try:
return self._cache[ARRAY]
except KeyError:
r = ArrayDescr(1, 2, FieldDescr('len', 0, 0, 0), 0)
self._cache[ARRAY] = r
return r
def fielddescrof(self, STRUCT, fname):
key = (STRUCT, fname)
try:
return self._cache[key]
except KeyError:
r = FieldDescr(fname, 1, 1, 1)
self._cache[key] = r
return r
class TestBoehm(RewriteTests):
def setup_method(self, meth):
class FakeCPU(BaseFakeCPU):
def sizeof(self, STRUCT, is_object):
assert is_object
return SizeDescr(102, gc_fielddescrs=[],
vtable=o_vtable)
self.cpu = FakeCPU()
self.gc_ll_descr = GcLLDescr_boehm(None, None, None)
def test_new(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
jump()
""", """
[p1]
p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
descr=malloc_fixedsize_descr)
jump()
""")
def test_no_collapsing(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
p1 = new(descr=sdescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
descr=malloc_fixedsize_descr)
p1 = call_malloc_gc(ConstClass(malloc_fixedsize), %(sdescr.size)d,\
descr=malloc_fixedsize_descr)
jump()
""")
def test_new_array_fixed(self):
self.check_rewrite("""
[]
p0 = new_array(10, descr=adescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_array), \
%(adescr.basesize)d, \
10, \
%(adescr.itemsize)d, \
%(adescr.lendescr.offset)d, \
descr=malloc_array_descr)
jump()
""")
## should ideally be:
## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
## %(adescr.basesize + 10 * adescr.itemsize)d, \
## descr=malloc_fixedsize_descr)
## setfield_gc(p0, 10, descr=alendescr)
def test_new_array_variable(self):
self.check_rewrite("""
[i1]
p0 = new_array(i1, descr=adescr)
jump()
""", """
[i1]
p0 = call_malloc_gc(ConstClass(malloc_array), \
%(adescr.basesize)d, \
i1, \
%(adescr.itemsize)d, \
%(adescr.lendescr.offset)d, \
descr=malloc_array_descr)
jump()
""")
def test_new_with_vtable(self):
self.check_rewrite("""
[]
p0 = new_with_vtable(descr=o_descr)
jump()
""", """
[p1]
p0 = call_malloc_gc(ConstClass(malloc_fixedsize), 102, \
descr=malloc_fixedsize_descr)
setfield_gc(p0, ConstClass(o_vtable), descr=vtable_descr)
jump()
""")
def test_newstr(self):
self.check_rewrite("""
[i1]
p0 = newstr(i1)
jump()
""", """
[i1]
p0 = call_malloc_gc(ConstClass(malloc_array), \
%(strdescr.basesize)d, \
i1, \
%(strdescr.itemsize)d, \
%(strlendescr.offset)d, \
descr=malloc_array_descr)
jump()
""")
def test_newunicode(self):
self.check_rewrite("""
[i1]
p0 = newunicode(10)
jump()
""", """
[i1]
p0 = call_malloc_gc(ConstClass(malloc_array), \
%(unicodedescr.basesize)d, \
10, \
%(unicodedescr.itemsize)d, \
%(unicodelendescr.offset)d, \
descr=malloc_array_descr)
jump()
""")
## should ideally be:
## p0 = call_malloc_gc(ConstClass(malloc_fixedsize), \
## %(unicodedescr.basesize + \
## 10 * unicodedescr.itemsize)d, \
## descr=malloc_fixedsize_descr)
## setfield_gc(p0, 10, descr=unicodelendescr)
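# Variant of the tests against the framework GC (minimark): here the
# rewriter emits nursery allocations, tid initialization and explicit
# write barriers.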
class TestFramework(RewriteTests):
def setup_method(self, meth):
class config_(object):
class translation(object):
gc = 'minimark'
gcrootfinder = 'asmgcc'
gctransformer = 'framework'
gcremovetypeptr = False
gcdescr = get_description(config_)
self.gc_ll_descr = GcLLDescr_framework(gcdescr, None, None, None,
really_not_translated=True)
self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = (
lambda cpu: True)
self.gc_ll_descr.malloc_zero_filled = False
#
class FakeCPU(BaseFakeCPU):
def sizeof(self, STRUCT, is_object):
descr = SizeDescr(104, gc_fielddescrs=[])
descr.tid = 9315
return descr
self.cpu = FakeCPU()
def test_rewrite_assembler_new_to_malloc(self):
self.check_rewrite("""
[p1]
p0 = new(descr=sdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(sdescr.size)d)
setfield_gc(p0, 1234, descr=tiddescr)
jump()
""")
def test_rewrite_assembler_new3_to_malloc(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
p1 = new(descr=tdescr)
p2 = new(descr=sdescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(sdescr.size + tdescr.size + sdescr.size)d)
setfield_gc(p0, 1234, descr=tiddescr)
p1 = nursery_ptr_increment(p0, %(sdescr.size)d)
setfield_gc(p1, 5678, descr=tiddescr)
p2 = nursery_ptr_increment(p1, %(tdescr.size)d)
setfield_gc(p2, 1234, descr=tiddescr)
zero_ptr_field(p1, %(tdescr.gc_fielddescrs[0].offset)s)
jump()
""")
def test_rewrite_assembler_new_array_fixed_to_malloc(self):
self.check_rewrite("""
[]
p0 = new_array(10, descr=adescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(adescr.basesize + 10 * adescr.itemsize)d)
setfield_gc(p0, 4321, descr=tiddescr)
setfield_gc(p0, 10, descr=alendescr)
jump()
""")
def test_rewrite_assembler_new_and_new_array_fixed_to_malloc(self):
self.check_rewrite("""
[]
p0 = new(descr=sdescr)
p1 = new_array(10, descr=adescr)
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(sdescr.size + \
adescr.basesize + 10 * adescr.itemsize)d)
setfield_gc(p0, 1234, descr=tiddescr)
p1 = nursery_ptr_increment(p0, %(sdescr.size)d)
setfield_gc(p1, 4321, descr=tiddescr)
setfield_gc(p1, 10, descr=alendescr)
jump()
""")
def test_rewrite_assembler_round_up(self):
self.check_rewrite("""
[]
p0 = new_array(6, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(bdescr.basesize + 8)d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 6, descr=blendescr)
jump()
""")
def test_rewrite_assembler_round_up_always(self):
self.check_rewrite("""
[]
p0 = new_array(5, descr=bdescr)
p1 = new_array(5, descr=bdescr)
p2 = new_array(5, descr=bdescr)
p3 = new_array(5, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(4 * (bdescr.basesize + 8))d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 5, descr=blendescr)
p1 = nursery_ptr_increment(p0, %(bdescr.basesize + 8)d)
setfield_gc(p1, 8765, descr=tiddescr)
setfield_gc(p1, 5, descr=blendescr)
p2 = nursery_ptr_increment(p1, %(bdescr.basesize + 8)d)
setfield_gc(p2, 8765, descr=tiddescr)
setfield_gc(p2, 5, descr=blendescr)
p3 = nursery_ptr_increment(p2, %(bdescr.basesize + 8)d)
setfield_gc(p3, 8765, descr=tiddescr)
setfield_gc(p3, 5, descr=blendescr)
jump()
""")
def test_rewrite_assembler_minimal_size(self):
self.check_rewrite("""
[]
p0 = new(descr=edescr)
p1 = new(descr=edescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(4*WORD)d)
setfield_gc(p0, 9000, descr=tiddescr)
p1 = nursery_ptr_increment(p0, %(2*WORD)d)
setfield_gc(p1, 9000, descr=tiddescr)
jump()
""")
def test_rewrite_assembler_variable_size(self):
self.check_rewrite("""
[i0]
p0 = new_array(i0, descr=bdescr)
jump(i0)
""", """
[i0]
p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr)
setfield_gc(p0, i0, descr=blendescr)
jump(i0)
""")
def test_rewrite_new_string(self):
self.check_rewrite("""
[i0]
p0 = newstr(i0)
jump(i0)
""", """
[i0]
p0 = call_malloc_nursery_varsize(1, 1, i0, descr=strdescr)
setfield_gc(p0, i0, descr=strlendescr)
setfield_gc(p0, 0, descr=strhashdescr)
jump(i0)
""")
def test_rewrite_assembler_nonstandard_array(self):
# a non-standard array is a bit hard to get; e.g. GcArray(Float)
# is like that on Win32, but not on Linux. Build one manually...
NONSTD = lltype.GcArray(lltype.Float)
nonstd_descr = get_array_descr(self.gc_ll_descr, NONSTD)
nonstd_descr.tid = 6464
nonstd_descr.basesize = 64 # <= hacked
nonstd_descr.itemsize = 8
nonstd_descr_gcref = 123
self.check_rewrite("""
[i0, p1]
p0 = new_array(i0, descr=nonstd_descr)
setarrayitem_gc(p0, i0, p1)
jump(i0)
""", """
[i0, p1]
p0 = call_malloc_gc(ConstClass(malloc_array_nonstandard), \
64, 8, \
%(nonstd_descr.lendescr.offset)d, \
6464, i0, \
descr=malloc_array_nonstandard_descr)
cond_call_gc_wb_array(p0, i0, descr=wbdescr)
setarrayitem_gc(p0, i0, p1)
jump(i0)
""", nonstd_descr=nonstd_descr)
def test_rewrite_assembler_maximal_size_1(self):
self.gc_ll_descr.max_size_of_young_obj = 100
self.check_rewrite("""
[]
p0 = new_array(103, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_array), 1, \
%(bdescr.tid)d, 103, \
descr=malloc_array_descr)
jump()
""")
def test_rewrite_assembler_maximal_size_2(self):
self.gc_ll_descr.max_size_of_young_obj = 300
self.check_rewrite("""
[]
p0 = new_array(101, descr=bdescr)
p1 = new_array(102, descr=bdescr) # two new_arrays can be combined
p2 = new_array(103, descr=bdescr) # but not all three
jump()
""", """
[]
p0 = call_malloc_nursery( \
%(2 * (bdescr.basesize + 104))d)
setfield_gc(p0, 8765, descr=tiddescr)
setfield_gc(p0, 101, descr=blendescr)
p1 = nursery_ptr_increment(p0, %(bdescr.basesize + 104)d)
setfield_gc(p1, 8765, descr=tiddescr)
setfield_gc(p1, 102, descr=blendescr)
p2 = call_malloc_nursery( \
%(bdescr.basesize + 104)d)
setfield_gc(p2, 8765, descr=tiddescr)
setfield_gc(p2, 103, descr=blendescr)
jump()
""")
def test_rewrite_assembler_huge_size(self):
# "huge" is defined as "larger than 0xffffff bytes, or 16MB"
self.check_rewrite("""
[]
p0 = new_array(20000000, descr=bdescr)
jump()
""", """
[]
p0 = call_malloc_gc(ConstClass(malloc_array), 1, \
%(bdescr.tid)d, 20000000, \
descr=malloc_array_descr)
jump()
""")
def test_new_with_vtable(self):
self.check_rewrite("""
[]
p0 = new_with_vtable(descr=o_descr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(104) # rounded up
setfield_gc(p0, 9315, descr=tiddescr)
setfield_gc(p0, 0, descr=vtable_descr)
jump()
""")
def test_new_with_vtable_too_big(self):
self.gc_ll_descr.max_size_of_young_obj = 100
self.check_rewrite("""
[]
p0 = new_with_vtable(descr=o_descr)
jump()
""", """
[p1]
p0 = call_malloc_gc(ConstClass(malloc_big_fixedsize), 104, 9315, \
descr=malloc_big_fixedsize_descr)
setfield_gc(p0, 0, descr=vtable_descr)
jump()
""")
def test_rewrite_assembler_newstr_newunicode(self):
self.check_rewrite("""
[i2]
p0 = newstr(14)
p1 = newunicode(10)
p2 = newunicode(i2)
p3 = newstr(i2)
jump()
""", """
[i2]
p0 = call_malloc_nursery( \
%(strdescr.basesize + 16 * strdescr.itemsize + \
unicodedescr.basesize + 10 * unicodedescr.itemsize)d)
setfield_gc(p0, %(strdescr.tid)d, descr=tiddescr)
setfield_gc(p0, 14, descr=strlendescr)
setfield_gc(p0, 0, descr=strhashdescr)
p1 = nursery_ptr_increment(p0, %(strdescr.basesize + 16 * strdescr.itemsize)d)
setfield_gc(p1, %(unicodedescr.tid)d, descr=tiddescr)
setfield_gc(p1, 10, descr=unicodelendescr)
setfield_gc(p1, 0, descr=unicodehashdescr)
p2 = call_malloc_nursery_varsize(2, %(unicodedescr.itemsize)d, i2,\
descr=unicodedescr)
setfield_gc(p2, i2, descr=unicodelendescr)
setfield_gc(p2, 0, descr=unicodehashdescr)
p3 = call_malloc_nursery_varsize(1, 1, i2, \
descr=strdescr)
setfield_gc(p3, i2, descr=strlendescr)
setfield_gc(p3, 0, descr=strhashdescr)
jump()
""")
def test_write_barrier_before_setfield_gc(self):
self.check_rewrite("""
[p1, p2]
setfield_gc(p1, p2, descr=tzdescr)
jump()
""", """
[p1, p2]
cond_call_gc_wb(p1, descr=wbdescr)
setfield_gc(p1, p2, descr=tzdescr)
jump()
""")
def test_write_barrier_before_array_without_from_array(self):
self.gc_ll_descr.write_barrier_descr.has_write_barrier_from_array = (
lambda cpu: False)
self.check_rewrite("""
[p1, i2, p3]
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[p1, i2, p3]
cond_call_gc_wb(p1, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_short_array(self):
self.gc_ll_descr.max_size_of_young_obj = 2000
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(129, descr=cdescr)
call_n(123456)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 129 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 129, descr=clendescr)
zero_array(p1, 0, 129, descr=cdescr)
call_n(123456)
cond_call_gc_wb(p1, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_long_array(self):
# the limit of "being too long" is fixed, arbitrarily, at 130
self.gc_ll_descr.max_size_of_young_obj = 2000
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(130, descr=cdescr)
call_n(123456)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 130 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 130, descr=clendescr)
zero_array(p1, 0, 130, descr=cdescr)
call_n(123456)
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_unknown_array(self):
self.check_rewrite("""
[p1, i2, p3]
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[p1, i2, p3]
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_label_makes_size_unknown(self):
self.check_rewrite("""
[i2, p3]
p1 = new_array_clear(5, descr=cdescr)
label(p1, i2, p3)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""", """
[i2, p3]
p1 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p1, 8111, descr=tiddescr)
setfield_gc(p1, 5, descr=clendescr)
zero_array(p1, 0, 5, descr=cdescr)
label(p1, i2, p3)
cond_call_gc_wb_array(p1, i2, descr=wbdescr)
setarrayitem_gc(p1, i2, p3, descr=cdescr)
jump()
""")
def test_write_barrier_before_setinteriorfield_gc(self):
S1 = lltype.GcStruct('S1')
INTERIOR = lltype.GcArray(('z', lltype.Ptr(S1)))
interiordescr = get_array_descr(self.gc_ll_descr, INTERIOR)
interiordescr.tid = 1291
interiorlendescr = interiordescr.lendescr
interiorzdescr = get_interiorfield_descr(self.gc_ll_descr,
INTERIOR, 'z')
self.check_rewrite("""
[p1, p2]
setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr)
jump(p1, p2)
""", """
[p1, p2]
cond_call_gc_wb_array(p1, 0, descr=wbdescr)
setinteriorfield_gc(p1, 0, p2, descr=interiorzdescr)
jump(p1, p2)
""", interiorzdescr=interiorzdescr)
def test_initialization_store(self):
self.check_rewrite("""
[p1]
p0 = new(descr=tdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_initialization_store_2(self):
self.check_rewrite("""
[]
p0 = new(descr=tdescr)
p1 = new(descr=sdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[]
p0 = call_malloc_nursery(%(tdescr.size + sdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
p1 = nursery_ptr_increment(p0, %(tdescr.size)d)
setfield_gc(p1, 1234, descr=tiddescr)
# <<<no cond_call_gc_wb here>>>
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_initialization_store_array(self):
self.check_rewrite("""
[p1, i2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, i2, p1, descr=cdescr)
jump()
""", """
[p1, i2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 5, descr=cdescr)
setarrayitem_gc(p0, i2, p1, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 1, p1, descr=cdescr)
setarrayitem_gc(p0, 0, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 2, 3, descr=cdescr)
setarrayitem_gc(p0, 1, p1, descr=cdescr)
setarrayitem_gc(p0, 0, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_right(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 3, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
jump()
""")
def test_zero_array_not_reduced_at_all(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 0, 5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_completely(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 5, 0, descr=cdescr)
setarrayitem_gc(p0, 3, p1, descr=cdescr)
setarrayitem_gc(p0, 4, p2, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
setarrayitem_gc(p0, 2, p2, descr=cdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left_with_call(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
call_n(321321)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 1, 4, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
call_n(321321)
cond_call_gc_wb(p0, descr=wbdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_reduced_left_with_label(self):
self.check_rewrite("""
[p1, p2]
p0 = new_array_clear(5, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
label(p0, p2)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""", """
[p1, p2]
p0 = call_malloc_nursery( \
%(cdescr.basesize + 5 * cdescr.itemsize)d)
setfield_gc(p0, 8111, descr=tiddescr)
setfield_gc(p0, 5, descr=clendescr)
zero_array(p0, 1, 4, descr=cdescr)
setarrayitem_gc(p0, 0, p1, descr=cdescr)
label(p0, p2)
cond_call_gc_wb_array(p0, 1, descr=wbdescr)
setarrayitem_gc(p0, 1, p2, descr=cdescr)
jump()
""")
def test_zero_array_varsize(self):
self.check_rewrite("""
[p1, p2, i3]
p0 = new_array_clear(i3, descr=bdescr)
jump()
""", """
[p1, p2, i3]
p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr)
setfield_gc(p0, i3, descr=blendescr)
zero_array(p0, 0, i3, descr=bdescr)
jump()
""")
def test_zero_array_varsize_cannot_reduce(self):
self.check_rewrite("""
[p1, p2, i3]
p0 = new_array_clear(i3, descr=bdescr)
setarrayitem_gc(p0, 0, p1, descr=bdescr)
jump()
""", """
[p1, p2, i3]
p0 = call_malloc_nursery_varsize(0, 1, i3, descr=bdescr)
setfield_gc(p0, i3, descr=blendescr)
zero_array(p0, 0, i3, descr=bdescr)
cond_call_gc_wb_array(p0, 0, descr=wbdescr)
setarrayitem_gc(p0, 0, p1, descr=bdescr)
jump()
""")
def test_initialization_store_potentially_large_array(self):
# the write barrier cannot be omitted, because we might get
# an array with cards and the GC assumes that the write
# barrier is always called, even on young (but large) arrays
self.check_rewrite("""
[i0, p1, i2]
p0 = new_array(i0, descr=bdescr)
setarrayitem_gc(p0, i2, p1, descr=bdescr)
jump()
""", """
[i0, p1, i2]
p0 = call_malloc_nursery_varsize(0, 1, i0, descr=bdescr)
setfield_gc(p0, i0, descr=blendescr)
cond_call_gc_wb_array(p0, i2, descr=wbdescr)
setarrayitem_gc(p0, i2, p1, descr=bdescr)
jump()
""")
def test_non_initialization_store(self):
self.check_rewrite("""
[i0]
p0 = new(descr=tdescr)
p1 = newstr(i0)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[i0]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
p1 = call_malloc_nursery_varsize(1, 1, i0, \
descr=strdescr)
setfield_gc(p1, i0, descr=strlendescr)
setfield_gc(p1, 0, descr=strhashdescr)
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_non_initialization_store_label(self):
self.check_rewrite("""
[p1]
p0 = new(descr=tdescr)
label(p0, p1)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""", """
[p1]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
label(p0, p1)
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
jump()
""")
def test_multiple_writes(self):
self.check_rewrite("""
[p0, p1, p2]
setfield_gc(p0, p1, descr=tzdescr)
setfield_gc(p0, p2, descr=tzdescr)
jump(p1, p2, p0)
""", """
[p0, p1, p2]
cond_call_gc_wb(p0, descr=wbdescr)
setfield_gc(p0, p1, descr=tzdescr)
setfield_gc(p0, p2, descr=tzdescr)
jump(p1, p2, p0)
""")
def test_rewrite_call_assembler(self):
self.check_rewrite("""
[i0, f0]
i2 = call_assembler_i(i0, f0, descr=casmdescr)
""", """
[i0, f0]
i1 = getfield_raw_i(ConstClass(frame_info), descr=jfi_frame_size)
p1 = call_malloc_nursery_varsize_frame(i1)
setfield_gc(p1, 0, descr=tiddescr)
i2 = getfield_raw_i(ConstClass(frame_info), descr=jfi_frame_depth)
setfield_gc(p1, 0, descr=jf_extra_stack_depth)
setfield_gc(p1, NULL, descr=jf_savedata)
setfield_gc(p1, NULL, descr=jf_force_descr)
setfield_gc(p1, NULL, descr=jf_descr)
setfield_gc(p1, NULL, descr=jf_guard_exc)
setfield_gc(p1, NULL, descr=jf_forward)
setfield_gc(p1, i2, descr=framelendescr)
setfield_gc(p1, ConstClass(frame_info), descr=jf_frame_info)
setarrayitem_gc(p1, 0, i0, descr=signedframedescr)
setarrayitem_gc(p1, 1, f0, descr=floatframedescr)
i3 = call_assembler_i(p1, descr=casmdescr)
""")
def test_int_add_ovf(self):
self.check_rewrite("""
[i0]
p0 = new(descr=tdescr)
i1 = int_add_ovf(i0, 123)
guard_overflow(descr=guarddescr) []
jump()
""", """
[i0]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
i1 = int_add_ovf(i0, 123)
guard_overflow(descr=guarddescr) []
jump()
""")
def test_int_gt(self):
self.check_rewrite("""
[i0]
p0 = new(descr=tdescr)
i1 = int_gt(i0, 123)
guard_false(i1, descr=guarddescr) []
jump()
""", """
[i0]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
i1 = int_gt(i0, 123)
guard_false(i1, descr=guarddescr) []
jump()
""")
def test_zero_ptr_field_before_getfield(self):
# This case may need to be fixed in the metainterp/optimizeopt
# already so that it no longer occurs for rewrite.py. But anyway
# it's a good idea to make sure rewrite.py is correct on its own.
self.check_rewrite("""
[]
p0 = new(descr=tdescr)
p1 = getfield_gc_r(p0, descr=tdescr)
jump(p1)
""", """
[]
p0 = call_malloc_nursery(%(tdescr.size)d)
setfield_gc(p0, 5678, descr=tiddescr)
zero_ptr_field(p0, %(tdescr.gc_fielddescrs[0].offset)s)
p1 = getfield_gc_r(p0, descr=tdescr)
jump(p1)
""")
```
#### File: jit/backend/model.py
```python
import weakref
from rpython.rlib.debug import debug_start, debug_print, debug_stop
from rpython.rtyper.lltypesystem import lltype, llmemory
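# Counters for compiled and freed loops/bridges; every AbstractCPU gets
# its own instance as 'self.tracker'.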
class CPUTotalTracker(object):
total_compiled_loops = 0
total_compiled_bridges = 0
total_freed_loops = 0
total_freed_bridges = 0
class AbstractCPU(object):
supports_floats = False
supports_longlong = False
# ^^^ This is only useful on 32-bit platforms. If True,
# longlongs are supported by the JIT, but stored as doubles.
# Boxes and Consts are BoxFloats and ConstFloats.
supports_singlefloats = False
supports_guard_gc_type = False
propagate_exception_descr = None
remove_gctypeptr = False
def __init__(self):
self.tracker = CPUTotalTracker()
def _freeze_(self):
return True
def setup_once(self):
"""Called once by the front-end when the program starts."""
pass
def finish_once(self):
"""Called once by the front-end when the program stops."""
pass
def get_all_loop_runs(self):
""" Function that will return number of times all the loops were run.
Requires earlier setting of set_debug(True), otherwise you won't
get the information.
Returns an instance of LOOP_RUN_CONTAINER from rlib.jit_hooks
"""
raise NotImplementedError
def set_debug(self, value):
""" Enable or disable debugging info. Does nothing by default. Returns
the previous setting.
"""
return False
def compile_loop(self, inputargs, operations, looptoken, jd_id=0,
unique_id=0, log=True, name='', logger=None):
"""Assemble the given loop.
Should create and attach a fresh CompiledLoopToken to
looptoken.compiled_loop_token and stick extra attributes
on it to point to the compiled loop in assembler.
Returns either None or an instance of rpython.rlib.jit.AsmInfo.
"""
raise NotImplementedError
def compile_bridge(self, faildescr, inputargs, operations,
original_loop_token, log=True, logger=None):
"""Assemble the bridge.
The FailDescr is the descr of the original guard that failed.
Returns either None or an instance of rpython.rlib.jit.AsmInfo.
"""
raise NotImplementedError
def dump_loop_token(self, looptoken):
"""Print a disassembled version of looptoken to stdout"""
raise NotImplementedError
def execute_token(self, looptoken, *args):
"""NOT_RPYTHON (for tests only)
Execute the generated code referenced by the looptoken.
When done, this returns a 'dead JIT frame' object that can
be inspected with the get_latest_xxx() methods.
"""
argtypes = [lltype.typeOf(x) for x in args]
execute = self.make_execute_token(*argtypes)
return execute(looptoken, *args)
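    # Typical untranslated usage, sketched:
    #     deadframe = cpu.execute_token(looptoken, *args)
    #     descr = cpu.get_latest_descr(deadframe)
    #     value = cpu.get_int_value(deadframe, 0)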
def make_execute_token(self, *argtypes):
"""Must make and return an execute_token() function that will be
called with the given argtypes.
"""
raise NotImplementedError
def get_latest_descr(self, deadframe):
"""Returns the Descr for the last operation executed by the frame."""
raise NotImplementedError
def get_int_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns an int."""
raise NotImplementedError
def get_float_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns a FLOATSTORAGE."""
raise NotImplementedError
def get_ref_value(self, deadframe, index):
"""Returns the value for the index'th argument to the
last executed operation (from 'fail_args' if it was a guard,
or from 'args' if it was a FINISH). Returns a GCREF."""
raise NotImplementedError
def grab_exc_value(self, deadframe):
"""Return the exception set by the latest execute_token(),
when it exits due to a failure of a GUARD_EXCEPTION or
GUARD_NO_EXCEPTION. (Returns a GCREF)""" # XXX remove me
raise NotImplementedError
def set_savedata_ref(self, deadframe, data):
"""For the front-end: store a GCREF on the deadframe object."""
raise NotImplementedError
def get_savedata_ref(self, deadframe):
"""For the front-end: get the GCREF saved with set_savedata_ref()."""
raise NotImplementedError
def force(self, force_token):
"""Take a 'force token' as produced by the FORCE_TOKEN operation,
and 'kill' the corresponding JIT frame, which should be somewhere
in the stack right now. Returns it as a dead frame object. When
we later return to the JIT frame, the next operation executed must
be a GUARD_NOT_FORCED, which will fail."""
raise NotImplementedError
def redirect_call_assembler(self, oldlooptoken, newlooptoken):
"""Redirect oldlooptoken to newlooptoken. More precisely, it is
enough to redirect all CALL_ASSEMBLERs already compiled that call
        oldlooptoken so that from now on they will call newlooptoken."""
raise NotImplementedError
def invalidate_loop(self, looptoken):
"""Activate all GUARD_NOT_INVALIDATED in the loop and its attached
bridges. Before this call, all GUARD_NOT_INVALIDATED do nothing;
after this call, they all fail. Note that afterwards, if one such
guard fails often enough, it has a bridge attached to it; it is
possible then to re-call invalidate_loop() on the same looptoken,
which must invalidate all newer GUARD_NOT_INVALIDATED, but not the
old one that already has a bridge attached to it."""
raise NotImplementedError
def free_loop_and_bridges(self, compiled_loop_token):
"""This method is called to free resources (machine code,
references to resume guards, etc.) allocated by the compilation
of a loop and all bridges attached to it. After this call, the
frontend cannot use this compiled loop any more; in fact, it
        guarantees that at the point of the call to free_loop_and_bridges(),
none of the corresponding assembler is currently running.
"""
pass
def sizeof(self, S):
raise NotImplementedError
def fielddescrof(self, S, fieldname):
"""Return the Descr corresponding to field 'fieldname' on the
structure 'S'. It is important that this function (at least)
caches the results."""
raise NotImplementedError
def interiorfielddescrof(self, A, fieldname):
raise NotImplementedError
def arraydescrof(self, A):
raise NotImplementedError
def calldescrof(self, FUNC, ARGS, RESULT, extrainfo):
# FUNC is the original function type, but ARGS is a list of types
# with Voids removed
raise NotImplementedError
def typedescrof(self, TYPE):
raise NotImplementedError
def unpack_arraydescr_size(self, arraydescr):
"""
Return basesize, itemsize, is_signed
"""
raise NotImplementedError
@staticmethod
def cast_int_to_ptr(x, TYPE):
x = llmemory.cast_int_to_adr(x)
return llmemory.cast_adr_to_ptr(x, TYPE)
# ---------- the backend-dependent operations ----------
# lltype specific operations
# --------------------------
def bh_getarrayitem_gc_i(self, array, index, arraydescr):
raise NotImplementedError
def bh_getarrayitem_gc_r(self, array, index, arraydescr):
raise NotImplementedError
def bh_getarrayitem_gc_f(self, array, index, arraydescr):
raise NotImplementedError
def bh_getfield_gc_i(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_gc_r(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_gc_f(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_i(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_r(self, struct, fielddescr):
raise NotImplementedError
def bh_getfield_raw_f(self, struct, fielddescr):
raise NotImplementedError
def bh_new(self, sizedescr):
raise NotImplementedError
def bh_new_with_vtable(self, vtable, sizedescr):
raise NotImplementedError
def bh_new_array(self, length, arraydescr):
raise NotImplementedError
def bh_newstr(self, length):
raise NotImplementedError
def bh_newunicode(self, length):
raise NotImplementedError
def bh_new_raw_buffer(self, size):
raise NotImplementedError
def bh_arraylen_gc(self, array, arraydescr):
raise NotImplementedError
def bh_classof(self, struct):
raise NotImplementedError
def bh_setarrayitem_gc_i(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setarrayitem_gc_r(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setarrayitem_gc_f(self, array, index, newvalue, arraydescr):
raise NotImplementedError
def bh_setfield_gc_i(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_gc_r(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_gc_f(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_raw_i(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_setfield_raw_f(self, struct, newvalue, fielddescr):
raise NotImplementedError
def bh_call_i(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_r(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_f(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_call_v(self, func, args_i, args_r, args_f, calldescr):
raise NotImplementedError
def bh_strlen(self, string):
raise NotImplementedError
def bh_strgetitem(self, string, index):
raise NotImplementedError
def bh_unicodelen(self, string):
raise NotImplementedError
def bh_unicodegetitem(self, string, index):
raise NotImplementedError
def bh_strsetitem(self, string, index, newvalue):
raise NotImplementedError
def bh_unicodesetitem(self, string, index, newvalue):
raise NotImplementedError
def bh_copystrcontent(self, src, dst, srcstart, dststart, length):
raise NotImplementedError
def bh_copyunicodecontent(self, src, dst, srcstart, dststart, length):
raise NotImplementedError
class CompiledLoopToken(object):
asmmemmgr_blocks = None
asmmemmgr_gcroots = 0
def __init__(self, cpu, number):
cpu.tracker.total_compiled_loops += 1
self.cpu = cpu
self.number = number
self.bridges_count = 0
self.invalidate_positions = []
        # a list of weakrefs to looptokens that have been redirected to
        # this one
self.looptokens_redirected_to = []
debug_start("jit-mem-looptoken-alloc")
debug_print("allocating Loop #", self.number)
debug_stop("jit-mem-looptoken-alloc")
def compiling_a_bridge(self):
self.cpu.tracker.total_compiled_bridges += 1
self.bridges_count += 1
debug_start("jit-mem-looptoken-alloc")
debug_print("allocating Bridge #", self.bridges_count, "of Loop #", self.number)
debug_stop("jit-mem-looptoken-alloc")
def update_frame_info(self, oldlooptoken, baseofs):
new_fi = self.frame_info
new_loop_tokens = []
for ref in oldlooptoken.looptokens_redirected_to:
looptoken = ref()
if looptoken:
looptoken.frame_info.update_frame_depth(baseofs,
new_fi.jfi_frame_depth)
new_loop_tokens.append(ref)
oldlooptoken.frame_info.update_frame_depth(baseofs,
new_fi.jfi_frame_depth)
assert oldlooptoken is not None
new_loop_tokens.append(weakref.ref(oldlooptoken))
self.looptokens_redirected_to = new_loop_tokens
def __del__(self):
#debug_start("jit-mem-looptoken-free")
#debug_print("freeing Loop #", self.number, 'with',
# self.bridges_count, 'attached bridges')
self.cpu.free_loop_and_bridges(self)
self.cpu.tracker.total_freed_loops += 1
self.cpu.tracker.total_freed_bridges += self.bridges_count
#debug_stop("jit-mem-looptoken-free")
```
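The abstract CPU interface above stresses that `fielddescrof` must cache its results: descrs are compared by identity throughout the JIT (they serve as dict keys, e.g. in the assembler's `_descr_dict` and the heap optimizer's `cached_fields`), so asking twice for the same field has to return the very same object. Below is a minimal sketch of that caching contract; `ToyCPU` and `FieldDescr` are hypothetical stand-ins, not part of any backend.
```python
class FieldDescr(object):
    """Hypothetical stand-in for a backend Descr; identity matters."""
    def __init__(self, struct, fieldname):
        self.struct = struct
        self.fieldname = fieldname

class ToyCPU(object):
    def __init__(self):
        self._descr_cache = {}  # (struct, fieldname) -> FieldDescr

    def fielddescrof(self, S, fieldname):
        # Cache the result so that the same (S, fieldname) pair always
        # yields the *same* object -- callers compare descrs by identity.
        key = (S, fieldname)
        if key not in self._descr_cache:
            self._descr_cache[key] = FieldDescr(S, fieldname)
        return self._descr_cache[key]

cpu = ToyCPU()
assert cpu.fielddescrof('NODE', 'value') is cpu.fielddescrof('NODE', 'value')
```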
#### File: jit/codewriter/assembler.py
```python
from rpython.jit.metainterp.history import AbstractDescr, getkind
from rpython.jit.codewriter.flatten import Register, Label, TLabel, KINDS
from rpython.jit.codewriter.flatten import ListOfKind, IndirectCallTargets
from rpython.jit.codewriter.format import format_assembler
from rpython.jit.codewriter.jitcode import SwitchDictDescr, JitCode
from rpython.jit.codewriter import heaptracker, longlong
from rpython.rlib.objectmodel import ComputedIntSymbolic
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper import rclass
class AssemblerError(Exception):
pass
class Assembler(object):
def __init__(self):
self.insns = {}
self.descrs = []
self.indirectcalltargets = set() # set of JitCodes
self.list_of_addr2name = []
self._descr_dict = {}
self._count_jitcodes = 0
self._seen_raw_objects = set()
def assemble(self, ssarepr, jitcode=None):
"""Take the 'ssarepr' representation of the code and assemble
it inside the 'jitcode'. If jitcode is None, make a new one.
"""
self.setup(ssarepr.name)
ssarepr._insns_pos = []
for insn in ssarepr.insns:
ssarepr._insns_pos.append(len(self.code))
self.write_insn(insn)
self.fix_labels()
self.check_result()
if jitcode is None:
jitcode = JitCode(ssarepr.name)
jitcode._ssarepr = ssarepr
self.make_jitcode(jitcode)
if self._count_jitcodes < 20: # stop if we have a lot of them
jitcode._dump = format_assembler(ssarepr)
self._count_jitcodes += 1
return jitcode
def setup(self, name):
self.code = []
self.constants_dict = {}
self.constants_i = []
self.constants_r = []
self.constants_f = []
self.label_positions = {}
self.tlabel_positions = []
self.switchdictdescrs = []
self.count_regs = dict.fromkeys(KINDS, 0)
self.liveness = {}
self.startpoints = set()
self.alllabels = set()
self.resulttypes = {}
self.ssareprname = name
def emit_reg(self, reg):
if reg.index >= self.count_regs[reg.kind]:
self.count_regs[reg.kind] = reg.index + 1
self.code.append(chr(reg.index))
def emit_const(self, const, kind, allow_short=False):
value = const.value
if kind == 'int':
TYPE = const.concretetype
if isinstance(TYPE, lltype.Ptr):
assert TYPE.TO._gckind == 'raw'
self.see_raw_object(value)
value = llmemory.cast_ptr_to_adr(value)
TYPE = llmemory.Address
if TYPE == llmemory.Address:
value = heaptracker.adr2int(value)
if TYPE is lltype.SingleFloat:
value = longlong.singlefloat2int(value)
if not isinstance(value, (llmemory.AddressAsInt,
ComputedIntSymbolic)):
value = lltype.cast_primitive(lltype.Signed, value)
if allow_short:
try:
short_num = -128 <= value <= 127
except TypeError: # "Symbolics cannot be compared!"
short_num = False
if short_num:
# emit the constant as a small integer
self.code.append(chr(value & 0xFF))
return True
constants = self.constants_i
elif kind == 'ref':
value = lltype.cast_opaque_ptr(llmemory.GCREF, value)
constants = self.constants_r
elif kind == 'float':
if const.concretetype == lltype.Float:
value = longlong.getfloatstorage(value)
else:
assert longlong.is_longlong(const.concretetype)
value = rffi.cast(lltype.SignedLongLong, value)
constants = self.constants_f
else:
raise AssemblerError('unimplemented %r in %r' %
(const, self.ssareprname))
key = (kind, Constant(value))
if key not in self.constants_dict:
constants.append(value)
val = 256 - len(constants)
assert val >= 0, "too many constants"
self.constants_dict[key] = val
# emit the constant normally, as one byte that is an index in the
# list of constants
self.code.append(chr(self.constants_dict[key]))
return False
def write_insn(self, insn):
if insn[0] == '---':
return
if isinstance(insn[0], Label):
self.label_positions[insn[0].name] = len(self.code)
return
if insn[0] == '-live-':
key = len(self.code)
live_i, live_r, live_f = self.liveness.get(key, ("", "", ""))
live_i = self.get_liveness_info(live_i, insn[1:], 'int')
live_r = self.get_liveness_info(live_r, insn[1:], 'ref')
live_f = self.get_liveness_info(live_f, insn[1:], 'float')
self.liveness[key] = live_i, live_r, live_f
return
startposition = len(self.code)
self.code.append("temporary placeholder")
#
argcodes = []
allow_short = (insn[0] in USE_C_FORM)
for x in insn[1:]:
if isinstance(x, Register):
self.emit_reg(x)
argcodes.append(x.kind[0])
elif isinstance(x, Constant):
kind = getkind(x.concretetype)
is_short = self.emit_const(x, kind, allow_short=allow_short)
if is_short:
argcodes.append('c')
else:
argcodes.append(kind[0])
elif isinstance(x, TLabel):
self.alllabels.add(len(self.code))
self.tlabel_positions.append((x.name, len(self.code)))
self.code.append("temp 1")
self.code.append("temp 2")
argcodes.append('L')
elif isinstance(x, ListOfKind):
itemkind = x.kind
lst = list(x)
assert len(lst) <= 255, "list too long!"
self.code.append(chr(len(lst)))
for item in lst:
if isinstance(item, Register):
assert itemkind == item.kind
self.emit_reg(item)
elif isinstance(item, Constant):
assert itemkind == getkind(item.concretetype)
self.emit_const(item, itemkind)
else:
raise NotImplementedError("found in ListOfKind(): %r"
% (item,))
argcodes.append(itemkind[0].upper())
elif isinstance(x, AbstractDescr):
if x not in self._descr_dict:
self._descr_dict[x] = len(self.descrs)
self.descrs.append(x)
if isinstance(x, SwitchDictDescr):
self.switchdictdescrs.append(x)
num = self._descr_dict[x]
assert 0 <= num <= 0xFFFF, "too many AbstractDescrs!"
self.code.append(chr(num & 0xFF))
self.code.append(chr(num >> 8))
argcodes.append('d')
elif isinstance(x, IndirectCallTargets):
self.indirectcalltargets.update(x.lst)
elif x == '->':
assert '>' not in argcodes
argcodes.append('>')
else:
raise NotImplementedError(x)
#
opname = insn[0]
if '>' in argcodes:
assert argcodes.index('>') == len(argcodes) - 2
self.resulttypes[len(self.code)] = argcodes[-1]
key = opname + '/' + ''.join(argcodes)
num = self.insns.setdefault(key, len(self.insns))
self.code[startposition] = chr(num)
self.startpoints.add(startposition)
def get_liveness_info(self, prevlives, args, kind):
"""Return a string whose characters are register numbers.
We sort the numbers, too, to increase the chances of duplicate
strings (which are collapsed into a single string during translation).
"""
lives = set(prevlives) # set of characters
for reg in args:
if isinstance(reg, Register) and reg.kind == kind:
lives.add(chr(reg.index))
return lives
def fix_labels(self):
for name, pos in self.tlabel_positions:
assert self.code[pos ] == "temp 1"
assert self.code[pos+1] == "temp 2"
target = self.label_positions[name]
assert 0 <= target <= 0xFFFF
self.code[pos ] = chr(target & 0xFF)
self.code[pos+1] = chr(target >> 8)
for descr in self.switchdictdescrs:
as_dict = {}
for key, switchlabel in descr._labels:
target = self.label_positions[switchlabel.name]
as_dict[key] = target
descr.attach(as_dict)
def check_result(self):
# Limitation of the number of registers, from the single-byte encoding
assert self.count_regs['int'] + len(self.constants_i) <= 256
assert self.count_regs['ref'] + len(self.constants_r) <= 256
assert self.count_regs['float'] + len(self.constants_f) <= 256
def make_jitcode(self, jitcode):
jitcode.setup(''.join(self.code),
self.constants_i,
self.constants_r,
self.constants_f,
self.count_regs['int'],
self.count_regs['ref'],
self.count_regs['float'],
liveness=self.liveness,
startpoints=self.startpoints,
alllabels=self.alllabels,
resulttypes=self.resulttypes)
def see_raw_object(self, value):
if value._obj not in self._seen_raw_objects:
self._seen_raw_objects.add(value._obj)
if not value: # filter out NULL pointers
return
TYPE = lltype.typeOf(value).TO
if isinstance(TYPE, lltype.FuncType):
name = value._obj._name
elif TYPE == rclass.OBJECT_VTABLE:
if not value.name: # this is really the "dummy" class
return # pointer from some dict
name = ''.join(value.name.chars)
else:
return
addr = llmemory.cast_ptr_to_adr(value)
self.list_of_addr2name.append((addr, name))
def finished(self, callinfocollection):
# Helper called at the end of assembling. Registers the extra
# functions shown in _callinfo_for_oopspec.
for func in callinfocollection.all_function_addresses_as_int():
func = heaptracker.int2adr(func)
self.see_raw_object(func.ptr)
# A set of instructions that use the 'c' encoding for small constants.
# Allowing it anywhere causes the number of instruction variants to
# explode, growing past 256. So we list here only the most common
# instructions where the 'c' variant might be useful.
USE_C_FORM = set([
'copystrcontent',
'getarrayitem_gc_pure_i',
'getarrayitem_gc_pure_r',
'getarrayitem_gc_i',
'getarrayitem_gc_r',
'goto_if_not_int_eq',
'goto_if_not_int_ge',
'goto_if_not_int_gt',
'goto_if_not_int_le',
'goto_if_not_int_lt',
'goto_if_not_int_ne',
'int_add',
'int_and',
'int_copy',
'int_eq',
'int_ge',
'int_gt',
'int_le',
'int_lt',
'int_ne',
'int_return',
'int_sub',
'jit_merge_point',
'new_array',
'new_array_clear',
'newstr',
'setarrayitem_gc_i',
'setarrayitem_gc_r',
'setfield_gc_i',
'strgetitem',
'strsetitem',
'foobar', 'baz', # for tests
])
```
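The one-byte operand encoding enforced by `check_result` above works because registers and constants of one kind share the same byte: `emit_reg` numbers registers upward from 0, while `emit_const` assigns constants byte values counted downward from 255 (`val = 256 - len(constants)`), so the two ranges can never collide as long as their sizes sum to at most 256. A small sketch of the decoding side follows, under the assumption that the interpreter simply mirrors this scheme; `decode_operand` is a hypothetical helper, not part of the codewriter.
```python
def decode_operand(b, num_regs, constants):
    # Mirror of emit_reg/emit_const above: low byte values name
    # registers, high values index the constant pool counted down
    # from 255 (the first constant appended got byte 255, the
    # second 254, and so on).
    if b < num_regs:
        return ('reg', b)
    return ('const', constants[255 - b])

constants_i = [1000, 2000]   # as built up by successive emit_const calls
assert decode_operand(3, 16, constants_i) == ('reg', 3)
assert decode_operand(255, 16, constants_i) == ('const', 1000)
assert decode_operand(254, 16, constants_i) == ('const', 2000)
```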
#### File: metainterp/optimizeopt/dependency.py
```python
import py
from rpython.jit.metainterp import compile
from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
from rpython.jit.metainterp.resoperation import (rop, GuardResOp, ResOperation)
from rpython.jit.metainterp.resume import Snapshot
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.metainterp.history import (ConstPtr, ConstInt,Const,
AbstractValue, AbstractFailDescr)
from rpython.rtyper.lltypesystem import llmemory
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import we_are_translated
MODIFY_COMPLEX_OBJ = [ (rop.SETARRAYITEM_GC, 0, 1)
, (rop.SETARRAYITEM_RAW, 0, 1)
, (rop.RAW_STORE, 0, 1)
, (rop.SETINTERIORFIELD_GC, 0, -1)
, (rop.SETINTERIORFIELD_RAW, 0, -1)
, (rop.SETFIELD_GC, 0, -1)
, (rop.SETFIELD_RAW, 0, -1)
, (rop.ZERO_PTR_FIELD, 0, -1)
, (rop.ZERO_ARRAY, 0, -1)
, (rop.STRSETITEM, 0, -1)
, (rop.UNICODESETITEM, 0, -1)
]
LOAD_COMPLEX_OBJ = [ (rop.GETARRAYITEM_GC_I, 0, 1)
, (rop.GETARRAYITEM_GC_F, 0, 1)
, (rop.GETARRAYITEM_GC_R, 0, 1)
, (rop.GETARRAYITEM_RAW_I, 0, 1)
, (rop.GETARRAYITEM_RAW_F, 0, 1)
, (rop.RAW_LOAD_I, 0, 1)
, (rop.RAW_LOAD_F, 0, 1)
, (rop.GETINTERIORFIELD_GC_I, 0, 1)
, (rop.GETINTERIORFIELD_GC_F, 0, 1)
, (rop.GETINTERIORFIELD_GC_R, 0, 1)
, (rop.GETFIELD_GC_I, 0, -1)
, (rop.GETFIELD_GC_F, 0, -1)
, (rop.GETFIELD_GC_R, 0, -1)
, (rop.GETFIELD_RAW_I, 0, -1)
, (rop.GETFIELD_RAW_F, 0, -1)
, (rop.GETFIELD_RAW_R, 0, -1)
]
class Path(object):
def __init__(self,path):
self.path = path
def second(self):
if len(self.path) <= 1:
return None
return self.path[1]
def last_but_one(self):
if len(self.path) < 2:
return None
return self.path[-2]
def last(self):
if len(self.path) < 1:
return None
return self.path[-1]
def first(self):
return self.path[0]
def is_always_pure(self, exclude_first=False, exclude_last=False):
last = len(self.path)-1
count = len(self.path)
i = 0
if exclude_first:
i += 1
if exclude_last:
count -= 1
while i < count:
node = self.path[i]
if node.is_imaginary():
i += 1
continue
op = node.getoperation()
if op.is_guard():
descr = op.getdescr()
if not descr:
return False
assert isinstance(descr, AbstractFailDescr)
if not descr.exits_early():
return False
elif not op.is_always_pure():
return False
i += 1
return True
def set_schedule_priority(self, p):
for node in self.path:
node.setpriority(p)
def walk(self, node):
self.path.append(node)
def cut_off_at(self, index):
self.path = self.path[:index]
def check_acyclic(self):
"""NOT_RPYTHON"""
seen = set()
for segment in self.path:
if segment in seen:
print "path:"
for segment in self.path:
print " ->", segment
print ""
assert 0, "segment %s was already seen. this makes the path cyclic!" % segment
else:
seen.add(segment)
return True
def clone(self):
return Path(self.path[:])
def as_str(self):
""" NOT_RPYTHON """
return ' -> '.join([str(p) for p in self.path])
class Node(object):
def __init__(self, op, opidx):
self.op = op
self.opidx = opidx
self.adjacent_list = []
self.adjacent_list_back = []
self.memory_ref = None
self.pack = None
self.pack_position = -1
self.emitted = False
self.schedule_position = -1
self.priority = 0
self._stack = False
def is_imaginary(self):
return False
def getoperation(self):
return self.op
def getindex(self):
return self.opidx
def getopnum(self):
return self.op.getopnum()
def getopname(self):
return self.op.getopname()
def setpriority(self, value):
self.priority = value
def can_be_relaxed(self):
return self.op.getopnum() in (rop.GUARD_TRUE, rop.GUARD_FALSE)
def edge_to(self, to, arg=None, failarg=False, label=None):
if self is to:
return
dep = self.depends_on(to)
if not dep:
#if force or self.independent(idx_from, idx_to):
dep = Dependency(self, to, arg, failarg)
self.adjacent_list.append(dep)
dep_back = Dependency(to, self, arg, failarg)
dep.backward = dep_back
to.adjacent_list_back.append(dep_back)
if not we_are_translated():
if label is None:
label = ''
dep.label = label
else:
if not dep.because_of(arg):
dep.add_dependency(self,to,arg)
# if a fail argument is overwritten by another normal
# dependency it will remove the failarg flag
if not (dep.is_failarg() and failarg):
dep.set_failarg(False)
if not we_are_translated() and label is not None:
_label = getattr(dep, 'label', '')
dep.label = _label + ", " + label
return dep
def clear_dependencies(self):
self.adjacent_list = []
self.adjacent_list_back = []
def exits_early(self):
if self.op.is_guard():
descr = self.op.getdescr()
return descr.exits_early()
return False
def loads_from_complex_object(self):
return rop._ALWAYS_PURE_LAST <= self.op.getopnum() < rop._MALLOC_FIRST
def modifies_complex_object(self):
return rop.SETARRAYITEM_GC <= self.op.getopnum() <= rop.UNICODESETITEM
def side_effect_arguments(self):
# if an item in array p0 is modified or a call contains an argument
# it can modify it is returned in the destroyed list.
args = []
op = self.op
if self.modifies_complex_object():
for opnum, i, j in unrolling_iterable(MODIFY_COMPLEX_OBJ):
if op.getopnum() == opnum:
op_args = op.getarglist()
if j == -1:
args.append((op.getarg(i), None, True))
for j in range(i+1,len(op_args)):
args.append((op.getarg(j), None, False))
else:
args.append((op.getarg(i), op.getarg(j), True))
for x in range(j+1,len(op_args)):
args.append((op.getarg(x), None, False))
return args
# assume this destroys every argument... can be enhanced by looking
# at the effect info of a call for instance
for arg in op.getarglist():
# if it is a constant argument it cannot be destroyed.
# neither can a box float be destroyed. BoxInt can
# contain a reference thus it is assumed to be destroyed
if arg.is_constant() or arg.type == 'f':
args.append((arg, None, False))
else:
args.append((arg, None, True))
return args
def provides_count(self):
return len(self.adjacent_list)
def provides(self):
return self.adjacent_list
def depends_count(self):
return len(self.adjacent_list_back)
def depends(self):
return self.adjacent_list_back
def depends_on(self, to):
""" Does there exist a dependency from the instruction to another?
Returns None if there is no dependency or the Dependency object in
any other case.
"""
for edge in self.adjacent_list:
if edge.to is to:
return edge
return None
def dependencies(self):
return self.adjacent_list[:] + self.adjacent_list_back[:] # COPY
def is_after(self, other):
return self.opidx > other.opidx
def is_before(self, other):
return self.opidx < other.opidx
def independent(self, other):
""" An instruction depends on another if there is a path from
self to other. """
if self == other:
return True
# forward
worklist = [self]
while len(worklist) > 0:
node = worklist.pop()
for dep in node.provides():
if dep.to.is_after(other):
continue
if dep.points_to(other):
# dependent. There is a path from self to other
return False
worklist.append(dep.to)
# backward
worklist = [self]
while len(worklist) > 0:
node = worklist.pop()
for dep in node.depends():
if dep.to.is_before(other):
continue
if dep.points_to(other):
# dependent. There is a path from self to other
return False
worklist.append(dep.to)
return True
def iterate_paths(self, to, backwards=False, path_max_len=-1, blacklist=False):
""" Yield all nodes from self leading to 'to'.
backwards: Determines the iteration direction.
blacklist: Marks nodes that have already been visited.
It comes in handy if a property must hold for every path.
Not *every* possible instance must be iterated, but trees
that have already been visited can be ignored after the
first visit.
"""
if self is to:
return
blacklist_visit = {}
path = Path([self])
worklist = [(0, self, 1)]
while len(worklist) > 0:
index,node,pathlen = worklist.pop()
if backwards:
iterdir = node.depends()
else:
iterdir = node.provides()
if index >= len(iterdir):
if to is None and index == 0:
yield Path(path.path[:])
if blacklist:
blacklist_visit[node] = None
continue
else:
next_dep = iterdir[index]
next_node = next_dep.to
index += 1
if index < len(iterdir):
worklist.append((index, node, pathlen))
else:
blacklist_visit[node] = None
path.cut_off_at(pathlen)
path.walk(next_node)
if blacklist and next_node in blacklist_visit:
yield Path(path.path[:])
continue
pathlen += 1
if next_node is to or \
(path_max_len > 0 and pathlen >= path_max_len):
yield Path(path.path[:])
                    # note that the destination node ``to'' is never blacklisted
#if blacklist:
# blacklist_visit[next_node] = None
else:
worklist.append((0, next_node, pathlen))
def remove_edge_to(self, node):
i = 0
while i < len(self.adjacent_list):
dep = self.adjacent_list[i]
if dep.to is node:
del self.adjacent_list[i]
break
i += 1
i = 0
while i < len(node.adjacent_list_back):
dep = node.adjacent_list_back[i]
if dep.to is self:
del node.adjacent_list_back[i]
break
i += 1
def getedge_to(self, other):
for dep in self.adjacent_list:
if dep.to == other:
return dep
return None
def __repr__(self):
pack = ''
if self.pack:
pack = "p: %d" % self.pack.numops()
return "Node(%s,%s i: %d)" % (self.op, pack, self.opidx)
def getdotlabel(self):
""" NOT_RPTYHON """
op_str = str(self.op)
if self.op.is_guard():
args_str = []
for arg in self.op.getfailargs():
name = 'None'
if arg:
name = arg.repr_short(arg._repr_memo)
args_str.append(name)
op_str += " " + ','.join(args_str)
return "[%d] %s" % (self.opidx, op_str)
class ImaginaryNode(Node):
_index = 987654321 # big enough? :)
def __init__(self, label):
index = -1
if not we_are_translated():
self.dotlabel = label
index = ImaginaryNode._index
ImaginaryNode._index += 1
Node.__init__(self, None, index)
def is_imaginary(self):
return True
def getdotlabel(self):
""" NOT_RPTYHON """
return self.dotlabel
class Dependency(object):
def __init__(self, at, to, arg, failarg=False):
assert at != to
self.args = []
if arg is not None:
self.add_dependency(at, to, arg)
self.at = at
self.to = to
self.failarg = failarg
self.backward = None
def because_of(self, var):
for arg in self.args:
if arg[1] == var:
return True
return False
def target_node(self):
return self.to
def origin_node(self):
return self.at
def to_index(self):
return self.to.getindex()
def at_index(self):
return self.at.getindex()
def points_after_to(self, to):
return self.to.opidx < to.opidx
def points_above_at(self, at):
return self.at.opidx < at.opidx
def i_points_above_at(self, idx):
return self.at.opidx < idx
def points_to(self, to):
return self.to == to
def points_at(self, at):
return self.at == at
def add_dependency(self, at, to, arg):
self.args.append((at,arg))
def set_failarg(self, value):
self.failarg = value
if self.backward:
self.backward.failarg = value
def is_failarg(self):
return self.failarg
def reverse_direction(self, ref):
""" if the parameter index is the same as idx_to then
this edge is in reverse direction.
"""
return self.to == ref
def __repr__(self):
return 'Dep(T[%d] -> T[%d], arg: %s)' \
% (self.at.opidx, self.to.opidx, self.args)
class DefTracker(object):
def __init__(self, graph):
self.graph = graph
self.defs = {}
self.non_pure = []
def add_non_pure(self, node):
self.non_pure.append(node)
def define(self, arg, node, argcell=None):
if isinstance(arg, Const):
return
if arg in self.defs:
self.defs[arg].append((node,argcell))
else:
self.defs[arg] = [(node,argcell)]
def redefinitions(self, arg):
for _def in self.defs[arg]:
yield _def[0]
def is_defined(self, arg):
return arg in self.defs
def definition(self, arg, node=None, argcell=None):
if arg.is_constant():
return None
def_chain = self.defs.get(arg,None)
if not def_chain:
return None
if not argcell:
return def_chain[-1][0]
else:
assert node is not None
i = len(def_chain)-1
try:
mref = node.memory_ref
while i >= 0:
def_node = def_chain[i][0]
oref = def_node.memory_ref
if oref is not None and mref.alias(oref):
return def_node
elif oref is None:
return def_node
i -= 1
return None
except KeyError:
# when a key error is raised, this means
# no information is available, safe default
pass
return def_chain[-1][0]
def depends_on_arg(self, arg, to, argcell=None):
try:
at = self.definition(arg, to, argcell)
if at is None:
return
at.edge_to(to, arg)
except KeyError:
if not we_are_translated():
if not isinstance(arg, Const):
assert False, "arg %s must be defined" % arg
class DependencyGraph(object):
""" A graph that represents one of the following dependencies:
* True dependency
* Anti dependency (not present in SSA traces)
          * Output dependency (not present in SSA traces)
Traces in RPython are not in SSA form when it comes to complex
object modification such as array or object side effects.
        Representation is an adjacency list.  The number of edges between
        the vertices is expected to be small.
        Note that adjacency lists order their dependencies: they are
        ordered by the target instruction they point to.
memory_refs: a dict that contains indices of memory references
(load,store,getarrayitem,...). If none provided, the construction
is conservative. It will never dismiss dependencies of two
modifications of one array even if the indices can never point to
the same element.
"""
def __init__(self, loop):
self.loop = loop
self.label = Node(loop.label, 0)
self.nodes = [ Node(op,0) for op in loop.operations if not op.is_jit_debug() ]
for i,node in enumerate(self.nodes):
node.opidx = i+1
self.inodes = [] # imaginary nodes
self.jump = Node(loop.jump, len(self.nodes)+1)
self.invariant_vars = {}
self.update_invariant_vars()
self.memory_refs = {}
self.schedulable_nodes = []
self.index_vars = {}
self.comparison_vars = {}
self.guards = []
self.build_dependencies()
def getnode(self, i):
return self.nodes[i]
def imaginary_node(self, label):
node = ImaginaryNode(label)
self.inodes.append(node)
return node
def update_invariant_vars(self):
label_op = self.label.getoperation()
jump_op = self.jump.getoperation()
assert label_op.numargs() == jump_op.numargs()
for i in range(label_op.numargs()):
label_box = label_op.getarg(i)
jump_box = jump_op.getarg(i)
if label_box == jump_box:
self.invariant_vars[label_box] = None
def box_is_invariant(self, box):
return box in self.invariant_vars
def build_dependencies(self):
""" This is basically building the definition-use chain and saving this
information in a graph structure. This is the same as calculating
the reaching definitions and the 'looking back' whenever it is used.
Write After Read, Write After Write dependencies are not possible,
the operations are in SSA form
"""
tracker = DefTracker(self)
#
label_pos = 0
jump_pos = len(self.nodes)-1
intformod = IntegralForwardModification(self.memory_refs, self.index_vars,
self.comparison_vars, self.invariant_vars)
# pass 1
for i,node in enumerate(self.nodes):
op = node.op
if op.is_always_pure():
node.setpriority(1)
if op.is_guard():
node.setpriority(2)
# the label operation defines all operations at the
# beginning of the loop
intformod.inspect_operation(op,node)
# definition of a new variable
if op.type != 'v':
# In SSA form. Modifications get a new variable
tracker.define(op, node)
# usage of defined variables
if op.is_always_pure() or op.is_final():
# normal case every arguments definition is set
for arg in op.getarglist():
tracker.depends_on_arg(arg, node)
elif op.is_guard():
if node.exits_early():
pass
else:
# consider cross iterations?
if len(self.guards) > 0:
last_guard = self.guards[-1]
last_guard.edge_to(node, failarg=True, label="guardorder")
for nonpure in tracker.non_pure:
nonpure.edge_to(node, failarg=True, label="nonpure")
tracker.non_pure = []
self.guards.append(node)
self.build_guard_dependencies(node, tracker)
else:
self.build_non_pure_dependencies(node, tracker)
def guard_argument_protection(self, guard_node, tracker):
""" the parameters the guard protects are an indicator for
dependencies. Consider the example:
i3 = ptr_eq(p1,p2)
guard_true(i3) [...]
guard_true|false are exceptions because they do not directly
protect the arguments, but a comparison function does.
"""
guard_op = guard_node.getoperation()
guard_opnum = guard_op.getopnum()
for arg in guard_op.getarglist():
if not arg.is_constant() and arg.type not in ('i','f'):
# redefine pointers, consider the following example
# guard_nonnull(r1)
# i1 = getfield(r1, ...)
# guard must be emitted before the getfield, thus
# redefine r1 at guard_nonnull
tracker.define(arg, guard_node)
if guard_opnum == rop.GUARD_NOT_FORCED_2:
# must be emitted before finish, thus delayed the longest
guard_node.setpriority(-10)
elif guard_opnum in (rop.GUARD_OVERFLOW, rop.GUARD_NO_OVERFLOW):
# previous operation must be an ovf_operation
guard_node.setpriority(100)
i = guard_node.getindex()-1
while i >= 0:
node = self.nodes[i]
op = node.getoperation()
if op.is_ovf():
break
i -= 1
else:
raise AssertionError("(no)overflow: no overflowing op present")
node.edge_to(guard_node, None, label='overflow')
elif guard_opnum in (rop.GUARD_NO_EXCEPTION, rop.GUARD_EXCEPTION, rop.GUARD_NOT_FORCED):
# previous op must be one that can raise or a not forced guard
guard_node.setpriority(100)
i = guard_node.getindex() - 1
while i >= 0:
node = self.nodes[i]
op = node.getoperation()
if op.can_raise():
node.edge_to(guard_node, None, label='exception/notforced')
break
if op.is_guard():
node.edge_to(guard_node, None, label='exception/notforced')
break
i -= 1
else:
raise AssertionError("(no)exception/not_forced: not op raises for them")
else:
pass # not invalidated, future condition!
def guard_exit_dependence(self, guard_node, var, tracker):
def_node = tracker.definition(var)
if def_node is None:
return
for dep in def_node.provides():
if guard_node.is_before(dep.to) and dep.because_of(var):
guard_node.edge_to(dep.to, var, label='guard_exit('+str(var)+')')
def build_guard_dependencies(self, guard_node, tracker):
guard_op = guard_node.op
if guard_op.getopnum() >= rop.GUARD_FUTURE_CONDITION:
# ignore invalidated & future condition guard & early exit
return
# true dependencies
for arg in guard_op.getarglist():
tracker.depends_on_arg(arg, guard_node)
# dependencies to uses of arguments it protects
self.guard_argument_protection(guard_node, tracker)
#
descr = guard_op.getdescr()
if descr.exits_early():
return
# handle fail args
if guard_op.getfailargs():
for i,arg in enumerate(guard_op.getfailargs()):
if arg is None:
continue
if not tracker.is_defined(arg):
continue
try:
for at in tracker.redefinitions(arg):
# later redefinitions are prohibited
if at.is_before(guard_node):
at.edge_to(guard_node, arg, failarg=True, label="fail")
except KeyError:
assert False
def build_non_pure_dependencies(self, node, tracker):
op = node.op
if node.loads_from_complex_object():
# If this complex object load operation loads an index that has been
# modified, the last modification should be used to put a def-use edge.
for opnum, i, j in unrolling_iterable(LOAD_COMPLEX_OBJ):
if opnum == op.getopnum():
cobj = op.getarg(i)
if j != -1:
index_var = op.getarg(j)
tracker.depends_on_arg(cobj, node, index_var)
tracker.depends_on_arg(index_var, node)
else:
tracker.depends_on_arg(cobj, node)
break
else:
for arg, argcell, destroyed in node.side_effect_arguments():
if argcell is not None:
# tracks the exact cell that is modified
tracker.depends_on_arg(arg, node, argcell)
tracker.depends_on_arg(argcell, node)
else:
if destroyed:
                        # cannot be sure that only one cell is modified
# assume all cells are (equivalent to a redefinition)
try:
                            # A trace is not entirely in SSA form. Complex object
                            # modification introduces WAR/WAW dependencies
def_node = tracker.definition(arg)
if def_node:
for dep in def_node.provides():
if dep.to != node:
dep.to.edge_to(node, argcell, label='war')
def_node.edge_to(node, argcell)
except KeyError:
pass
else:
# not destroyed, just a normal use of arg
tracker.depends_on_arg(arg, node)
if destroyed:
tracker.define(arg, node, argcell=argcell)
# it must be assumed that a side effect operation must not be executed
# before the last guard operation
if len(self.guards) > 0:
last_guard = self.guards[-1]
last_guard.edge_to(node, label="sideeffect")
# and the next guard instruction
tracker.add_non_pure(node)
def cycles(self):
""" NOT_RPYTHON """
stack = []
for node in self.nodes:
node._stack = False
#
label = self.nodes[0]
if _first_cycle(stack, label):
return stack
return None
def __repr__(self):
graph = "graph([\n"
for node in self.nodes:
graph += " " + str(node.opidx) + ": "
for dep in node.provides():
graph += "=>" + str(dep.to.opidx) + ","
graph += " | "
for dep in node.depends():
graph += "<=" + str(dep.to.opidx) + ","
graph += "\n"
return graph + " ])"
def view(self):
""" NOT_RPYTHON """
from rpython.translator.tool.graphpage import GraphPage
page = GraphPage()
page.source = self.as_dot()
page.links = []
page.display()
def as_dot(self):
""" NOT_RPTYHON """
if not we_are_translated():
dot = "digraph dep_graph {\n"
for node in self.nodes + self.inodes:
dot += " n%d [label=\"%s\"];\n" % (node.getindex(),node.getdotlabel())
dot += "\n"
for node in self.nodes + self.inodes:
for dep in node.provides():
label = ''
if getattr(dep, 'label', None):
label = '[label="%s"]' % dep.label
dot += " n%d -> n%d %s;\n" % (node.getindex(),dep.to_index(),label)
dot += "\n}\n"
return dot
raise NotImplementedError("dot only for debug purpose")
def _first_cycle(stack, node):
node._stack = True
stack.append(node)
for dep in node.provides():
succ = dep.to
if succ._stack:
# found cycle!
while stack[0] is not succ:
del stack[0]
return True
else:
return _first_cycle(stack, succ)
return False
def _strongly_connect(index, stack, cycles, node):
""" currently unused """
node._scc_index = index
node._scc_lowlink = index
index += 1
stack.append(node)
node._scc_stack = True
for dep in node.provides():
succ = dep.to
if succ._scc_index == -1:
index = _strongly_connect(index, stack, cycles, succ)
node._scc_lowlink = min(node._scc_lowlink, succ._scc_lowlink)
elif succ._scc_stack:
node._scc_lowlink = min(node._scc_lowlink, succ._scc_index)
if node._scc_lowlink == node._scc_index:
cycle = []
while True:
w = stack.pop()
w._scc_stack = False
cycle.append(w)
if w is node:
break
cycles.append(cycle)
return index
class IntegralForwardModification(object):
""" Calculates integral modifications on integer boxes. """
def __init__(self, memory_refs, index_vars, comparison_vars, invariant_vars):
self.index_vars = index_vars
self.comparison_vars = comparison_vars
self.memory_refs = memory_refs
self.invariant_vars = invariant_vars
def is_const_integral(self, box):
if isinstance(box, ConstInt):
return True
return False
def get_or_create(self, arg):
var = self.index_vars.get(arg, None)
if not var:
var = self.index_vars[arg] = IndexVar(arg)
return var
additive_func_source = """
def operation_{name}(self, op, node):
box_r = op
box_a0 = op.getarg(0)
box_a1 = op.getarg(1)
if self.is_const_integral(box_a0) and self.is_const_integral(box_a1):
idx_ref = IndexVar(box_r)
idx_ref.constant = box_a0.getint() {op} box_a1.getint()
self.index_vars[box_r] = idx_ref
elif self.is_const_integral(box_a0):
idx_ref = self.get_or_create(box_a1)
idx_ref = idx_ref.clone()
idx_ref.constant {op}= box_a0.getint()
self.index_vars[box_r] = idx_ref
elif self.is_const_integral(box_a1):
idx_ref = self.get_or_create(box_a0)
idx_ref = idx_ref.clone()
idx_ref.constant {op}= box_a1.getint()
self.index_vars[box_r] = idx_ref
"""
exec py.code.Source(additive_func_source
.format(name='INT_ADD', op='+')).compile()
exec py.code.Source(additive_func_source
.format(name='INT_SUB', op='-')).compile()
del additive_func_source
multiplicative_func_source = """
def operation_{name}(self, op, node):
box_r = op
if not box_r:
return
box_a0 = op.getarg(0)
box_a1 = op.getarg(1)
if self.is_const_integral(box_a0) and self.is_const_integral(box_a1):
idx_ref = IndexVar(box_r)
idx_ref.constant = box_a0.getint() {cop} box_a1.getint()
self.index_vars[box_r] = idx_ref
elif self.is_const_integral(box_a0):
idx_ref = self.get_or_create(box_a1)
idx_ref = idx_ref.clone()
idx_ref.coefficient_{tgt} *= box_a0.getint()
idx_ref.constant {cop}= box_a0.getint()
self.index_vars[box_r] = idx_ref
elif self.is_const_integral(box_a1):
idx_ref = self.get_or_create(box_a0)
idx_ref = idx_ref.clone()
idx_ref.coefficient_{tgt} {op}= box_a1.getint()
idx_ref.constant {cop}= box_a1.getint()
self.index_vars[box_r] = idx_ref
"""
exec py.code.Source(multiplicative_func_source
.format(name='INT_MUL', op='*', tgt='mul', cop='*')).compile()
exec py.code.Source(multiplicative_func_source
.format(name='INT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
exec py.code.Source(multiplicative_func_source
.format(name='UINT_FLOORDIV', op='*', tgt='div', cop='/')).compile()
del multiplicative_func_source
array_access_source = """
def operation_{name}(self, op, node):
descr = op.getdescr()
idx_ref = self.get_or_create(op.getarg(1))
if descr and descr.is_array_of_primitives():
node.memory_ref = MemoryRef(op, idx_ref, {raw_access})
self.memory_refs[node] = node.memory_ref
"""
exec py.code.Source(array_access_source
.format(name='RAW_LOAD_I',raw_access=True)).compile()
exec py.code.Source(array_access_source
.format(name='RAW_LOAD_F',raw_access=True)).compile()
exec py.code.Source(array_access_source
.format(name='RAW_STORE',raw_access=True)).compile()
exec py.code.Source(array_access_source
.format(name='GETARRAYITEM_RAW_I',raw_access=False)).compile()
exec py.code.Source(array_access_source
.format(name='GETARRAYITEM_RAW_F',raw_access=False)).compile()
exec py.code.Source(array_access_source
.format(name='SETARRAYITEM_RAW',raw_access=False)).compile()
exec py.code.Source(array_access_source
.format(name='GETARRAYITEM_GC_I',raw_access=False)).compile()
exec py.code.Source(array_access_source
.format(name='GETARRAYITEM_GC_F',raw_access=False)).compile()
exec py.code.Source(array_access_source
.format(name='SETARRAYITEM_GC',raw_access=False)).compile()
del array_access_source
integral_dispatch_opt = make_dispatcher_method(IntegralForwardModification, 'operation_')
IntegralForwardModification.inspect_operation = integral_dispatch_opt
del integral_dispatch_opt
class IndexVar(AbstractValue):
""" IndexVar is an AbstractValue only to ensure that a box can be assigned
to the same variable as an index var.
"""
def __init__(self, var, coeff_mul=1, coeff_div=1, constant=0):
self.var = var
self.coefficient_mul = coeff_mul
self.coefficient_div = coeff_div
self.constant = constant
# saves the next modification that uses a variable
self.next_nonconst = None
self.current_end = None
def stride_const(self):
return self.next_nonconst is None
def add_const(self, number):
if self.current_end is None:
self.constant += number
else:
self.current_end.constant += number
def set_next_nonconst_mod(self, idxvar):
if self.current_end is None:
self.next_nonconst = idxvar
else:
self.current_end.next_nonconst = idxvar
self.current_end = idxvar
def getvariable(self):
return self.var
def is_identity(self):
return self.coefficient_mul == 1 and \
self.coefficient_div == 1 and \
self.constant == 0
def clone(self):
c = IndexVar(self.var)
c.coefficient_mul = self.coefficient_mul
c.coefficient_div = self.coefficient_div
c.constant = self.constant
return c
def same_variable(self, other):
assert isinstance(other, IndexVar)
return other.var == self.var
def same_mulfactor(self, other):
coeff = self.coefficient_mul == other.coefficient_mul
coeff = coeff and (self.coefficient_div == other.coefficient_div)
if not coeff:
# if not equal, try to check if they divide without rest
selfmod = self.coefficient_mul % self.coefficient_div
othermod = other.coefficient_mul % other.coefficient_div
if selfmod == 0 and othermod == 0:
# yet another chance for them to be equal
selfdiv = self.coefficient_mul // self.coefficient_div
otherdiv = other.coefficient_mul // other.coefficient_div
coeff = selfdiv == otherdiv
return coeff
def constant_diff(self, other):
""" calculates the difference as a second parameter """
assert isinstance(other, IndexVar)
return self.constant - other.constant
def emit_operations(self, opt, result_box=None):
var = self.var
if self.is_identity():
return var
if self.coefficient_mul != 1:
args = [var, ConstInt(self.coefficient_mul)]
var = ResOperation(rop.INT_MUL, args)
opt.emit_operation(var)
if self.coefficient_div != 1:
args = [var, ConstInt(self.coefficient_div)]
var = ResOperation(rop.INT_FLOORDIV, args)
opt.emit_operation(var)
if self.constant > 0:
args = [var, ConstInt(self.constant)]
var = ResOperation(rop.INT_ADD, args)
opt.emit_operation(var)
if self.constant < 0:
args = [var, ConstInt(self.constant)]
var = ResOperation(rop.INT_SUB, args)
opt.emit_operation(var)
return var
def compare(self, other):
""" Returns if the two are compareable as a first result
and a number (-1,0,1) of the ordering
"""
coeff = self.coefficient_mul == other.coefficient_mul
coeff = coeff and (self.coefficient_div == other.coefficient_div)
if not coeff:
# if not equal, try to check if they divide without rest
selfmod = self.coefficient_mul % self.coefficient_div
othermod = other.coefficient_mul % other.coefficient_div
if selfmod == 0 and othermod == 0:
# yet another chance for them to be equal
selfdiv = self.coefficient_mul // self.coefficient_div
otherdiv = other.coefficient_mul // other.coefficient_div
coeff = selfdiv == otherdiv
#
if not coeff:
return False, 0
#
c = (self.constant - other.constant)
svar = self.var
ovar = other.var
if isinstance(svar, ConstInt) and isinstance(ovar, ConstInt):
return True, (svar.getint() - ovar.getint())
if svar.same_box(ovar):
return True, c
return False, 0
def __eq__(self, other):
if not self.same_variable(other):
return False
if not self.same_mulfactor(other):
return False
return self.constant_diff(other) == 0
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.is_identity():
return 'idx(%s)' % (self.var,)
return 'idx(%s*(%s/%s)+%s)' % (self.var, self.coefficient_mul,
self.coefficient_div, self.constant)
class MemoryRef(object):
""" a memory reference to an array object. IntegralForwardModification is able
to propagate changes to this object if applied in backwards direction.
Example:
i1 = int_add(i0,1)
i2 = int_mul(i1,2)
setarrayitem_gc(p0, i2, 1, ...)
will result in the linear combination i0 * (2/1) + 2
"""
def __init__(self, op, index_var, raw_access=False):
assert op.getdescr() is not None
self.array = op.getarg(0)
self.descr = op.getdescr()
self.index_var = index_var
self.raw_access = raw_access
def is_adjacent_to(self, other):
""" this is a symmetric relation """
if not self.same_array(other):
return False
if not self.index_var.same_variable(other.index_var):
return False
if not self.index_var.same_mulfactor(other.index_var):
return False
stride = self.stride()
return abs(self.index_var.constant_diff(other.index_var)) - stride == 0
def is_adjacent_after(self, other):
""" the asymetric relation to is_adjacent_to """
if not self.same_array(other):
return False
if not self.index_var.same_variable(other.index_var):
return False
if not self.index_var.same_mulfactor(other.index_var):
return False
stride = self.stride()
return other.index_var.constant_diff(self.index_var) == stride
def alias(self, other):
""" is this reference an alias to other?
they can alias iff self.origin != other.origin, or their
linear combination point to the same element.
"""
assert other is not None
if not self.same_array(other):
return False
svar = self.index_var
ovar = other.index_var
if not svar.same_variable(ovar):
return True
if not svar.same_mulfactor(ovar):
return True
return abs(svar.constant_diff(ovar)) < self.stride()
def same_array(self, other):
return self.array is other.array and self.descr == other.descr
def __eq__(self, other):
""" NOT_RPYTHON """
if not self.same_array(other):
return False
if not self.index_var.same_variable(other.index_var):
return False
if not self.index_var.same_mulfactor(other.index_var):
return False
stride = self.stride()
return other.index_var.constant_diff(self.index_var) == 0
#def __ne__(self, other):
# return not self.__eq__(other)
def stride(self):
""" the stride in bytes """
if not self.raw_access:
return 1
return self.descr.get_item_size_in_bytes()
def __repr__(self):
return 'MemRef(%s,%s)' % (self.array, self.index_var)
```
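`IndexVar` models an array index as the linear form `var * (coefficient_mul / coefficient_div) + constant`, and `MemoryRef.is_adjacent_after` reports adjacency when two references share the variable and the scale factor while their constants differ by exactly one stride. The toy below replays that arithmetic in plain Python; `ToyIndexVar` and `adjacent_after` are simplified stand-ins (the real `same_mulfactor` also accepts coefficient pairs that reduce to the same ratio).
```python
class ToyIndexVar(object):
    def __init__(self, var, mul=1, div=1, const=0):
        self.var, self.mul, self.div, self.const = var, mul, div, const

def adjacent_after(a, b, stride):
    # Mirrors MemoryRef.is_adjacent_after: same variable, same scale
    # factor, and b's constant lies exactly one stride past a's.
    if a.var != b.var:
        return False
    if (a.mul, a.div) != (b.mul, b.div):
        return False
    return b.const - a.const == stride

# e.g. raw stores at i0*2 and i0*2+8 into an array of 8-byte elements:
first = ToyIndexVar('i0', mul=2, const=0)
second = ToyIndexVar('i0', mul=2, const=8)
assert adjacent_after(first, second, stride=8)
assert not adjacent_after(first, second, stride=4)
```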
#### File: metainterp/optimizeopt/heap.py
```python
import os
from collections import OrderedDict
from rpython.jit.codewriter.effectinfo import EffectInfo
from rpython.jit.metainterp.optimizeopt.util import args_dict
from rpython.jit.metainterp.history import Const, ConstInt
from rpython.jit.metainterp.jitexc import JitException
from rpython.jit.metainterp.optimizeopt.optimizer import Optimization, REMOVED
from rpython.jit.metainterp.optimizeopt.util import make_dispatcher_method
from rpython.jit.metainterp.optimizeopt.intutils import IntBound
from rpython.jit.metainterp.optimizeopt.shortpreamble import PreambleOp
from rpython.jit.metainterp.optimize import InvalidLoop
from rpython.jit.metainterp.resoperation import rop, ResOperation, OpHelpers,\
AbstractResOp, GuardResOp
from rpython.rlib.objectmodel import we_are_translated
from rpython.jit.metainterp.optimizeopt import info
class BogusImmutableField(JitException):
pass
class CachedField(object):
def __init__(self):
# Cache information for a field descr, or for an (array descr, index)
# pair. It can be in one of two states:
#
# 1. 'cached_infos' is a list listing all the infos that are
# caching this descr
#
# 2. we just did one setfield, which is delayed (and thus
# not synchronized). 'lazy_setfield' is the delayed
# ResOperation. In this state, 'cached_infos' contains
# out-of-date information. More precisely, the field
# value pending in the ResOperation is *not* visible in
# 'cached_infos'.
#
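        # Example (hypothetical trace): after
        #     setfield_gc(p1, i1, descr=valuedescr)
        # the CachedField for 'valuedescr' is in state 2, with
        # '_lazy_setfield' holding that operation; a later
        #     i2 = getfield_gc_i(p1, descr=valuedescr)
        # is answered from the cache (i2 -> i1) without the setfield
        # ever being emitted, unless possible aliasing or a guard
        # forces it first.
        #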
self.cached_infos = []
self.cached_structs = []
self._lazy_setfield = None
self._lazy_setfield_registered = False
def register_dirty_field(self, structop, info):
self.cached_structs.append(structop)
self.cached_infos.append(info)
def invalidate(self, descr):
for opinfo in self.cached_infos:
assert isinstance(opinfo, info.AbstractStructPtrInfo)
opinfo._fields[descr.get_index()] = None
self.cached_infos = []
self.cached_structs = []
def produce_potential_short_preamble_ops(self, optimizer, shortboxes,
descr, index=-1):
assert self._lazy_setfield is None
for i, info in enumerate(self.cached_infos):
structbox = optimizer.get_box_replacement(self.cached_structs[i])
info.produce_short_preamble_ops(structbox, descr, index, optimizer,
shortboxes)
def possible_aliasing(self, optheap, opinfo):
# If lazy_setfield is set and contains a setfield on a different
# structvalue, then we are annoyed, because it may point to either
# the same or a different structure at runtime.
# XXX constants?
return (self._lazy_setfield is not None
and (not optheap.getptrinfo(
self._lazy_setfield.getarg(0)).same_info(opinfo)))
def do_setfield(self, optheap, op):
# Update the state with the SETFIELD_GC/SETARRAYITEM_GC operation 'op'.
structinfo = optheap.ensure_ptr_info_arg0(op)
arg1 = optheap.get_box_replacement(self._getvalue(op))
if self.possible_aliasing(optheap, structinfo):
self.force_lazy_setfield(optheap, op.getdescr())
assert not self.possible_aliasing(optheap, structinfo)
cached_field = self._getfield(structinfo, op.getdescr(), optheap, False)
if cached_field is not None:
cached_field = optheap.get_box_replacement(cached_field)
# Hack to ensure constants are imported from the preamble
# XXX no longer necessary?
#if cached_fieldvalue and fieldvalue.is_constant():
# optheap.optimizer.ensure_imported(cached_fieldvalue)
# cached_fieldvalue = self._cached_fields.get(structvalue, None)
if not cached_field or not cached_field.same_box(arg1):
# common case: store the 'op' as lazy_setfield, and register
# myself in the optheap's _lazy_setfields_and_arrayitems list
self._lazy_setfield = op
#if not self._lazy_setfield_registered:
# self._lazy_setfield_registered = True
else:
# this is the case where the pending setfield ends up
# storing precisely the value that is already there,
# as proved by 'cached_fields'. In this case, we don't
# need any _lazy_setfield: the heap value is already right.
# Note that this may reset to None a non-None lazy_setfield,
# cancelling its previous effects with no side effect.
# Now, we have to force the item in the short preamble
self._getfield(structinfo, op.getdescr(), optheap)
self._lazy_setfield = None
def getfield_from_cache(self, optheap, opinfo, descr):
# Returns the up-to-date field's value, or None if not cached.
if self.possible_aliasing(optheap, opinfo):
self.force_lazy_setfield(optheap, descr)
if self._lazy_setfield is not None:
op = self._lazy_setfield
return optheap.get_box_replacement(self._getvalue(op))
else:
res = self._getfield(opinfo, descr, optheap)
if res is not None:
return res.get_box_replacement()
return None
def _getvalue(self, op):
return op.getarg(1)
def _getfield(self, opinfo, descr, optheap, true_force=True):
res = opinfo.getfield(descr, optheap)
if isinstance(res, PreambleOp):
if not true_force:
return res.op
res = optheap.optimizer.force_op_from_preamble(res)
opinfo.setfield(descr, None, res, optheap)
return res
def force_lazy_setfield(self, optheap, descr, can_cache=True):
op = self._lazy_setfield
if op is not None:
# This is the way _lazy_setfield is usually reset to None.
# Now we clear _cached_fields, because actually doing the
# setfield might impact any of the stored result (because of
# possible aliasing).
self.invalidate(descr)
self._lazy_setfield = None
if optheap.postponed_op:
for a in op.getarglist():
if a is optheap.postponed_op:
optheap.emit_postponed_op()
break
optheap.next_optimization.propagate_forward(op)
if not can_cache:
return
# Once it is done, we can put at least one piece of information
# back in the cache: the value of this particular structure's
# field.
opinfo = optheap.ensure_ptr_info_arg0(op)
self._setfield(op, opinfo, optheap)
elif not can_cache:
self.invalidate(descr)
def _setfield(self, op, opinfo, optheap):
arg = optheap.get_box_replacement(op.getarg(1))
struct = optheap.get_box_replacement(op.getarg(0))
opinfo.setfield(op.getdescr(), struct, arg, optheap, self)
class ArrayCachedField(CachedField):
def __init__(self, index):
self.index = index
CachedField.__init__(self)
def _getvalue(self, op):
return op.getarg(2)
def _getfield(self, opinfo, descr, optheap, true_force=True):
res = opinfo.getitem(descr, self.index, optheap)
if (isinstance(res, PreambleOp) and
optheap.optimizer.cpu.supports_guard_gc_type):
if not true_force:
return res.op
index = res.preamble_op.getarg(1).getint()
res = optheap.optimizer.force_op_from_preamble(res)
opinfo.setitem(descr, index, None, res, optheap=optheap)
return res
def _setfield(self, op, opinfo, optheap):
arg = optheap.get_box_replacement(op.getarg(2))
struct = optheap.get_box_replacement(op.getarg(0))
opinfo.setitem(op.getdescr(), self.index, struct, arg, self, optheap)
def invalidate(self, descr):
for opinfo in self.cached_infos:
assert isinstance(opinfo, info.ArrayPtrInfo)
opinfo._items = None
self.cached_infos = []
self.cached_structs = []
class OptHeap(Optimization):
"""Cache repeated heap accesses"""
def __init__(self):
# mapping descr -> CachedField
self.cached_fields = OrderedDict()
self.cached_arrayitems = OrderedDict()
self.postponed_op = None
# XXXX the rest is old
# cached array items: {array descr: {index: CachedField}}
#self.cached_arrayitems = {}
# cached dict items: {dict descr: {(optval, index): box-or-const}}
self.cached_dict_reads = {}
# cache of corresponding {array descrs: dict 'entries' field descr}
self.corresponding_array_descrs = {}
#
self._lazy_setfields_and_arrayitems = []
self._remove_guard_not_invalidated = False
self._seen_guard_not_invalidated = False
def setup(self):
self.optimizer.optheap = self
        # mapping const value -> info corresponding to its heap cache
self.const_infos = self.optimizer.cpu.ts.new_ref_dict()
def flush(self):
self.cached_dict_reads.clear()
self.corresponding_array_descrs.clear()
self.force_all_lazy_setfields_and_arrayitems()
self.emit_postponed_op()
def emit_postponed_op(self):
if self.postponed_op:
postponed_op = self.postponed_op
self.postponed_op = None
self.next_optimization.propagate_forward(postponed_op)
def produce_potential_short_preamble_ops(self, sb):
descrkeys = self.cached_fields.keys()
if not we_are_translated():
            # XXX Pure operations on boxes that are cached in several places
            # will only be removed from the peeled loop when read from the
            # first place discovered here. This is far from ideal, as it
            # makes the effectiveness of our optimization a bit random. It
            # should however always generate correct results. For tests we
            # don't want this randomness.
descrkeys.sort(key=str, reverse=True)
for descr in descrkeys:
d = self.cached_fields[descr]
d.produce_potential_short_preamble_ops(self.optimizer, sb, descr)
for descr, submap in self.cached_arrayitems.items():
for index, d in submap.items():
d.produce_potential_short_preamble_ops(self.optimizer, sb,
descr, index)
def register_dirty_field(self, descr, op, info):
self.field_cache(descr).register_dirty_field(op, info)
def register_dirty_array_field(self, arraydescr, op, index, info):
self.arrayitem_cache(arraydescr, index).register_dirty_field(op, info)
def clean_caches(self):
del self._lazy_setfields_and_arrayitems[:]
items = self.cached_fields.items()
if not we_are_translated():
items.sort(key=str, reverse=True)
for descr, cf in items:
if not descr.is_always_pure():
cf.invalidate(descr)
for descr, submap in self.cached_arrayitems.iteritems():
if not descr.is_always_pure():
for index, cf in submap.iteritems():
cf.invalidate(None)
#self.cached_arrayitems.clear()
self.cached_dict_reads.clear()
def field_cache(self, descr):
try:
cf = self.cached_fields[descr]
except KeyError:
cf = self.cached_fields[descr] = CachedField()
return cf
def arrayitem_cache(self, descr, index):
try:
submap = self.cached_arrayitems[descr]
except KeyError:
submap = self.cached_arrayitems[descr] = {}
try:
cf = submap[index]
except KeyError:
cf = submap[index] = ArrayCachedField(index)
return cf
def emit_operation(self, op):
self.emitting_operation(op)
self.emit_postponed_op()
if (op.is_comparison() or op.is_call_may_force()
or op.is_ovf()):
self.postponed_op = op
else:
Optimization.emit_operation(self, op)
def emitting_operation(self, op):
if op.has_no_side_effect():
return
if op.is_ovf():
return
if op.is_guard():
self.optimizer.pendingfields = (
self.force_lazy_setfields_and_arrayitems_for_guard())
return
opnum = op.getopnum()
if (opnum == rop.SETFIELD_GC or # handled specially
opnum == rop.SETFIELD_RAW or # no effect on GC struct/array
opnum == rop.SETARRAYITEM_GC or # handled specially
opnum == rop.SETARRAYITEM_RAW or # no effect on GC struct
opnum == rop.SETINTERIORFIELD_RAW or # no effect on GC struct
opnum == rop.RAW_STORE or # no effect on GC struct
opnum == rop.STRSETITEM or # no effect on GC struct/array
opnum == rop.UNICODESETITEM or # no effect on GC struct/array
opnum == rop.DEBUG_MERGE_POINT or # no effect whatsoever
opnum == rop.JIT_DEBUG or # no effect whatsoever
opnum == rop.ENTER_PORTAL_FRAME or # no effect whatsoever
opnum == rop.LEAVE_PORTAL_FRAME or # no effect whatsoever
opnum == rop.COPYSTRCONTENT or # no effect on GC struct/array
opnum == rop.COPYUNICODECONTENT): # no effect on GC struct/array
return
if op.is_call():
if op.is_call_assembler():
self._seen_guard_not_invalidated = False
else:
effectinfo = op.getdescr().get_extra_info()
if effectinfo.check_can_invalidate():
self._seen_guard_not_invalidated = False
if not effectinfo.has_random_effects():
self.force_from_effectinfo(effectinfo)
return
self.force_all_lazy_setfields_and_arrayitems()
self.clean_caches()
def optimize_CALL_I(self, op):
# dispatch based on 'oopspecindex' to a method that handles
# specifically the given oopspec call. For non-oopspec calls,
# oopspecindex is just zero.
effectinfo = op.getdescr().get_extra_info()
oopspecindex = effectinfo.oopspecindex
if oopspecindex == EffectInfo.OS_DICT_LOOKUP:
if self._optimize_CALL_DICT_LOOKUP(op):
return
self.emit_operation(op)
optimize_CALL_F = optimize_CALL_I
optimize_CALL_R = optimize_CALL_I
optimize_CALL_N = optimize_CALL_I
def _optimize_CALL_DICT_LOOKUP(self, op):
# Cache consecutive lookup() calls on the same dict and key,
# depending on the 'flag_store' argument passed:
# FLAG_LOOKUP: always cache and use the cached result.
# FLAG_STORE: don't cache (it might return -1, which would be
# incorrect for future lookups); but if found in
# the cache and the cached value was already checked
# non-negative, then we can reuse it.
# FLAG_DELETE: never cache, never use the cached result (because
# if there is a cached result, the FLAG_DELETE call
# is needed for its side-effect of removing it).
# In theory we could cache a -1 for the case where
# the delete is immediately followed by a lookup,
# but too obscure.
#
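        # Example (hypothetical trace): two FLAG_LOOKUP calls on the same
        # dict box p0 and key box i0,
        #     i3 = call_i(ConstClass(ll_dict_lookup), p0, i0, i1, FLAG_LOOKUP)
        #     i4 = call_i(ConstClass(ll_dict_lookup), p0, i0, i1, FLAG_LOOKUP)
        # let the second call be removed and i4 aliased to i3 via
        # make_equal_to() below.
        #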
from rpython.rtyper.lltypesystem.rordereddict import FLAG_LOOKUP
from rpython.rtyper.lltypesystem.rordereddict import FLAG_STORE
flag_value = self.getintbound(op.getarg(4))
if not flag_value.is_constant():
return False
flag = flag_value.getint()
if flag != FLAG_LOOKUP and flag != FLAG_STORE:
return False
#
descrs = op.getdescr().get_extra_info().extradescrs
assert descrs # translation hint
descr1 = descrs[0]
try:
d = self.cached_dict_reads[descr1]
except KeyError:
d = self.cached_dict_reads[descr1] = args_dict()
self.corresponding_array_descrs[descrs[1]] = descr1
#
key = [self.optimizer.get_box_replacement(op.getarg(1)), # dict
self.optimizer.get_box_replacement(op.getarg(2))] # key
# other args can be ignored here (hash, store_flag)
try:
res_v = d[key]
except KeyError:
if flag == FLAG_LOOKUP:
d[key] = op
return False
else:
if flag != FLAG_LOOKUP:
if not self.getintbound(res_v).known_ge(IntBound(0, 0)):
return False
self.make_equal_to(op, res_v)
self.last_emitted_operation = REMOVED
return True
def optimize_GUARD_NO_EXCEPTION(self, op):
if self.last_emitted_operation is REMOVED:
return
self.emit_operation(op)
optimize_GUARD_EXCEPTION = optimize_GUARD_NO_EXCEPTION
def force_from_effectinfo(self, effectinfo):
# XXX we can get the wrong complexity here, if the lists
# XXX stored on effectinfo are large
for fielddescr in effectinfo.readonly_descrs_fields:
self.force_lazy_setfield(fielddescr)
for arraydescr in effectinfo.readonly_descrs_arrays:
self.force_lazy_setarrayitem(arraydescr)
for fielddescr in effectinfo.write_descrs_fields:
if fielddescr.is_always_pure():
continue
try:
del self.cached_dict_reads[fielddescr]
except KeyError:
pass
self.force_lazy_setfield(fielddescr, can_cache=False)
for arraydescr in effectinfo.write_descrs_arrays:
self.force_lazy_setarrayitem(arraydescr, can_cache=False)
if arraydescr in self.corresponding_array_descrs:
dictdescr = self.corresponding_array_descrs.pop(arraydescr)
try:
del self.cached_dict_reads[dictdescr]
except KeyError:
pass # someone did it already
if effectinfo.check_forces_virtual_or_virtualizable():
vrefinfo = self.optimizer.metainterp_sd.virtualref_info
self.force_lazy_setfield(vrefinfo.descr_forced)
# ^^^ we only need to force this field; the other fields
# of virtualref_info and virtualizable_info are not gcptrs.
def force_lazy_setfield(self, descr, can_cache=True):
try:
cf = self.cached_fields[descr]
except KeyError:
return
cf.force_lazy_setfield(self, descr, can_cache)
def force_lazy_setarrayitem(self, arraydescr, indexb=None, can_cache=True):
try:
submap = self.cached_arrayitems[arraydescr]
except KeyError:
return
for idx, cf in submap.iteritems():
if indexb is None or indexb.contains(idx):
cf.force_lazy_setfield(self, None, can_cache)
def force_all_lazy_setfields_and_arrayitems(self):
items = self.cached_fields.items()
if not we_are_translated():
items.sort(key=str, reverse=True)
for descr, cf in items:
cf.force_lazy_setfield(self, descr)
for submap in self.cached_arrayitems.itervalues():
for index, cf in submap.iteritems():
cf.force_lazy_setfield(self, None)
def force_lazy_setfields_and_arrayitems_for_guard(self):
pendingfields = []
items = self.cached_fields.items()
if not we_are_translated():
items.sort(key=str, reverse=True)
for descr, cf in items:
op = cf._lazy_setfield
if op is None:
continue
val = op.getarg(1)
if self.optimizer.is_virtual(val):
pendingfields.append(op)
continue
cf.force_lazy_setfield(self, descr)
for descr, submap in self.cached_arrayitems.iteritems():
for index, cf in submap.iteritems():
op = cf._lazy_setfield
if op is None:
continue
# the only really interesting case that we need to handle in the
# guards' resume data is that of a virtual object that is stored
            # into a field of a non-virtual object. Here, 'op' is either
            # SETFIELD_GC or SETARRAYITEM_GC.
opinfo = self.getptrinfo(op.getarg(0))
assert not opinfo.is_virtual() # it must be a non-virtual
if self.optimizer.is_virtual(op.getarg(2)):
pendingfields.append(op)
else:
cf.force_lazy_setfield(self, descr)
return pendingfields
def optimize_GETFIELD_GC_I(self, op):
structinfo = self.ensure_ptr_info_arg0(op)
cf = self.field_cache(op.getdescr())
field = cf.getfield_from_cache(self, structinfo, op.getdescr())
if field is not None:
self.make_equal_to(op, field)
return
# default case: produce the operation
self.make_nonnull(op.getarg(0))
self.emit_operation(op)
# then remember the result of reading the field
structinfo.setfield(op.getdescr(), op.getarg(0), op, self, cf)
optimize_GETFIELD_GC_R = optimize_GETFIELD_GC_I
optimize_GETFIELD_GC_F = optimize_GETFIELD_GC_I
def optimize_GETFIELD_GC_PURE_I(self, op):
structinfo = self.ensure_ptr_info_arg0(op)
cf = self.field_cache(op.getdescr())
field = cf.getfield_from_cache(self, structinfo, op.getdescr())
if field is not None:
self.make_equal_to(op, field)
return
# default case: produce the operation
self.make_nonnull(op.getarg(0))
self.emit_operation(op)
optimize_GETFIELD_GC_PURE_R = optimize_GETFIELD_GC_PURE_I
optimize_GETFIELD_GC_PURE_F = optimize_GETFIELD_GC_PURE_I
def optimize_SETFIELD_GC(self, op):
self.setfield(op)
#opnum = OpHelpers.getfield_pure_for_descr(op.getdescr())
#if self.has_pure_result(opnum, [op.getarg(0)],
# op.getdescr()):
# os.write(2, '[bogus _immutable_field_ declaration: %s]\n' %
# (op.getdescr().repr_of_descr()))
# raise BogusImmutableField
#
def setfield(self, op):
cf = self.field_cache(op.getdescr())
cf.do_setfield(self, op)
def optimize_GETARRAYITEM_GC_I(self, op):
arrayinfo = self.ensure_ptr_info_arg0(op)
indexb = self.getintbound(op.getarg(1))
cf = None
if indexb.is_constant():
index = indexb.getint()
arrayinfo.getlenbound(None).make_gt_const(index)
# use the cache on (arraydescr, index), which is a constant
cf = self.arrayitem_cache(op.getdescr(), index)
field = cf.getfield_from_cache(self, arrayinfo, op.getdescr())
if field is not None:
self.make_equal_to(op, field)
return
else:
# variable index, so make sure the lazy setarrayitems are done
self.force_lazy_setarrayitem(op.getdescr(),
self.getintbound(op.getarg(1)))
# default case: produce the operation
self.make_nonnull(op.getarg(0))
self.emit_operation(op)
        # then remember the result of reading the array item
if cf is not None:
arrayinfo.setitem(op.getdescr(), indexb.getint(),
self.get_box_replacement(op.getarg(0)),
self.get_box_replacement(op), cf,
self)
optimize_GETARRAYITEM_GC_R = optimize_GETARRAYITEM_GC_I
optimize_GETARRAYITEM_GC_F = optimize_GETARRAYITEM_GC_I
def optimize_GETARRAYITEM_GC_PURE_I(self, op):
arrayinfo = self.ensure_ptr_info_arg0(op)
indexb = self.getintbound(op.getarg(1))
cf = None
if indexb.is_constant():
index = indexb.getint()
arrayinfo.getlenbound(None).make_gt_const(index)
# use the cache on (arraydescr, index), which is a constant
cf = self.arrayitem_cache(op.getdescr(), index)
fieldvalue = cf.getfield_from_cache(self, arrayinfo, op.getdescr())
if fieldvalue is not None:
self.make_equal_to(op, fieldvalue)
return
else:
# variable index, so make sure the lazy setarrayitems are done
self.force_lazy_setarrayitem(op.getdescr(), self.getintbound(op.getarg(1)))
# default case: produce the operation
self.make_nonnull(op.getarg(0))
self.emit_operation(op)
optimize_GETARRAYITEM_GC_PURE_R = optimize_GETARRAYITEM_GC_PURE_I
optimize_GETARRAYITEM_GC_PURE_F = optimize_GETARRAYITEM_GC_PURE_I
def optimize_SETARRAYITEM_GC(self, op):
#opnum = OpHelpers.getarrayitem_pure_for_descr(op.getdescr())
#if self.has_pure_result(opnum, [op.getarg(0), op.getarg(1)],
# op.getdescr()):
# os.write(2, '[bogus immutable array declaration: %s]\n' %
# (op.getdescr().repr_of_descr()))
# raise BogusImmutableField
#
indexb = self.getintbound(op.getarg(1))
if indexb.is_constant():
arrayinfo = self.ensure_ptr_info_arg0(op)
# arraybound
arrayinfo.getlenbound(None).make_gt_const(indexb.getint())
cf = self.arrayitem_cache(op.getdescr(), indexb.getint())
cf.do_setfield(self, op)
else:
# variable index, so make sure the lazy setarrayitems are done
self.force_lazy_setarrayitem(op.getdescr(), indexb, can_cache=False)
# and then emit the operation
self.emit_operation(op)
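    # Recap of the caching strategy above (illustrative, not from the
    # source): reads and writes with a constant index share a per-
    # (arraydescr, index) CachedField, so e.g.
    #     setarrayitem_gc(p0, 2, i1, descr=d)
    #     i2 = getarrayitem_gc_i(p0, 2, descr=d)
    # lets i2 be replaced by i1, while a variable index first forces all
    # lazy setarrayitems on that descr, since it may alias any cached slot.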
def optimize_QUASIIMMUT_FIELD(self, op):
# Pattern: QUASIIMMUT_FIELD(s, descr=QuasiImmutDescr)
# x = GETFIELD_GC_PURE(s, descr='inst_x')
# If 's' is a constant (after optimizations) we rely on the rest of the
# optimizations to constant-fold the following getfield_gc_pure.
# in addition, we record the dependency here to make invalidation work
# correctly.
# NB: emitting the GETFIELD_GC_PURE is only safe because the
# QUASIIMMUT_FIELD is also emitted to make sure the dependency is
# registered.
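        # Worked example (assumption, not from the source): for a class
        # declaring _immutable_fields_ = ['x?'], the trace contains the
        # QUASIIMMUT_FIELD/GETFIELD_GC_PURE pair described above; if the
        # struct is constant, the getfield constant-folds, and the
        # quasi_immutable_deps entry recorded below invalidates this loop
        # if 'x' is ever mutated later.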
structvalue = self.ensure_ptr_info_arg0(op)
if not structvalue.is_constant():
self._remove_guard_not_invalidated = True
return # not a constant at all; ignore QUASIIMMUT_FIELD
#
from rpython.jit.metainterp.quasiimmut import QuasiImmutDescr
qmutdescr = op.getdescr()
assert isinstance(qmutdescr, QuasiImmutDescr)
# check that the value is still correct; it could have changed
# already between the tracing and now. In this case, we mark the loop
# as invalid
if not qmutdescr.is_still_valid_for(
self.get_box_replacement(op.getarg(0))):
raise InvalidLoop('quasi immutable field changed during tracing')
# record as an out-of-line guard
if self.optimizer.quasi_immutable_deps is None:
self.optimizer.quasi_immutable_deps = {}
self.optimizer.quasi_immutable_deps[qmutdescr.qmut] = None
self._remove_guard_not_invalidated = False
def optimize_GUARD_NOT_INVALIDATED(self, op):
if self._remove_guard_not_invalidated:
return
if self._seen_guard_not_invalidated:
return
self._seen_guard_not_invalidated = True
self.emit_operation(op)
dispatch_opt = make_dispatcher_method(OptHeap, 'optimize_',
default=OptHeap.emit_operation)
OptHeap.propagate_forward = dispatch_opt
```
#### File: optimizeopt/test/test_dependency.py
```python
import py
import pytest
from rpython.jit.metainterp.compile import invent_fail_descr_for_op
from rpython.jit.metainterp.history import TargetToken, JitCellToken, TreeLoop
from rpython.jit.metainterp.optimizeopt.dependency import (DependencyGraph, Dependency,
IndexVar, MemoryRef, Node)
from rpython.jit.metainterp.optimizeopt.vector import VectorLoop
from rpython.jit.metainterp.optimizeopt.test.test_util import (
LLtypeMixin, BaseTest, FakeMetaInterpStaticData, convert_old_style_to_targets,
FakeJitDriverStaticData)
from rpython.jit.metainterp.resoperation import rop, ResOperation
from rpython.jit.backend.llgraph.runner import ArrayDescr
from rpython.jit.tool.oparser import OpParser
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.conftest import option
class FakeDependencyGraph(DependencyGraph):
""" A dependency graph that is able to emit every instruction
one by one. """
def __init__(self, loop):
self.loop = loop
if isinstance(loop, list):
self.nodes = loop
else:
operations = loop.operations
self.nodes = [Node(op,i) for i,op in \
enumerate(operations)]
self.schedulable_nodes = list(reversed(self.nodes))
self.guards = []
class DependencyBaseTest(BaseTest):
def setup_method(self, method):
self.test_name = method.__name__
def build_dependency(self, ops):
loop = self.parse_loop(ops)
graph = DependencyGraph(loop)
self.show_dot_graph(graph, self.test_name)
for node in graph.nodes:
assert node.independent(node)
graph.parsestr = ops
return graph
def match_op(self, expected, actual, remap):
if expected.getopnum() != actual.getopnum():
return False
expargs = expected.getarglist()
actargs = [remap.get(arg, None) for arg in actual.getarglist()]
if not all([e == a or a is None for e,a in zip(expargs,actargs)]):
return False
if expected.getfailargs():
expargs = expected.getfailargs()
actargs = [remap.get(arg, None) for arg in actual.getfailargs()]
if not all([e == a or a is None for e,a in zip(expargs,actargs)]):
return False
return True
def ensure_operations(self, opstrlist, trace, inthatorder=True):
oparse = OpParser('', self.cpu, self.namespace, None,
None, True, None)
oplist = []
for op_str in opstrlist:
op = oparse.parse_next_op(op_str)
if not op.returns_void():
var = op_str.split('=')[0].strip()
if '[' in var:
var = var[:var.find('[')]
elem = op_str[:len(var)]
oparse._cache['lltype', elem] = op
oplist.append(op)
oplist_i = 0
match = False
remap = {}
last_match = 0
for i, op in enumerate(trace.operations):
if oplist_i >= len(oplist):
break
curtomatch = oplist[oplist_i]
if self.match_op(curtomatch, op, remap):
if not op.returns_void():
remap[curtomatch] = op
oplist_i += 1
last_match = i
msg = "could not find all ops in the trace sequence\n\n"
if oplist_i != len(oplist):
l = [str(o) for o in oplist[oplist_i:]]
msg += "sequence\n " + '\n '.join(l)
msg += "\n\ndoes not match\n "
l = [str(o) for o in trace.operations[last_match+1:]]
msg += '\n '.join(l)
assert oplist_i == len(oplist), msg
def parse_loop(self, ops, add_label=True):
loop = self.parse(ops, postprocess=self.postprocess)
loop.operations = filter(lambda op: op.getopnum() != rop.DEBUG_MERGE_POINT, loop.operations)
token = JitCellToken()
if add_label:
label = ResOperation(rop.LABEL, loop.inputargs, descr=TargetToken(token))
else:
label = loop.operations[0]
label.setdescr(TargetToken(token))
jump = loop.operations[-1]
loop = VectorLoop(label, loop.operations[0:-1], jump)
loop.jump.setdescr(token)
class Optimizer(object):
metainterp_sd = FakeMetaInterpStaticData(self.cpu)
jitdriver_sd = FakeJitDriverStaticData()
opt = Optimizer()
opt.jitdriver_sd.vec = True
for op in loop.operations:
if op.is_guard() and not op.getdescr():
descr = invent_fail_descr_for_op(op.getopnum(), opt)
op.setdescr(descr)
return loop
def parse_trace(self, source, inc_label_jump=True, pargs=2, iargs=10,
fargs=6, additional_args=None, replace_args=None):
args = []
for prefix, rang in [('p',range(pargs)),
('i',range(iargs)),
('f',range(fargs))]:
for i in rang:
args.append(prefix + str(i))
assert additional_args is None or isinstance(additional_args,list)
for arg in additional_args or []:
args.append(arg)
for k,v in (replace_args or {}).items():
for i,_ in enumerate(args):
if k == args[i]:
args[i] = v
break
indent = " "
joinedargs = ','.join(args)
fmt = (indent, joinedargs, source, indent, joinedargs)
src = "%s[%s]\n%s\n%sjump(%s)" % fmt
loop = self.parse_loop(src)
# needed to assign the right number to the input
# arguments
[str(arg) for arg in loop.inputargs]
loop.graph = FakeDependencyGraph(loop)
return loop
def assert_edges(self, graph, edge_list, exceptions):
""" Check if all dependencies are met. for complex cases
adding None instead of a list of integers skips the test.
This checks both if a dependency forward and backward exists.
"""
assert len(edge_list) == len(graph.nodes) + 2
edge_list = edge_list[1:-1]
for idx,edges in enumerate(edge_list):
if edges is None:
continue
node_a = graph.getnode(idx)
dependencies = node_a.provides()[:]
for idx_b in edges:
if idx_b == 0 or idx_b >= len(graph.nodes) + 2 -1:
continue
idx_b -= 1
node_b = graph.getnode(idx_b)
dependency = node_a.getedge_to(node_b)
if dependency is None and idx_b not in exceptions.setdefault(idx,[]):
self.show_dot_graph(graph, self.test_name + '_except')
assert dependency is not None or node_b.getopnum() == rop.JUMP, \
" it is expected that instruction at index" + \
" %s depends on instr on index %s but it does not.\n%s" \
% (node_a.getindex(), node_b.getindex(), graph)
elif dependency is not None:
dependencies.remove(dependency)
assert dependencies == [], \
"dependencies unexpected %s.\n%s" \
% (dependencies,graph)
def assert_dependencies(self, graph, full_check=True):
import re
deps = {}
exceptions = {}
for i,line in enumerate(graph.parsestr.splitlines()):
dep_pattern = re.compile("#\s*(\d+):")
dep_match = dep_pattern.search(line)
if dep_match:
label = int(dep_match.group(1))
deps_list = []
deps[label] = []
for to in [d for d in line[dep_match.end():].split(',') if len(d) > 0]:
exception = to.endswith("?")
if exception:
to = to[:-1]
exceptions.setdefault(label,[]).append(int(to))
deps[label].append(int(to))
if full_check:
edges = [ None ] * len(deps)
for k,l in deps.items():
edges[k] = l
self.assert_edges(graph, edges, exceptions)
return graph
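    # Note on the annotation format parsed above: a trailing comment like
    # "# 3: 4,5?" on a trace line means node 3 must have an edge to node 4,
    # while the '?' on 5 marks an edge whose absence is tolerated (it is
    # collected into 'exceptions' and skipped by assert_edges).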
def assert_independent(self, graph, a, b):
a -= 1
b -= 1
a = graph.getnode(a)
b = graph.getnode(b)
assert a.independent(b), "{a} and {b} are dependent!".format(a=a,b=b)
def assert_dependent(self, graph, a, b):
a -= 1
b -= 1
a = graph.getnode(a)
b = graph.getnode(b)
assert not a.independent(b), "{a} and {b} are independent!".format(a=a,b=b)
def show_dot_graph(self, graph, name):
if option and option.viewdeps:
from rpython.translator.tool.graphpage import GraphPage
page = GraphPage()
page.source = graph.as_dot()
page.links = []
page.display()
def debug_print_operations(self, loop):
print('--- loop instr numbered ---')
for i,op in enumerate(loop.operations):
print "[",i,"]",op,
if op.is_guard():
if op.rd_snapshot:
print op.rd_snapshot.boxes
else:
print op.getfailargs()
else:
print ""
def assert_memory_ref_adjacent(self, m1, m2):
assert m1.is_adjacent_to(m2)
assert m2.is_adjacent_to(m1)
def assert_memory_ref_not_adjacent(self, m1, m2):
assert not m1.is_adjacent_to(m2)
assert not m2.is_adjacent_to(m1)
class BaseTestDependencyGraph(DependencyBaseTest):
def test_index_var_basic(self):
b = FakeBox()
i = IndexVar(b,1,1,0)
j = IndexVar(b,1,1,0)
assert i.is_identity()
assert i.same_variable(j)
assert i.constant_diff(j) == 0
def test_index_var_diff(self):
b = FakeBox()
i = IndexVar(b,4,2,0)
j = IndexVar(b,1,1,1)
assert not i.is_identity()
assert not j.is_identity()
assert not i.same_mulfactor(j)
assert i.constant_diff(j) == -1
def test_memoryref_basic(self):
i = FakeBox()
a = FakeBox()
m1 = memoryref(a, i, (1,1,0))
m2 = memoryref(a, i, (1,1,0))
assert m1.alias(m2)
@py.test.mark.parametrize('coeff1,coeff2,state',
        # +------------------ adjacent
        # |+----------------- adjacent_after
        # ||+---------------- adjacent_before
        # |||+--------------- alias
        # ||||
[((1,1,0), (1,1,0), 'ffft'),
((4,2,0), (8,4,0), 'ffft'),
((4,2,0), (8,2,0), 'ffft'),
((4,2,1), (8,4,0), 'tftf'),
])
def test_memoryref_adjacent_alias(self, coeff1, coeff2, state):
i = FakeBox()
a = FakeBox()
m1 = memoryref(a, i, coeff1)
m2 = memoryref(a, i, coeff2)
adja = state[0] == 't'
adja_after = state[1] == 't'
adja_before = state[2] == 't'
alias = state[3] == 't'
assert m1.is_adjacent_to(m2) == adja
assert m2.is_adjacent_to(m1) == adja
assert m1.is_adjacent_after(m2) == adja_after
assert m2.is_adjacent_after(m1) == adja_before
assert m1.alias(m2) == alias
def test_dependency_empty(self):
graph = self.build_dependency("""
[] # 0: 1
jump() # 1:
""")
self.assert_dependencies(graph, full_check=True)
def test_dependency_of_constant_not_used(self):
graph = self.build_dependency("""
[] # 0: 2
i1 = int_add(1,1) # 1: 2
jump() # 2:
""")
self.assert_dependencies(graph, full_check=True)
def test_dependency_simple(self):
graph = self.build_dependency("""
[] # 0: 4
i1 = int_add(1,1) # 1: 2
i2 = int_add(i1,1) # 2: 3
guard_value(i2,3) [] # 3: 4
jump() # 4:
""")
graph = self.assert_dependencies(graph, full_check=True)
self.assert_dependent(graph, 1,2)
self.assert_dependent(graph, 2,3)
self.assert_dependent(graph, 1,3)
def test_def_use_jump_use_def(self):
graph = self.build_dependency("""
[i3] # 0: 1
i1 = int_add(i3,1) # 1: 2, 3
guard_value(i1,0) [] # 2: 3
jump(i1) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_dependency_guard(self):
graph = self.build_dependency("""
[i3] # 0: 2,3
i1 = int_add(1,1) # 1: 2
guard_value(i1,0) [i3] # 2: 3
jump(i3) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_dependency_guard_2(self):
graph = self.build_dependency("""
[i1] # 0: 1,2?,3
i2 = int_le(i1, 10) # 1: 2
guard_true(i2) [i1] # 2:
i3 = int_add(i1,1) # 3: 4
jump(i3) # 4:
""")
self.assert_dependencies(graph, full_check=True)
def test_no_edge_duplication(self):
graph = self.build_dependency("""
[i1] # 0: 1,2?,3
i2 = int_lt(i1,10) # 1: 2
guard_false(i2) [i1] # 2:
i3 = int_add(i1,i1) # 3: 4
jump(i3) # 4:
""")
self.assert_dependencies(graph, full_check=True)
def test_no_edge_duplication_in_guard_failargs(self):
graph = self.build_dependency("""
[i1] # 0: 1,2?,3?
i2 = int_lt(i1,10) # 1: 2
guard_false(i2) [i1,i1,i2,i1,i2,i1] # 2: 3
jump(i1) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_dependencies_1(self):
graph = self.build_dependency("""
[i0, i1, i2] # 0: 1,3,6,7,11?
i4 = int_gt(i1, 0) # 1: 2
guard_true(i4) [] # 2: 5, 11?
i6 = int_sub(i1, 1) # 3: 4
i8 = int_gt(i6, 0) # 4: 5
guard_false(i8) [] # 5: 10
i10 = int_add(i2, 1) # 6: 8
i12 = int_sub(i0, 1) # 7: 9, 11
i14 = int_add(i10, 1) # 8: 11
i16 = int_gt(i12, 0) # 9: 10
guard_true(i16) [] # 10: 11
jump(i12, i1, i14) # 11:
""")
self.assert_dependencies(graph, full_check=True)
self.assert_independent(graph, 6, 2)
self.assert_independent(graph, 6, 1)
def test_prevent_double_arg(self):
graph = self.build_dependency("""
[i0, i1, i2] # 0: 1,3
i4 = int_gt(i1, i0) # 1: 2
guard_true(i4) [] # 2: 3
jump(i0, i1, i2) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_ovf_dep(self):
graph = self.build_dependency("""
[i0, i1, i2] # 0: 2,3
i4 = int_sub_ovf(1, 0) # 1: 2
guard_overflow() [i2] # 2: 3
jump(i0, i1, i2) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_exception_dep(self):
graph = self.build_dependency("""
[p0, i1, i2] # 0: 1,3?
i4 = call_i(p0, 1, descr=nonwritedescr) # 1: 2,3
guard_no_exception() [] # 2: 3
jump(p0, i1, i2) # 3:
""")
self.assert_dependencies(graph, full_check=True)
def test_call_dependency_on_ptr_but_not_index_value(self):
graph = self.build_dependency("""
[p0, p1, i2] # 0: 1,2?,3?,4?,5?
i3 = int_add(i2,1) # 1: 2
i4 = call_i(p0, i3, descr=nonwritedescr) # 2: 3,4,5?
guard_no_exception() [i2] # 3:
p2 = getarrayitem_gc_r(p1, i3, descr=arraydescr) # 4: 5
jump(p2, p1, i3) # 5:
""")
self.assert_dependencies(graph, full_check=True)
def test_call_dependency(self):
graph = self.build_dependency("""
[p0, p1, i2, i5] # 0: 1,2?,3?,4?,5?
i3 = int_add(i2,1) # 1: 2
i4 = call_i(i5, i3, descr=nonwritedescr) # 2: 3,4,5?
guard_no_exception() [i2] # 3: 5?
p2 = getarrayitem_gc_r(p1,i3,descr=chararraydescr) # 4: 5
jump(p2, p1, i3, i5) # 5:
""")
self.assert_dependencies(graph, full_check=True)
def test_call_not_forced_exception(self):
graph = self.build_dependency("""
[p0, p1, i2, i5] # 0: 1,2,4?,5,6
i4 = call_i(i5, i2, descr=nonwritedescr) # 1: 2,4,6
guard_not_forced() [i2] # 2: 3
guard_no_exception() [] # 3: 6
i3 = int_add(i2,1) # 4: 5
p2 = getarrayitem_gc_r(p1,i3,descr=chararraydescr) # 5: 6
jump(p2, p1, i2, i5) # 6:
""")
self.assert_dependencies(graph, full_check=True)
assert graph.nodes[1].priority == 100
assert graph.nodes[2].priority == 100
def test_setarrayitem_dependency(self):
graph = self.build_dependency("""
[p0, i1] # 0: 1,2?,3?,4?
setarrayitem_raw(p0, i1, 1, descr=floatarraydescr) # 1: 2,3
i2 = getarrayitem_raw_i(p0, i1, descr=floatarraydescr) # 2: 4
setarrayitem_raw(p0, i1, 2, descr=floatarraydescr) # 3: 4
jump(p0, i2) # 4:
""")
self.assert_dependencies(graph, full_check=True)
def test_setarrayitem_alias_dependency(self):
        # #1 depends on #2: i1 and i2 might alias, so reordering would
        # destroy correctness
graph = self.build_dependency("""
[p0, i1, i2] # 0: 1,2?,3?
setarrayitem_raw(p0, i1, 1, descr=floatarraydescr) # 1: 2
setarrayitem_raw(p0, i2, 2, descr=floatarraydescr) # 2: 3
jump(p0, i1, i2) # 3:
""")
self.assert_dependencies(graph, full_check=True)
self.assert_dependent(graph, 1,2)
def test_setarrayitem_dont_depend_with_memref_info(self):
graph = self.build_dependency("""
[p0, i1] # 0: 1,2,3?,4?
setarrayitem_raw(p0, i1, 1, descr=chararraydescr) # 1: 4
i2 = int_add(i1,1) # 2: 3
setarrayitem_raw(p0, i2, 2, descr=chararraydescr) # 3: 4
jump(p0, i1) # 4:
""")
self.assert_dependencies(graph, full_check=True)
self.assert_independent(graph, 1,2)
self.assert_independent(graph, 1,3) # they modify 2 different cells
def test_dependency_complex_trace(self):
graph = self.build_dependency("""
[i0, i1, i2, i3, i4, i5, i6, i7] # 0:
i9 = int_mul(i0, 8) # 1: 2
i10 = raw_load_i(i3, i9, descr=arraydescr) # 2: 5, 10
i11 = int_mul(i0, 8) # 3: 4
i12 = raw_load_i(i4, i11, descr=arraydescr) # 4: 5,10
i13 = int_add(i10, i12) # 5: 7,10
i14 = int_mul(i0, 8) # 6: 7
raw_store(i3, i14, i13, descr=arraydescr) # 7: 10,12,20
i16 = int_add(i0, 1) # 8: 9,10,11,13,16,18
i17 = int_lt(i16, i7) # 9: 10
guard_true(i17) [i7, i13, i5, i4, i3, i12, i10, i16] # 10: 17, 20
i18 = int_mul(i16, 9) # 11: 12
i19 = raw_load_i(i3, i18, descr=arraydescr) # 12: 15, 20
i20 = int_mul(i16, 8) # 13: 14
i21 = raw_load_i(i4, i20, descr=arraydescr) # 14: 15, 20
i22 = int_add(i19, i21) # 15: 17, 20
i23 = int_mul(i16, 8) # 16: 17
raw_store(i5, i23, i22, descr=arraydescr) # 17: 20
i24 = int_add(i16, 1) # 18: 19, 20
i25 = int_lt(i24, i7) # 19: 20
guard_true(i25) [i7, i22, i5, i4, i3, i21, i19, i24] # 20:
jump(i24, i19, i21, i3, i4, i5, i22, i7) # 21:
""")
self.assert_dependencies(graph, full_check=True)
self.assert_dependent(graph, 2,12)
self.assert_dependent(graph, 7,12)
self.assert_dependent(graph, 4,12)
def test_getfield(self):
graph = self.build_dependency("""
[p0, p1] # 0: 1,2,5
p2 = getfield_gc_r(p0) # 1: 3,5
p3 = getfield_gc_r(p0) # 2: 4
guard_nonnull(p2) [p2] # 3: 4,5
guard_nonnull(p3) [p3] # 4: 5
jump(p0,p2) # 5:
""")
self.assert_dependencies(graph, full_check=True)
def test_cyclic(self):
graph = self.build_dependency("""
[p0, p1, p5, p6, p7, p9, p11, p12] # 0: 1,6
p13 = getfield_gc_r(p9) # 1: 2,5
guard_nonnull(p13) [] # 2: 4,5
i14 = getfield_gc_i(p9) # 3: 5
p15 = getfield_gc_r(p13) # 4: 5
guard_class(p15, 14073732) [p1, p0, p9, i14, p15, p13, p5, p6, p7] # 5: 6
jump(p0,p1,p5,p6,p7,p9,p11,p12) # 6:
""")
self.assert_dependencies(graph, full_check=True)
def test_iterate(self):
n1,n2,n3,n4,n5 = [FakeNode(i+1) for i in range(5)]
# n1 -> n2 -> n4 -> n5
# +---> n3 --^
n1.edge_to(n2); n2.edge_to(n4); n4.edge_to(n5)
n1.edge_to(n3); n3.edge_to(n4);
paths = list(n5.iterate_paths(n1, backwards=True))
assert all([path.check_acyclic() for path in paths])
assert len(paths) == 2
assert paths[0].as_str() == "n5 -> n4 -> n2 -> n1"
assert paths[1].as_str() == "n5 -> n4 -> n3 -> n1"
paths = list(n1.iterate_paths(n5))
assert all([path.check_acyclic() for path in paths])
assert len(paths) == 2
assert paths[0].as_str() == "n1 -> n2 -> n4 -> n5"
assert paths[1].as_str() == "n1 -> n3 -> n4 -> n5"
def test_iterate_one_many_one(self):
r = range(19)
n0 = FakeNode(0)
nodes = [FakeNode(i+1) for i in r]
nend = FakeNode(len(r)+1)
assert len(list(n0.iterate_paths(nodes[0], backwards=True))) == 0
for i in r:
n0.edge_to(nodes[i])
nodes[i].edge_to(nend)
paths = list(nend.iterate_paths(n0, backwards=True))
assert all([path.check_acyclic() for path in paths])
assert len(paths) == len(r)
for i in r:
assert paths[i].as_str() == "n%d -> %s -> n0" % (len(r)+1, nodes[i])
# forward
paths = list(n0.iterate_paths(nend))
assert all([path.check_acyclic() for path in paths])
assert len(paths) == len(r)
for i in r:
assert paths[i].as_str() == "n0 -> %s -> n%d" % (nodes[i], len(r)+1)
def test_iterate_blacklist_diamond(self):
blacklist = {}
n1,n2,n3,n4 = [FakeNode(i+1) for i in range(4)]
# n1 -> n2 -> n4
# +---> n3 --^
n1.edge_to(n2); n2.edge_to(n4);
n1.edge_to(n3); n3.edge_to(n4);
paths = list(n1.iterate_paths(n4, blacklist=True))
assert len(paths) == 2
assert paths[0].as_str() == "n1 -> n2 -> n4"
assert paths[1].as_str() == "n1 -> n3 -> n4"
def test_iterate_blacklist_double_diamond(self):
blacklist = {}
n1,n2,n3,n4,n5,n6,n7,n8 = [FakeNode(i+1) for i in range(8)]
# n1 -> n2 -> n4 -> n5 -> n6 --> n8
# +---> n3 --^ +---> n7 --^
n1.edge_to(n2); n2.edge_to(n4);
n1.edge_to(n3); n3.edge_to(n4);
n4.edge_to(n5)
n5.edge_to(n6); n6.edge_to(n8);
n5.edge_to(n7); n7.edge_to(n8);
paths = list(n1.iterate_paths(n8, blacklist=True))
assert len(paths) == 3
assert paths[0].as_str() == "n1 -> n2 -> n4 -> n5 -> n6 -> n8"
assert paths[1].as_str() == "n1 -> n2 -> n4 -> n5 -> n7 -> n8"
assert paths[2].as_str() == "n1 -> n3 -> n4"
def test_iterate_blacklist_split_path(self):
blacklist = {}
n1,n2,n3,n4,n5,n6,n7,n8 = [FakeNode(i+1) for i in range(8)]
n1.edge_to(n2);
n3.edge_to(n2);
n2.edge_to(n4);
n3.edge_to(n4);
paths = list(n4.iterate_paths(n3, backwards=True, blacklist=True))
assert len(paths) == 2
assert paths[0].as_str() == "n4 -> n2 -> n3"
assert paths[1].as_str() == "n4 -> n3"
n5.edge_to(n1)
n5.edge_to(n3)
paths = list(n4.iterate_paths(n5, backwards=True, blacklist=True))
assert len(paths) == 3
assert paths[0].as_str() == "n4 -> n2 -> n1 -> n5"
assert paths[1].as_str() == "n4 -> n2 -> n3 -> n5"
assert paths[2].as_str() == "n4 -> n3"
def test_sccs(self):
n1,n2 = FakeNode(1), FakeNode(2)
n1.edge_to(n2); n2.edge_to(n1)
graph = FakeDependencyGraph([n1,n2])
cycle = graph.cycles()
assert cycle == [n1, n2]
n3 = FakeNode(0)
graph.nodes = [n3]
cycle = graph.cycles()
assert cycle is None
def test_cycles_2(self):
n1,n2,n3,n4 = FakeNode(1), FakeNode(2), FakeNode(3), FakeNode(4)
n1.edge_to(n3); n3.edge_to(n4); n4.edge_to(n1)
graph = FakeDependencyGraph([n1,n2])
graph.nodes = [n1,n2,n3]
cycle = graph.cycles()
assert cycle is not None
assert cycle == [n1,n3,n4]
class FakeMemoryRefResOp(object):
def __init__(self, array, descr):
self.array = array
self.descr = descr
def getarg(self, index):
return self.array
def getdescr(self):
return self.descr
FLOAT = ArrayDescr(lltype.GcArray(lltype.Float), None)
def memoryref(array, var, mod=(1,1,0), descr=None, raw=False):
if descr is None:
descr = FLOAT
mul, div, off = mod
op = FakeMemoryRefResOp(array, descr)
return MemoryRef(op,
IndexVar(var, mul, div, off),
raw)
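# Helper reading (illustrative): memoryref(a, i, (mul, div, off)) builds a
# MemoryRef over an IndexVar that roughly encodes the affine index
# i*mul/div + off, which the adjacency/alias tests above compare pairwise.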
class FakeBox(object):
pass
class FakeNode(Node):
def __init__(self, i):
Node.__init__(self, None, i)
pass
def __repr__(self):
return "n%d" % self.opidx
class TestLLtype(BaseTestDependencyGraph, LLtypeMixin):
pass
```
#### File: optimizeopt/test/test_intbound.py
```python
from rpython.jit.metainterp.optimizeopt.intutils import IntBound, IntUpperBound, \
IntLowerBound, IntUnbounded
from rpython.jit.metainterp.optimizeopt.intbounds import next_pow2_m1
from copy import copy
import sys
from rpython.rlib.rarithmetic import LONG_BIT
def bound(a,b):
if a is None and b is None:
return IntUnbounded()
elif a is None:
return IntUpperBound(b)
elif b is None:
return IntLowerBound(a)
else:
return IntBound(a,b)
def const(a):
return bound(a,a)
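# Quick usage sketch for the helpers above (illustrative only):
#     bound(0, 10)   -> IntBound(0, 10),  i.e. 0 <= x <= 10
#     bound(None, 5) -> IntUpperBound(5), i.e. x <= 5
#     bound(3, None) -> IntLowerBound(3), i.e. x >= 3
#     const(4)       -> the degenerate bound 4 <= x <= 4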
def some_bounds():
brd = [None] + range(-2, 3)
for lower in brd:
for upper in brd:
if lower is not None and upper is not None and lower > upper:
continue
yield (lower, upper, bound(lower, upper))
nbr = range(-5, 6)
def test_known():
for lower, upper, b in some_bounds():
inside = []
border = []
for n in nbr:
if (lower is None or n >= lower) and \
(upper is None or n <= upper):
                if n == lower or n == upper:
border.append(n)
else:
inside.append(n)
for n in nbr:
c = const(n)
if n in inside:
assert b.contains(n)
assert not b.known_lt(c)
assert not b.known_gt(c)
assert not b.known_le(c)
assert not b.known_ge(c)
elif n in border:
assert b.contains(n)
if n == upper:
assert b.known_le(const(upper))
else:
assert b.known_ge(const(lower))
else:
assert not b.contains(n)
some = (border + inside)[0]
if n < some:
assert b.known_gt(c)
else:
assert b.known_lt(c)
def test_make():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
lt = IntUnbounded()
lt.make_lt(b1)
lt.make_lt(b2)
for n in nbr:
c = const(n)
if b1.known_le(c) or b2.known_le(c):
assert lt.known_lt(c)
else:
assert not lt.known_lt(c)
assert not lt.known_gt(c)
assert not lt.known_ge(c)
gt = IntUnbounded()
gt.make_gt(b1)
gt.make_gt(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c) or b2.known_ge(c):
assert gt.known_gt(c)
else:
assert not gt.known_gt(c)
assert not gt.known_lt(c)
assert not gt.known_le(c)
le = IntUnbounded()
le.make_le(b1)
le.make_le(b2)
for n in nbr:
c = const(n)
if b1.known_le(c) or b2.known_le(c):
assert le.known_le(c)
else:
assert not le.known_le(c)
assert not le.known_gt(c)
assert not le.known_ge(c)
ge = IntUnbounded()
ge.make_ge(b1)
ge.make_ge(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c) or b2.known_ge(c):
assert ge.known_ge(c)
else:
assert not ge.known_ge(c)
assert not ge.known_lt(c)
assert not ge.known_le(c)
gl = IntUnbounded()
gl.make_ge(b1)
gl.make_le(b2)
for n in nbr:
c = const(n)
if b1.known_ge(c):
assert gl.known_ge(c)
else:
assert not gl.known_ge(c)
assert not gl.known_gt(c)
if b2.known_le(c):
assert gl.known_le(c)
else:
assert not gl.known_le(c)
assert not gl.known_lt(c)
def test_intersect():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b = copy(b1)
b.intersect(b2)
for n in nbr:
if b1.contains(n) and b2.contains(n):
assert b.contains(n)
else:
assert not b.contains(n)
def test_add():
for _, _, b1 in some_bounds():
for n1 in nbr:
b2 = b1.add(n1)
for n2 in nbr:
c1 = const(n2)
c2 = const(n2 + n1)
if b1.known_le(c1):
assert b2.known_le(c2)
else:
assert not b2.known_le(c2)
if b1.known_ge(c1):
assert b2.known_ge(c2)
else:
assert not b2.known_ge(c2)
if b1.known_lt(c1):
assert b2.known_lt(c2)
else:
assert not b2.known_lt(c2)
if b1.known_gt(c1):
assert b2.known_gt(c2)
else:
assert not b2.known_gt(c2)
def test_add_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.add_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
assert b3.contains(n1 + n2)
    a = bound(2, 4).add_bound(bound(1, 2))
assert not a.contains(2)
assert not a.contains(7)
def test_mul_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.mul_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
assert b3.contains(n1 * n2)
    a = bound(2, 4).mul_bound(bound(1, 2))
assert not a.contains(1)
assert not a.contains(9)
    a = bound(-3, 2).mul_bound(bound(1, 2))
assert not a.contains(-7)
assert not a.contains(5)
assert a.contains(-6)
assert a.contains(4)
    a = bound(-3, 2).mul(-1)
    for i in range(-2, 4):
assert a.contains(i)
assert not a.contains(4)
assert not a.contains(-3)
def test_shift_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
bleft = b1.lshift_bound(b2)
bright = b1.rshift_bound(b2)
for n1 in nbr:
for n2 in range(10):
if b1.contains(n1) and b2.contains(n2):
assert bleft.contains(n1 << n2)
assert bright.contains(n1 >> n2)
def test_shift_overflow():
b10 = IntBound(0, 10)
b100 = IntBound(0, 100)
bmax = IntBound(0, sys.maxint/2)
assert not b10.lshift_bound(b100).has_upper
assert not bmax.lshift_bound(b10).has_upper
assert b10.lshift_bound(b10).has_upper
for b in (b10, b100, bmax, IntBound(0, 0)):
for shift_count_bound in (IntBound(7, LONG_BIT), IntBound(-7, 7)):
#assert not b.lshift_bound(shift_count_bound).has_upper
assert not b.rshift_bound(shift_count_bound).has_upper
def test_div_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.div_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
if n2 != 0:
assert b3.contains(n1 / n2)
    a = bound(2, 4).div_bound(bound(1, 2))
assert not a.contains(0)
assert not a.contains(5)
    a = bound(-3, 2).div_bound(bound(1, 2))
assert not a.contains(-4)
assert not a.contains(3)
assert a.contains(-3)
assert a.contains(0)
def test_sub_bound():
for _, _, b1 in some_bounds():
for _, _, b2 in some_bounds():
b3 = b1.sub_bound(b2)
for n1 in nbr:
for n2 in nbr:
if b1.contains(n1) and b2.contains(n2):
assert b3.contains(n1 - n2)
    a = bound(2, 4).sub_bound(bound(1, 2))
assert not a.contains(-1)
assert not a.contains(4)
def test_next_pow2_m1():
assert next_pow2_m1(0) == 0
assert next_pow2_m1(1) == 1
assert next_pow2_m1(7) == 7
assert next_pow2_m1(256) == 511
assert next_pow2_m1(255) == 255
assert next_pow2_m1(80) == 127
assert next_pow2_m1((1 << 32) - 5) == (1 << 32) - 1
assert next_pow2_m1((1 << 64) - 1) == (1 << 64) - 1
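# As the asserts above show, next_pow2_m1(n) returns the smallest value of
# the form 2**k - 1 that is >= n; a typical implementation (sketch, not
# necessarily the actual source) smears the bits down with
#     n |= n >> 1; n |= n >> 2; n |= n >> 4; ...
# up to the word size and returns the result.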
```
#### File: metainterp/test/support.py
```python
import py, sys
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.jit.backend.llgraph import runner
from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats
from rpython.jit.metainterp.warmspot import reset_stats
from rpython.jit.metainterp.warmstate import unspecialize_value
from rpython.jit.metainterp.optimizeopt import ALL_OPTS_DICT
from rpython.jit.metainterp import pyjitpl, history, jitexc
from rpython.jit.codewriter.policy import JitPolicy
from rpython.jit.codewriter import codewriter, longlong
from rpython.rlib.rfloat import isnan
from rpython.rlib.jit import ENABLE_ALL_OPTS
from rpython.translator.backendopt.all import backend_optimizations
def _get_jitcodes(testself, CPUClass, func, values,
supports_floats=True,
supports_longlong=False,
supports_singlefloats=False,
translationoptions={}, **kwds):
from rpython.jit.codewriter import support
class FakeJitCell(object):
__product_token = None
def get_procedure_token(self):
return self.__product_token
def set_procedure_token(self, token):
self.__product_token = token
class FakeWarmRunnerState(object):
def attach_procedure_to_interp(self, greenkey, procedure_token):
assert greenkey == []
self._cell.set_procedure_token(procedure_token)
def helper_func(self, FUNCPTR, func):
from rpython.rtyper.annlowlevel import llhelper
return llhelper(FUNCPTR, func)
def get_unique_id(self, *args):
return 0
def get_location_str(self, args):
return 'location'
class JitCell:
@staticmethod
def get_jit_cell_at_key(greenkey):
assert greenkey == []
return FakeWarmRunnerState._cell
_cell = FakeJitCell()
trace_limit = sys.maxint
enable_opts = ALL_OPTS_DICT
vec = True
if kwds.pop('disable_optimizations', False):
FakeWarmRunnerState.enable_opts = {}
func._jit_unroll_safe_ = True
rtyper = support.annotate(func, values,
translationoptions=translationoptions)
graphs = rtyper.annotator.translator.graphs
testself.all_graphs = graphs
result_kind = history.getkind(graphs[0].getreturnvar().concretetype)[0]
class FakeJitDriverSD:
num_green_args = 0
portal_graph = graphs[0]
virtualizable_info = None
greenfield_info = None
result_type = result_kind
portal_runner_ptr = "???"
vec = False
stats = history.Stats()
cpu = CPUClass(rtyper, stats, None, False)
cw = codewriter.CodeWriter(cpu, [FakeJitDriverSD()])
cw.debug = True
testself.cw = cw
if supports_floats and not cpu.supports_floats:
py.test.skip("this test requires supports_floats=True")
if supports_longlong and not cpu.supports_longlong:
py.test.skip("this test requires supports_longlong=True")
if supports_singlefloats and not cpu.supports_singlefloats:
py.test.skip("this test requires supports_singlefloats=True")
policy = JitPolicy()
policy.set_supports_floats(supports_floats)
policy.set_supports_longlong(supports_longlong)
policy.set_supports_singlefloats(supports_singlefloats)
graphs = cw.find_all_graphs(policy)
if kwds.get("backendopt"):
backend_optimizations(rtyper.annotator.translator, graphs=graphs)
#
testself.warmrunnerstate = FakeWarmRunnerState()
testself.warmrunnerstate.cpu = cpu
FakeJitDriverSD.warmstate = testself.warmrunnerstate
if hasattr(testself, 'finish_setup_for_interp_operations'):
testself.finish_setup_for_interp_operations()
#
cw.make_jitcodes(verbose=True)
def _run_with_blackhole(testself, args):
from rpython.jit.metainterp.blackhole import BlackholeInterpBuilder
cw = testself.cw
blackholeinterpbuilder = BlackholeInterpBuilder(cw)
blackholeinterp = blackholeinterpbuilder.acquire_interp()
count_i = count_r = count_f = 0
for value in args:
T = lltype.typeOf(value)
if T == lltype.Signed:
blackholeinterp.setarg_i(count_i, value)
count_i += 1
elif T == llmemory.GCREF:
blackholeinterp.setarg_r(count_r, value)
count_r += 1
elif T == lltype.Float:
value = longlong.getfloatstorage(value)
blackholeinterp.setarg_f(count_f, value)
count_f += 1
else:
raise TypeError(T)
[jitdriver_sd] = cw.callcontrol.jitdrivers_sd
blackholeinterp.setposition(jitdriver_sd.mainjitcode, 0)
blackholeinterp.run()
return blackholeinterp._final_result_anytype()
def _run_with_pyjitpl(testself, args):
cw = testself.cw
opt = history.Options(listops=True)
metainterp_sd = pyjitpl.MetaInterpStaticData(cw.cpu, opt)
metainterp_sd.finish_setup(cw)
[jitdriver_sd] = metainterp_sd.jitdrivers_sd
metainterp = pyjitpl.MetaInterp(metainterp_sd, jitdriver_sd)
testself.metainterp = metainterp
try:
metainterp.compile_and_run_once(jitdriver_sd, *args)
except (jitexc.DoneWithThisFrameInt,
jitexc.DoneWithThisFrameRef,
jitexc.DoneWithThisFrameFloat) as e:
return e.result
else:
raise Exception("FAILED")
def _run_with_machine_code(testself, args):
metainterp = testself.metainterp
num_green_args = metainterp.jitdriver_sd.num_green_args
procedure_token = metainterp.get_procedure_token(args[:num_green_args])
# a loop was successfully created by _run_with_pyjitpl(); call it
cpu = metainterp.cpu
args1 = []
for i in range(len(args) - num_green_args):
x = args[num_green_args + i]
args1.append(unspecialize_value(x))
deadframe = cpu.execute_token(procedure_token, *args1)
faildescr = cpu.get_latest_descr(deadframe)
assert faildescr.__class__.__name__.startswith('DoneWithThisFrameDescr')
if metainterp.jitdriver_sd.result_type == history.INT:
return deadframe, cpu.get_int_value(deadframe, 0)
elif metainterp.jitdriver_sd.result_type == history.REF:
return deadframe, cpu.get_ref_value(deadframe, 0)
elif metainterp.jitdriver_sd.result_type == history.FLOAT:
return deadframe, cpu.get_float_value(deadframe, 0)
else:
return deadframe, None
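# The three helpers above execute the same function in three ways: under the
# blackhole interpreter, under pyjitpl (tracing), and as the machine code
# compiled from that trace; interp_operations() below cross-checks that all
# three produce the same result.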
class JitMixin:
basic = True
enable_opts = ENABLE_ALL_OPTS
# Basic terminology: the JIT produces "loops" and "bridges".
# Bridges are always attached to failing guards. Every loop is
# the "trunk" of a tree of compiled code, which is formed by first
# compiling a loop and then incrementally adding some number of
# bridges to it. Each loop and each bridge ends with either a
    # FINISH or a JUMP instruction (the name "loop" is no longer really
    # accurate). The JUMP instruction jumps to any LABEL
# pseudo-instruction, which can be anywhere, within the same tree
# or another one.
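    # Illustrative example (not from the source): if a loop's guard on i2
    # fails often enough, a bridge is compiled starting from that guard;
    # the bridge typically ends in a JUMP back to the loop's LABEL, growing
    # the same tree of compiled code.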
def check_resops(self, expected=None, **check):
"""Check the instructions in all loops and bridges, ignoring
the ones that end in FINISH. Either pass a dictionary (then
the check must match exactly), or some keyword arguments (then
the check is only about the instructions named)."""
if self.enable_opts == ENABLE_ALL_OPTS:
get_stats().check_resops(expected=expected, **check)
def check_simple_loop(self, expected=None, **check):
"""Useful in the simplest case when we have only one loop
ending with a jump back to itself and possibly a few bridges.
Only the operations within the loop formed by that single jump
will be counted; the bridges are all ignored. If several loops
were compiled, complains."""
if self.enable_opts == ENABLE_ALL_OPTS:
get_stats().check_simple_loop(expected=expected, **check)
def check_trace_count(self, count): # was check_loop_count
"""Check the number of loops and bridges compiled."""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().compiled_count == count
def check_trace_count_at_most(self, count):
"""Check the number of loops and bridges compiled."""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().compiled_count <= count
def check_jitcell_token_count(self, count): # was check_tree_loop_count
"""This should check the number of independent trees of code.
(xxx it is not 100% clear that the count is correct)"""
if self.enable_opts == ENABLE_ALL_OPTS:
assert len(get_stats().jitcell_token_wrefs) == count
def check_target_token_count(self, count):
"""(xxx unknown)"""
if self.enable_opts == ENABLE_ALL_OPTS:
tokens = get_stats().get_all_jitcell_tokens()
n = sum([len(t.target_tokens) for t in tokens])
assert n == count
def check_enter_count(self, count):
"""Check the number of times pyjitpl ran. (Every time, it
should have produced either one loop or one bridge, or aborted;
but it is not 100% clear that this is still correct in the
presence of unrolling.)"""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().enter_count == count
def check_enter_count_at_most(self, count):
"""Check the number of times pyjitpl ran."""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().enter_count <= count
def check_aborted_count(self, count):
"""Check the number of times pyjitpl was aborted."""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().aborted_count == count
def check_aborted_count_at_least(self, count):
"""Check the number of times pyjitpl was aborted."""
if self.enable_opts == ENABLE_ALL_OPTS:
assert get_stats().aborted_count >= count
def meta_interp(self, *args, **kwds):
kwds['CPUClass'] = self.CPUClass
if "backendopt" not in kwds:
kwds["backendopt"] = False
if "enable_opts" not in kwds and hasattr(self, 'enable_opts'):
kwds['enable_opts'] = self.enable_opts
old = codewriter.CodeWriter.debug
try:
codewriter.CodeWriter.debug = True
return ll_meta_interp(*args, **kwds)
finally:
codewriter.CodeWriter.debug = old
def interp_operations(self, f, args, **kwds):
# get the JitCodes for the function f
_get_jitcodes(self, self.CPUClass, f, args, **kwds)
# try to run it with blackhole.py
result1 = _run_with_blackhole(self, args)
# try to run it with pyjitpl.py
result2 = _run_with_pyjitpl(self, args)
assert result1 == result2 or isnan(result1) and isnan(result2)
# try to run it by running the code compiled just before
df, result3 = _run_with_machine_code(self, args)
self._lastframe = df
assert result1 == result3 or result3 == NotImplemented or isnan(result1) and isnan(result3)
#
if (longlong.supports_longlong and
isinstance(result1, longlong.r_float_storage)):
result1 = longlong.getrealfloat(result1)
return result1
def check_history(self, expected=None, **isns):
# this can be used after calling meta_interp
get_stats().check_history(expected, **isns)
def check_operations_history(self, expected=None, **isns):
# this can be used after interp_operations
if expected is not None:
expected = dict(expected)
expected['finish'] = 1
self.metainterp.staticdata.stats.check_history(expected, **isns)
class LLJitMixin(JitMixin):
CPUClass = runner.LLGraphCPU
@staticmethod
def Ptr(T):
return lltype.Ptr(T)
@staticmethod
def GcStruct(name, *fields, **kwds):
S = lltype.GcStruct(name, *fields, **kwds)
return S
malloc = staticmethod(lltype.malloc)
nullptr = staticmethod(lltype.nullptr)
@staticmethod
def malloc_immortal(T):
return lltype.malloc(T, immortal=True)
def _get_NODE(self):
NODE = lltype.GcForwardReference()
NODE.become(lltype.GcStruct('NODE', ('value', lltype.Signed),
('next', lltype.Ptr(NODE))))
return NODE
# ____________________________________________________________
class _Foo:
pass
def noConst(x):
"""Helper function for tests, returning 'x' as a BoxInt/BoxPtr
even if it is a ConstInt/ConstPtr."""
from rpython.rlib import jit
return jit.hint(x, force_no_const=True)
```
#### File: metainterp/test/test_typesystem.py
```python
from rpython.jit.metainterp import typesystem
from rpython.rtyper.lltypesystem import lltype, llmemory
class TypeSystemTests(object):
def test_ref_dict(self):
d = self.helper.new_ref_dict()
ref1 = self.fresh_ref()
ref2 = self.fresh_ref()
ref3 = self.fresh_ref()
d[ref1] = 123
d[ref2] = 456
d[ref3] = 789
ref1b = self.duplicate_ref(ref1)
ref2b = self.duplicate_ref(ref2)
ref3b = self.duplicate_ref(ref3)
assert d[ref1b] == 123
assert d[ref2b] == 456
assert d[ref3b] == 789
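        # The point of the test: new_ref_dict() must hash and compare GC
        # references by the object they point to, so a freshly duplicated
        # pointer (ref1b) to the same object still finds the value stored
        # under the original reference (ref1).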
class TestLLtype(TypeSystemTests):
helper = typesystem.llhelper
def fresh_ref(self):
S = lltype.GcStruct('S')
s = lltype.malloc(S)
return lltype.cast_opaque_ptr(llmemory.GCREF, s)
def duplicate_ref(self, x):
s = x._obj.container._as_ptr()
return lltype.cast_opaque_ptr(llmemory.GCREF, s)
def null_ref(self):
return lltype.nullptr(llmemory.GCREF.TO)
```
#### File: gc/test/test_minimarkpage.py
```python
import py
from rpython.memory.gc.minimarkpage import ArenaCollection
from rpython.memory.gc.minimarkpage import PAGE_HEADER, PAGE_PTR
from rpython.memory.gc.minimarkpage import PAGE_NULL, WORD
from rpython.memory.gc.minimarkpage import _dummy_size
from rpython.rtyper.lltypesystem import lltype, llmemory, llarena
from rpython.rtyper.lltypesystem.llmemory import cast_ptr_to_adr
NULL = llmemory.NULL
SHIFT = WORD
hdrsize = llmemory.raw_malloc_usage(llmemory.sizeof(PAGE_HEADER))
def test_allocate_arena():
ac = ArenaCollection(SHIFT + 64*20, 64, 1)
ac.allocate_new_arena()
assert ac.num_uninitialized_pages == 20
upages = ac.current_arena.freepages
upages + 64*20 # does not raise
py.test.raises(llarena.ArenaError, "upages + 64*20 + 1")
#
ac = ArenaCollection(SHIFT + 64*20 + 7, 64, 1)
ac.allocate_new_arena()
assert ac.num_uninitialized_pages == 20
upages = ac.current_arena.freepages
upages + 64*20 + 7 # does not raise
py.test.raises(llarena.ArenaError, "upages + 64*20 + 64")
def test_allocate_new_page():
pagesize = hdrsize + 16
arenasize = pagesize * 4 - 1
#
def checknewpage(page, size_class):
size = WORD * size_class
assert (ac._nuninitialized(page, size_class) ==
(pagesize - hdrsize) // size)
assert page.nfree == 0
page1 = page.freeblock - hdrsize
assert llmemory.cast_ptr_to_adr(page) == page1
assert page.nextpage == PAGE_NULL
#
ac = ArenaCollection(arenasize, pagesize, 99)
assert ac.num_uninitialized_pages == 0
assert ac.total_memory_used == 0
#
page = ac.allocate_new_page(5)
checknewpage(page, 5)
assert ac.num_uninitialized_pages == 2
assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
assert ac.page_for_size[5] == page
#
page = ac.allocate_new_page(3)
checknewpage(page, 3)
assert ac.num_uninitialized_pages == 1
assert ac.current_arena.freepages - pagesize == cast_ptr_to_adr(page)
assert ac.page_for_size[3] == page
#
page = ac.allocate_new_page(4)
checknewpage(page, 4)
assert ac.num_uninitialized_pages == 0
assert ac.page_for_size[4] == page
def arena_collection_for_test(pagesize, pagelayout, fill_with_objects=False):
assert " " not in pagelayout.rstrip(" ")
nb_pages = len(pagelayout)
arenasize = pagesize * (nb_pages + 1) - 1
ac = ArenaCollection(arenasize, pagesize, 9*WORD)
#
def link(pageaddr, size_class, size_block, nblocks, nusedblocks, step=1):
assert step in (1, 2)
llarena.arena_reserve(pageaddr, llmemory.sizeof(PAGE_HEADER))
page = llmemory.cast_adr_to_ptr(pageaddr, PAGE_PTR)
if step == 1:
page.nfree = 0
nuninitialized = nblocks - nusedblocks
else:
page.nfree = nusedblocks
nuninitialized = nblocks - 2*nusedblocks
page.freeblock = pageaddr + hdrsize + nusedblocks * size_block
if nusedblocks < nblocks:
chainedlists = ac.page_for_size
else:
chainedlists = ac.full_page_for_size
page.nextpage = chainedlists[size_class]
page.arena = ac.current_arena
chainedlists[size_class] = page
if fill_with_objects:
for i in range(0, nusedblocks*step, step):
objaddr = pageaddr + hdrsize + i * size_block
llarena.arena_reserve(objaddr, _dummy_size(size_block))
if step == 2:
prev = 'page.freeblock'
for i in range(1, nusedblocks*step, step):
holeaddr = pageaddr + hdrsize + i * size_block
llarena.arena_reserve(holeaddr,
llmemory.sizeof(llmemory.Address))
exec '%s = holeaddr' % prev in globals(), locals()
prevhole = holeaddr
prev = 'prevhole.address[0]'
endaddr = pageaddr + hdrsize + 2*nusedblocks * size_block
exec '%s = endaddr' % prev in globals(), locals()
assert ac._nuninitialized(page, size_class) == nuninitialized
#
ac.allocate_new_arena()
num_initialized_pages = len(pagelayout.rstrip(" "))
ac._startpageaddr = ac.current_arena.freepages
if pagelayout.endswith(" "):
ac.current_arena.freepages += pagesize * num_initialized_pages
else:
ac.current_arena.freepages = NULL
ac.num_uninitialized_pages -= num_initialized_pages
#
for i in reversed(range(num_initialized_pages)):
pageaddr = pagenum(ac, i)
c = pagelayout[i]
if '1' <= c <= '9': # a partially used page (1 block free)
size_class = int(c)
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks-1)
elif c == '.': # a free, but initialized, page
llarena.arena_reserve(pageaddr, llmemory.sizeof(llmemory.Address))
pageaddr.address[0] = ac.current_arena.freepages
ac.current_arena.freepages = pageaddr
ac.current_arena.nfreepages += 1
elif c == '#': # a random full page, in the list 'full_pages'
size_class = fill_with_objects or 1
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks)
elif c == '/': # a page 1/3 allocated, 1/3 freed, 1/3 uninit objs
size_class = fill_with_objects or 1
size_block = WORD * size_class
nblocks = (pagesize - hdrsize) // size_block
link(pageaddr, size_class, size_block, nblocks, nblocks // 3,
step=2)
#
ac.allocate_new_arena = lambda: should_not_allocate_new_arenas
return ac
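# Page-layout legend used throughout these tests (recap of the branches
# above): '1'..'9' is a page of that size class with one free block, '.' a
# free but initialized page, '#' a full page, '/' a page that is 1/3
# allocated, 1/3 freed and 1/3 uninitialized, and trailing spaces stand for
# uninitialized pages at the end of the arena.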
def pagenum(ac, i):
return ac._startpageaddr + ac.page_size * i
def getpage(ac, i):
return llmemory.cast_adr_to_ptr(pagenum(ac, i), PAGE_PTR)
def checkpage(ac, page, expected_position):
assert llmemory.cast_ptr_to_adr(page) == pagenum(ac, expected_position)
def freepages(ac):
return ac.current_arena.freepages
def test_simple_arena_collection():
pagesize = hdrsize + 16
ac = arena_collection_for_test(pagesize, "##....# ")
#
assert freepages(ac) == pagenum(ac, 2)
page = ac.allocate_new_page(1); checkpage(ac, page, 2)
assert freepages(ac) == pagenum(ac, 3)
page = ac.allocate_new_page(2); checkpage(ac, page, 3)
assert freepages(ac) == pagenum(ac, 4)
page = ac.allocate_new_page(3); checkpage(ac, page, 4)
assert freepages(ac) == pagenum(ac, 5)
page = ac.allocate_new_page(4); checkpage(ac, page, 5)
assert freepages(ac) == pagenum(ac, 7) and ac.num_uninitialized_pages == 3
page = ac.allocate_new_page(5); checkpage(ac, page, 7)
assert freepages(ac) == pagenum(ac, 8) and ac.num_uninitialized_pages == 2
page = ac.allocate_new_page(6); checkpage(ac, page, 8)
assert freepages(ac) == pagenum(ac, 9) and ac.num_uninitialized_pages == 1
page = ac.allocate_new_page(7); checkpage(ac, page, 9)
assert not ac.current_arena and ac.num_uninitialized_pages == 0
def chkob(ac, num_page, pos_obj, obj):
pageaddr = pagenum(ac, num_page)
assert obj == pageaddr + hdrsize + pos_obj
def test_malloc_common_case():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
assert ac.total_memory_used == 0 # so far
obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 3, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 0*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 6, 4*WORD, obj)
assert ac.total_memory_used == 11*2*WORD
def test_malloc_mixed_sizes():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#23..2 ")
obj = ac.malloc(2*WORD); chkob(ac, 1, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 2, 3*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 5, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 3
obj = ac.malloc(2*WORD); chkob(ac, 4, 0*WORD, obj) # 4th page -> size 2
obj = ac.malloc(3*WORD); chkob(ac, 3, 3*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 4, 2*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 6, 0*WORD, obj) # 6th page -> size 3
obj = ac.malloc(2*WORD); chkob(ac, 4, 4*WORD, obj)
obj = ac.malloc(3*WORD); chkob(ac, 6, 3*WORD, obj)
def test_malloc_from_partial_page():
pagesize = hdrsize + 18*WORD
ac = arena_collection_for_test(pagesize, "/.", fill_with_objects=2)
page = getpage(ac, 0)
assert page.nfree == 3
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 2*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 2*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 0, 6*WORD, obj)
assert page.nfree == 1
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 10*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 10*WORD, obj)
assert page.nfree == 0
assert ac._nuninitialized(page, 2) == 3
chkob(ac, 0, 12*WORD, page.freeblock)
#
obj = ac.malloc(2*WORD); chkob(ac, 0, 12*WORD, obj)
assert ac._nuninitialized(page, 2) == 2
obj = ac.malloc(2*WORD); chkob(ac, 0, 14*WORD, obj)
obj = ac.malloc(2*WORD); chkob(ac, 0, 16*WORD, obj)
assert page.nfree == 0
assert ac._nuninitialized(page, 2) == 0
obj = ac.malloc(2*WORD); chkob(ac, 1, 0*WORD, obj)
def test_malloc_new_arena():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "### ")
arena_size = ac.arena_size
obj = ac.malloc(2*WORD); chkob(ac, 3, 0*WORD, obj) # 3rd page -> size 2
#
del ac.allocate_new_arena # restore the one from the class
obj = ac.malloc(3*WORD) # need a new arena
assert ac.num_uninitialized_pages == (arena_size // ac.page_size
- 1 # the just-allocated page
)
class OkToFree(object):
def __init__(self, ac, answer, multiarenas=False):
assert callable(answer) or 0.0 <= answer <= 1.0
self.ac = ac
self.answer = answer
self.multiarenas = multiarenas
self.lastnum = 0.0
self.seen = {}
def __call__(self, addr):
if callable(self.answer):
ok_to_free = self.answer(addr)
else:
self.lastnum += self.answer
ok_to_free = self.lastnum >= 1.0
if ok_to_free:
self.lastnum -= 1.0
if self.multiarenas:
key = (addr.arena, addr.offset)
else:
key = addr - self.ac._startpageaddr
assert key not in self.seen
self.seen[key] = ok_to_free
return ok_to_free
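# OkToFree usage sketch: OkToFree(ac, False) frees nothing, OkToFree(ac, True)
# frees everything, and a fractional answer such as 0.5 frees every second
# object visited (the fraction accumulates in 'lastnum' until it crosses 1.0);
# 'seen' records the decision per address so the tests can assert exactly
# which objects were visited.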
def test_mass_free_partial_remains():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2)
ok_to_free = OkToFree(ac, False)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 2*WORD: False}
page = getpage(ac, 0)
assert page == ac.page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 1
assert page.nfree == 0
chkob(ac, 0, 4*WORD, page.freeblock)
assert freepages(ac) == NULL
def test_mass_free_emptied_page():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "2", fill_with_objects=2)
ok_to_free = OkToFree(ac, True)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: True,
hdrsize + 2*WORD: True}
pageaddr = pagenum(ac, 0)
assert pageaddr == freepages(ac)
assert pageaddr.address[0] == NULL
assert ac.page_for_size[2] == PAGE_NULL
def test_mass_free_full_remains_full():
pagesize = hdrsize + 7*WORD
ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2)
ok_to_free = OkToFree(ac, False)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 2*WORD: False,
hdrsize + 4*WORD: False}
page = getpage(ac, 0)
assert page == ac.full_page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 0
assert page.nfree == 0
assert freepages(ac) == NULL
assert ac.page_for_size[2] == PAGE_NULL
def test_mass_free_full_is_partially_emptied():
pagesize = hdrsize + 9*WORD
ac = arena_collection_for_test(pagesize, "#", fill_with_objects=2)
ok_to_free = OkToFree(ac, 0.5)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 2*WORD: True,
hdrsize + 4*WORD: False,
hdrsize + 6*WORD: True}
page = getpage(ac, 0)
pageaddr = pagenum(ac, 0)
assert page == ac.page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 0
assert page.nfree == 2
assert page.freeblock == pageaddr + hdrsize + 2*WORD
assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
assert page.freeblock.address[0].address[0] == pageaddr + hdrsize + 8*WORD
assert freepages(ac) == NULL
assert ac.full_page_for_size[2] == PAGE_NULL
def test_mass_free_half_page_remains():
pagesize = hdrsize + 24*WORD
ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2)
page = getpage(ac, 0)
assert ac._nuninitialized(page, 2) == 4
assert page.nfree == 4
#
ok_to_free = OkToFree(ac, False)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 4*WORD: False,
hdrsize + 8*WORD: False,
hdrsize + 12*WORD: False}
page = getpage(ac, 0)
pageaddr = pagenum(ac, 0)
assert page == ac.page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 4
assert page.nfree == 4
assert page.freeblock == pageaddr + hdrsize + 2*WORD
assert page.freeblock.address[0] == pageaddr + hdrsize + 6*WORD
assert page.freeblock.address[0].address[0] == \
pageaddr + hdrsize + 10*WORD
assert page.freeblock.address[0].address[0].address[0] == \
pageaddr + hdrsize + 14*WORD
assert freepages(ac) == NULL
assert ac.full_page_for_size[2] == PAGE_NULL
def test_mass_free_half_page_becomes_more_free():
pagesize = hdrsize + 24*WORD
ac = arena_collection_for_test(pagesize, "/", fill_with_objects=2)
page = getpage(ac, 0)
assert ac._nuninitialized(page, 2) == 4
assert page.nfree == 4
#
ok_to_free = OkToFree(ac, 0.5)
ac.mass_free(ok_to_free)
assert ok_to_free.seen == {hdrsize + 0*WORD: False,
hdrsize + 4*WORD: True,
hdrsize + 8*WORD: False,
hdrsize + 12*WORD: True}
page = getpage(ac, 0)
pageaddr = pagenum(ac, 0)
assert page == ac.page_for_size[2]
assert page.nextpage == PAGE_NULL
assert ac._nuninitialized(page, 2) == 4
assert page.nfree == 6
fb = page.freeblock
assert fb == pageaddr + hdrsize + 2*WORD
assert fb.address[0] == pageaddr + hdrsize + 4*WORD
assert fb.address[0].address[0] == pageaddr + hdrsize + 6*WORD
assert fb.address[0].address[0].address[0] == \
pageaddr + hdrsize + 10*WORD
assert fb.address[0].address[0].address[0].address[0] == \
pageaddr + hdrsize + 12*WORD
assert fb.address[0].address[0].address[0].address[0].address[0] == \
pageaddr + hdrsize + 14*WORD
assert freepages(ac) == NULL
assert ac.full_page_for_size[2] == PAGE_NULL
# ____________________________________________________________
def test_random(incremental=False):
import random
pagesize = hdrsize + 24*WORD
num_pages = 3
ac = arena_collection_for_test(pagesize, " " * num_pages)
live_objects = {}
#
# Run the test until three arenas are freed. This is a quick test
# that the arenas are really freed by the logic.
class DoneTesting(Exception):
counter = 0
def my_allocate_new_arena():
# the following output looks cool on a 112-character-wide terminal.
lst = sorted(ac._all_arenas(), key=lambda a: a.base.arena._arena_index)
for a in lst:
print a.base.arena, a.base.arena.usagemap
print '-' * 80
ac.__class__.allocate_new_arena(ac)
a = ac.current_arena.base.arena
def my_mark_freed():
a.freed = True
DoneTesting.counter += 1
if DoneTesting.counter > 3:
raise DoneTesting
a.mark_freed = my_mark_freed
ac.allocate_new_arena = my_allocate_new_arena
def allocate_object(live_objects):
size_class = random.randrange(1, 7)
obj = ac.malloc(size_class * WORD)
at = (obj.arena, obj.offset)
assert at not in live_objects
live_objects[at] = size_class * WORD
try:
while True:
#
# Allocate some more objects
for i in range(random.randrange(50, 100)):
allocate_object(live_objects)
#
# Free half the objects, randomly
ok_to_free = OkToFree(ac, lambda obj: random.random() < 0.5,
multiarenas=True)
live_objects_extra = {}
fresh_extra = 0
if not incremental:
ac.mass_free(ok_to_free)
else:
ac.mass_free_prepare()
while not ac.mass_free_incremental(ok_to_free,
random.randrange(1, 3)):
print '[]'
prev = ac.total_memory_used
allocate_object(live_objects_extra)
fresh_extra += ac.total_memory_used - prev
#
# Check that we have seen all objects
assert sorted(ok_to_free.seen) == sorted(live_objects)
surviving_total_size = fresh_extra
for at, freed in ok_to_free.seen.items():
if freed:
del live_objects[at]
else:
surviving_total_size += live_objects[at]
assert ac.total_memory_used == surviving_total_size
#
assert not (set(live_objects) & set(live_objects_extra))
live_objects.update(live_objects_extra)
#
except DoneTesting:
pass
def test_random_incremental():
test_random(incremental=True)
```
#### File: memory/gctransform/asmgcroot.py
```python
from rpython.flowspace.model import (Constant, Variable, Block, Link,
copygraph, SpaceOperation, checkgraph)
from rpython.rlib.debug import ll_assert
from rpython.rlib.nonconst import NonConstant
from rpython.rlib import rgil
from rpython.rtyper.annlowlevel import llhelper
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.memory.gctransform.framework import (
BaseFrameworkGCTransformer, BaseRootWalker)
from rpython.rtyper.llannotation import SomeAddress
from rpython.rtyper.rbuiltin import gen_cast
from rpython.translator.unsimplify import varoftype
from rpython.translator.tool.cbuild import ExternalCompilationInfo
import sys
#
# This transformer avoids the use of a shadow stack in a completely
# platform-specific way, by directing genc to insert asm() special
# instructions in the C source, which are recognized by GCC.
# The .s file produced by GCC is then parsed by trackgcroot.py.
#
IS_64_BITS = sys.maxint > 2147483647
class AsmGcRootFrameworkGCTransformer(BaseFrameworkGCTransformer):
_asmgcc_save_restore_arguments = None
_seen_gctransformer_hint_close_stack = None
def push_roots(self, hop, keep_current_args=False):
livevars = self.get_livevars_for_roots(hop, keep_current_args)
self.num_pushs += len(livevars)
return livevars
def pop_roots(self, hop, livevars):
if not livevars:
return
# mark the values as gc roots
for var in livevars:
v_adr = gen_cast(hop.llops, llmemory.Address, var)
v_newaddr = hop.genop("direct_call", [c_asm_gcroot, v_adr],
resulttype=llmemory.Address)
hop.genop("gc_reload_possibly_moved", [v_newaddr, var])
def build_root_walker(self):
return AsmStackRootWalker(self)
def mark_call_cannotcollect(self, hop, name):
hop.genop("direct_call", [c_asm_nocollect, name])
def gct_direct_call(self, hop):
fnptr = hop.spaceop.args[0].value
try:
close_stack = fnptr._obj._callable._gctransformer_hint_close_stack_
except AttributeError:
close_stack = False
if close_stack:
self.handle_call_with_close_stack(hop)
else:
BaseFrameworkGCTransformer.gct_direct_call(self, hop)
def handle_call_with_close_stack(self, hop):
fnptr = hop.spaceop.args[0].value
if self._seen_gctransformer_hint_close_stack is None:
self._seen_gctransformer_hint_close_stack = {}
if fnptr._obj.graph not in self._seen_gctransformer_hint_close_stack:
self._transform_hint_close_stack(fnptr)
self._seen_gctransformer_hint_close_stack[fnptr._obj.graph] = True
#
livevars = self.push_roots(hop)
self.default(hop)
self.pop_roots(hop, livevars)
def _transform_hint_close_stack(self, fnptr):
        # We cannot easily pass a variable number of arguments across
        # the call to the pypy_asm_stackwalk helper. So we store
# them away and restore them. More precisely, we need to
# replace 'graph' with code that saves the arguments, and make
# a new graph that starts with restoring the arguments.
if self._asmgcc_save_restore_arguments is None:
self._asmgcc_save_restore_arguments = {}
sradict = self._asmgcc_save_restore_arguments
sra = [] # list of pointers to raw-malloced containers for args
seen = {}
FUNC1 = lltype.typeOf(fnptr).TO
for TYPE in FUNC1.ARGS:
if isinstance(TYPE, lltype.Ptr):
TYPE = llmemory.Address
num = seen.get(TYPE, 0)
seen[TYPE] = num + 1
key = (TYPE, num)
if key not in sradict:
CONTAINER = lltype.FixedSizeArray(TYPE, 1)
p = lltype.malloc(CONTAINER, flavor='raw', zero=True,
immortal=True)
sradict[key] = Constant(p, lltype.Ptr(CONTAINER))
sra.append(sradict[key])
#
# make a copy of the graph that will reload the values
graph = fnptr._obj.graph
graph2 = copygraph(graph)
#
# edit the original graph to only store the value of the arguments
block = Block(graph.startblock.inputargs)
c_item0 = Constant('item0', lltype.Void)
assert len(block.inputargs) == len(sra)
for v_arg, c_p in zip(block.inputargs, sra):
if isinstance(v_arg.concretetype, lltype.Ptr):
v_adr = varoftype(llmemory.Address)
block.operations.append(
SpaceOperation("cast_ptr_to_adr", [v_arg], v_adr))
v_arg = v_adr
v_void = varoftype(lltype.Void)
block.operations.append(
SpaceOperation("bare_setfield", [c_p, c_item0, v_arg], v_void))
#
# call asm_stackwalk(graph2)
FUNC2 = lltype.FuncType([], FUNC1.RESULT)
fnptr2 = lltype.functionptr(FUNC2,
fnptr._obj._name + '_reload',
graph=graph2)
c_fnptr2 = Constant(fnptr2, lltype.Ptr(FUNC2))
HELPERFUNC = lltype.FuncType([lltype.Ptr(FUNC2),
ASM_FRAMEDATA_HEAD_PTR], FUNC1.RESULT)
v_asm_stackwalk = varoftype(lltype.Ptr(HELPERFUNC), "asm_stackwalk")
block.operations.append(
SpaceOperation("cast_pointer", [c_asm_stackwalk], v_asm_stackwalk))
v_result = varoftype(FUNC1.RESULT)
block.operations.append(
SpaceOperation("indirect_call", [v_asm_stackwalk, c_fnptr2,
c_gcrootanchor,
Constant(None, lltype.Void)],
v_result))
block.closeblock(Link([v_result], graph.returnblock))
graph.startblock = block
#
# edit the copy of the graph to reload the values
block2 = graph2.startblock
block1 = Block([])
reloadedvars = []
for v, c_p in zip(block2.inputargs, sra):
v = v.copy()
if isinstance(v.concretetype, lltype.Ptr):
w = varoftype(llmemory.Address)
else:
w = v
block1.operations.append(SpaceOperation('getfield',
[c_p, c_item0], w))
if w is not v:
block1.operations.append(SpaceOperation('cast_adr_to_ptr',
[w], v))
reloadedvars.append(v)
block1.closeblock(Link(reloadedvars, block2))
graph2.startblock = block1
#
checkgraph(graph)
checkgraph(graph2)
class AsmStackRootWalker(BaseRootWalker):
def __init__(self, gctransformer):
BaseRootWalker.__init__(self, gctransformer)
def _asm_callback():
self.walk_stack_from()
self._asm_callback = _asm_callback
self._shape_decompressor = ShapeDecompressor()
self._with_jit = hasattr(gctransformer.translator, '_jit2gc')
if self._with_jit:
jit2gc = gctransformer.translator._jit2gc
self.frame_tid = jit2gc['frame_tid']
self.gctransformer = gctransformer
#
# unless overridden in need_thread_support():
self.belongs_to_current_thread = lambda framedata: True
def need_stacklet_support(self, gctransformer, getfn):
from rpython.annotator import model as annmodel
from rpython.rlib import _stacklet_asmgcc
# stacklet support: BIG HACK for rlib.rstacklet
_stacklet_asmgcc._asmstackrootwalker = self # as a global! argh
_stacklet_asmgcc.complete_destrptr(gctransformer)
#
def gc_detach_callback_pieces():
anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
result = llmemory.NULL
framedata = anchor.address[1]
while framedata != anchor:
next = framedata.address[1]
if self.belongs_to_current_thread(framedata):
# detach it
prev = framedata.address[0]
prev.address[1] = next
next.address[0] = prev
# update the global stack counter
rffi.stackcounter.stacks_counter -= 1
# reattach framedata into the singly-linked list 'result'
framedata.address[0] = rffi.cast(llmemory.Address, -1)
framedata.address[1] = result
result = framedata
framedata = next
return result
#
def gc_reattach_callback_pieces(pieces):
anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
while pieces != llmemory.NULL:
framedata = pieces
pieces = pieces.address[1]
# attach 'framedata' into the normal doubly-linked list
following = anchor.address[1]
following.address[0] = framedata
framedata.address[1] = following
anchor.address[1] = framedata
framedata.address[0] = anchor
# update the global stack counter
rffi.stackcounter.stacks_counter += 1
#
s_addr = SomeAddress()
s_None = annmodel.s_None
self.gc_detach_callback_pieces_ptr = getfn(gc_detach_callback_pieces,
[], s_addr)
        self.gc_reattach_callback_pieces_ptr = getfn(
            gc_reattach_callback_pieces, [s_addr], s_None)
def need_thread_support(self, gctransformer, getfn):
# Threads supported "out of the box" by the rest of the code.
# The whole code in this function is only there to support
# fork()ing in a multithreaded process :-(
# For this, we need to handle gc_thread_start and gc_thread_die
# to record the mapping {thread_id: stack_start}, and
# gc_thread_before_fork and gc_thread_after_fork to get rid of
        # all ASM_FRAMEDATA structures that do not belong to the current
# thread after a fork().
from rpython.rlib import rthread
from rpython.memory.support import AddressDict
from rpython.memory.support import copy_without_null_values
from rpython.annotator import model as annmodel
gcdata = self.gcdata
def get_aid():
"""Return the thread identifier, cast to an (opaque) address."""
return llmemory.cast_int_to_adr(rthread.get_ident())
def thread_start():
value = llmemory.cast_int_to_adr(llop.stack_current(lltype.Signed))
gcdata.aid2stack.setitem(get_aid(), value)
thread_start._always_inline_ = True
def thread_setup():
gcdata.aid2stack = AddressDict()
gcdata.dead_threads_count = 0
# to also register the main thread's stack
thread_start()
thread_setup._always_inline_ = True
def thread_die():
gcdata.aid2stack.setitem(get_aid(), llmemory.NULL)
# from time to time, rehash the dictionary to remove
# old NULL entries
gcdata.dead_threads_count += 1
if (gcdata.dead_threads_count & 511) == 0:
copy = copy_without_null_values(gcdata.aid2stack)
gcdata.aid2stack.delete()
gcdata.aid2stack = copy
def belongs_to_current_thread(framedata):
# xxx obscure: the answer is Yes if, as a pointer, framedata
# lies between the start of the current stack and the top of it.
stack_start = gcdata.aid2stack.get(get_aid(), llmemory.NULL)
ll_assert(stack_start != llmemory.NULL,
"current thread not found in gcdata.aid2stack!")
stack_stop = llmemory.cast_int_to_adr(
llop.stack_current(lltype.Signed))
return (stack_start <= framedata <= stack_stop or
stack_start >= framedata >= stack_stop)
self.belongs_to_current_thread = belongs_to_current_thread
def thread_before_fork():
# before fork(): collect all ASM_FRAMEDATA structures that do
# not belong to the current thread, and move them out of the
# way, i.e. out of the main circular doubly linked list.
detached_pieces = llmemory.NULL
anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
initialframedata = anchor.address[1]
while initialframedata != anchor: # while we have not looped back
if not belongs_to_current_thread(initialframedata):
# Unlink it
prev = initialframedata.address[0]
next = initialframedata.address[1]
prev.address[1] = next
next.address[0] = prev
# Link it to the singly linked list 'detached_pieces'
initialframedata.address[0] = detached_pieces
detached_pieces = initialframedata
rffi.stackcounter.stacks_counter -= 1
# Then proceed to the next piece of stack
initialframedata = initialframedata.address[1]
return detached_pieces
def thread_after_fork(result_of_fork, detached_pieces):
if result_of_fork == 0:
# We are in the child process. Assumes that only the
# current thread survived. All the detached_pieces
# are pointers in other stacks, so have likely been
# freed already by the multithreaded library.
# Nothing more for us to do.
pass
else:
# We are still in the parent process. The fork() may
# have succeeded or not, but that's irrelevant here.
# We need to reattach the detached_pieces now, to the
# circular doubly linked list at 'gcrootanchor'. The
# order is not important.
anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
while detached_pieces != llmemory.NULL:
reattach = detached_pieces
detached_pieces = detached_pieces.address[0]
a_next = anchor.address[1]
reattach.address[0] = anchor
reattach.address[1] = a_next
anchor.address[1] = reattach
a_next.address[0] = reattach
rffi.stackcounter.stacks_counter += 1
self.thread_setup = thread_setup
self.thread_start_ptr = getfn(thread_start, [], annmodel.s_None,
inline=True)
self.thread_die_ptr = getfn(thread_die, [], annmodel.s_None)
self.thread_before_fork_ptr = getfn(thread_before_fork, [],
SomeAddress())
self.thread_after_fork_ptr = getfn(thread_after_fork,
[annmodel.SomeInteger(),
SomeAddress()],
annmodel.s_None)
#
# check that the order of the need_*() is correct for us: if we
# need both threads and stacklets, need_thread_support() must be
# called first, to initialize self.belongs_to_current_thread.
assert not hasattr(self, 'gc_detach_callback_pieces_ptr')
def walk_stack_roots(self, collect_stack_root, is_minor=False):
gcdata = self.gcdata
gcdata._gc_collect_stack_root = collect_stack_root
gcdata._gc_collect_is_minor = is_minor
pypy_asm_stackwalk(llhelper(ASM_CALLBACK_PTR, self._asm_callback),
gcrootanchor)
def walk_stack_from(self):
curframe = lltype.malloc(WALKFRAME, flavor='raw')
otherframe = lltype.malloc(WALKFRAME, flavor='raw')
# Walk over all the pieces of stack. They are in a circular linked
# list of structures of 7 words, the 2 first words being prev/next.
# The anchor of this linked list is:
anchor = llmemory.cast_ptr_to_adr(gcrootanchor)
initialframedata = anchor.address[1]
stackscount = 0
while initialframedata != anchor: # while we have not looped back
self.walk_frames(curframe, otherframe, initialframedata)
# Then proceed to the next piece of stack
initialframedata = initialframedata.address[1]
stackscount += 1
#
# for the JIT: rpy_fastgil may contain an extra framedata
rpy_fastgil = rgil.gil_fetch_fastgil().signed[0]
if rpy_fastgil != 1:
ll_assert(rpy_fastgil != 0, "walk_stack_from doesn't have the GIL")
initialframedata = rffi.cast(llmemory.Address, rpy_fastgil)
#
# very rare issue: initialframedata.address[0] is uninitialized
# in this case, but "retaddr = callee.frame_address.address[0]"
# reads it. If it happens to be exactly a valid return address
# inside the C code, then bad things occur.
initialframedata.address[0] = llmemory.NULL
#
self.walk_frames(curframe, otherframe, initialframedata)
stackscount += 1
#
expected = rffi.stackcounter.stacks_counter
if NonConstant(0):
rffi.stackcounter.stacks_counter += 42 # hack to force it
ll_assert(not (stackscount < expected), "non-closed stacks around")
ll_assert(not (stackscount > expected), "stacks counter corruption?")
lltype.free(otherframe, flavor='raw')
lltype.free(curframe, flavor='raw')
def walk_frames(self, curframe, otherframe, initialframedata):
self.fill_initial_frame(curframe, initialframedata)
# Loop over all the frames in the stack
while self.walk_to_parent_frame(curframe, otherframe):
swap = curframe
curframe = otherframe # caller becomes callee
otherframe = swap
def fill_initial_frame(self, curframe, initialframedata):
# Read the information provided by initialframedata
initialframedata += 2*sizeofaddr #skip the prev/next words at the start
reg = 0
while reg < CALLEE_SAVED_REGS:
# NB. 'initialframedata' stores the actual values of the
# registers %ebx etc., and if these values are modified
# they are reloaded by pypy_asm_stackwalk(). By contrast,
# 'regs_stored_at' merely points to the actual values
# from the 'initialframedata'.
curframe.regs_stored_at[reg] = initialframedata + reg*sizeofaddr
reg += 1
curframe.frame_address = initialframedata.address[CALLEE_SAVED_REGS]
def walk_to_parent_frame(self, callee, caller):
"""Starting from 'callee', walk the next older frame on the stack
and fill 'caller' accordingly. Also invokes the collect_stack_root()
callback from the GC code for each GC root found in 'caller'.
"""
#
# The gcmap table is a list of entries, two machine words each:
# void *SafePointAddress;
# int Shape;
#
# A "safe point" is the return address of a call.
# The "shape" of a safe point is a list of integers
# that represent "locations". A "location" can be
# either in the stack or in a register. See
# getlocation() for the decoding of this integer.
# The locations stored in a "shape" are as follows:
#
# * The "location" of the return address. This is just
# after the end of the frame of 'callee'; it is the
# first word of the frame of 'caller' (see picture
# below).
#
# * Four "locations" that specify where the function saves
# each of the four callee-saved registers (%ebx, %esi,
# %edi, %ebp).
#
# * The number of live GC roots around the call.
#
        #   * For each GC root, an integer that specifies where
# GC pointer is stored. This is a "location" too.
#
# XXX the details are completely specific to X86!!!
# a picture of the stack may help:
# ^ ^ ^
# | ... | to older frames
# +--------------+
# | ret addr | <------ caller_frame (addr of retaddr)
# | ... |
# | caller frame |
# | ... |
# +--------------+
# | ret addr | <------ callee_frame (addr of retaddr)
# | ... |
# | callee frame |
# | ... | lower addresses
# +--------------+ v v v
#
retaddr = callee.frame_address.address[0]
#
# try to locate the caller function based on retaddr.
# set up self._shape_decompressor.
#
ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP].address[0]
self.locate_caller_based_on_retaddr(retaddr, ebp_in_caller)
#
# found! Enumerate the GC roots in the caller frame
#
collect_stack_root = self.gcdata._gc_collect_stack_root
gc = self.gc
while True:
location = self._shape_decompressor.next()
if location == 0:
break
addr = self.getlocation(callee, ebp_in_caller, location)
if gc.points_to_valid_gc_object(addr):
collect_stack_root(gc, addr)
#
# small hack: the JIT reserves THREADLOCAL_OFS's last bit for
# us. We use it to store an "already traced past this frame"
# flag.
if self._with_jit and self.gcdata._gc_collect_is_minor:
if self.mark_jit_frame_can_stop(callee):
return False
#
# track where the caller_frame saved the registers from its own
# caller
#
reg = CALLEE_SAVED_REGS - 1
while reg >= 0:
location = self._shape_decompressor.next()
addr = self.getlocation(callee, ebp_in_caller, location)
caller.regs_stored_at[reg] = addr
reg -= 1
location = self._shape_decompressor.next()
caller.frame_address = self.getlocation(callee, ebp_in_caller,
location)
# we get a NULL marker to mean "I'm the frame
# of the entry point, stop walking"
return caller.frame_address != llmemory.NULL
def locate_caller_based_on_retaddr(self, retaddr, ebp_in_caller):
gcmapstart = llop.gc_asmgcroot_static(llmemory.Address, 0)
gcmapend = llop.gc_asmgcroot_static(llmemory.Address, 1)
item = search_in_gcmap(gcmapstart, gcmapend, retaddr)
if item:
self._shape_decompressor.setpos(item.signed[1])
return
if not self._shape_decompressor.sorted:
            # the item may not have been found because the main array
            # was not sorted. Sort it and try again.
win32_follow_gcmap_jmp(gcmapstart, gcmapend)
sort_gcmap(gcmapstart, gcmapend)
self._shape_decompressor.sorted = True
item = search_in_gcmap(gcmapstart, gcmapend, retaddr)
if item:
self._shape_decompressor.setpos(item.signed[1])
return
if self._with_jit:
# item not found. We assume that it's a JIT-generated
# location -- but we check for consistency that ebp points
# to a JITFRAME object.
from rpython.jit.backend.llsupport.jitframe import STACK_DEPTH_OFS
tid = self.gc.get_possibly_forwarded_type_id(ebp_in_caller)
if (rffi.cast(lltype.Signed, tid) ==
rffi.cast(lltype.Signed, self.frame_tid)):
# fish the depth
extra_stack_depth = (ebp_in_caller + STACK_DEPTH_OFS).signed[0]
ll_assert((extra_stack_depth & (rffi.sizeof(lltype.Signed) - 1))
== 0, "asmgcc: misaligned extra_stack_depth")
extra_stack_depth //= rffi.sizeof(lltype.Signed)
self._shape_decompressor.setjitframe(extra_stack_depth)
return
llop.debug_fatalerror(lltype.Void, "cannot find gc roots!")
def getlocation(self, callee, ebp_in_caller, location):
"""Get the location in the 'caller' frame of a variable, based
on the integer 'location' that describes it. All locations are
computed based on information saved by the 'callee'.
"""
ll_assert(location >= 0, "negative location")
kind = location & LOC_MASK
offset = location & ~ LOC_MASK
if IS_64_BITS:
offset <<= 1
if kind == LOC_REG: # register
if location == LOC_NOWHERE:
return llmemory.NULL
reg = (location >> 2) - 1
ll_assert(reg < CALLEE_SAVED_REGS, "bad register location")
return callee.regs_stored_at[reg]
elif kind == LOC_ESP_PLUS: # in the caller stack frame at N(%esp)
esp_in_caller = callee.frame_address + sizeofaddr
return esp_in_caller + offset
elif kind == LOC_EBP_PLUS: # in the caller stack frame at N(%ebp)
return ebp_in_caller + offset
else: # kind == LOC_EBP_MINUS: at -N(%ebp)
return ebp_in_caller - offset
def mark_jit_frame_can_stop(self, callee):
location = self._shape_decompressor.get_threadlocal_loc()
if location == LOC_NOWHERE:
return False
addr = self.getlocation(callee, llmemory.NULL, location)
#
x = addr.signed[0]
if x & 1:
return True # this JIT stack frame is already marked!
else:
addr.signed[0] = x | 1 # otherwise, mark it but don't stop
return False
LOC_REG = 0
LOC_ESP_PLUS = 1
LOC_EBP_PLUS = 2
LOC_EBP_MINUS = 3
LOC_MASK = 0x03
LOC_NOWHERE = LOC_REG | 0
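# Illustrative decoder for the "location" integers above, mirroring
# getlocation() in the 32-bit case (i.e. without the IS_64_BITS offset
# shift): the two low bits select the kind, the remaining bits hold a
# register index or a byte offset. Toy helper, not used by the walker.
def _describe_location(location):
    kind = location & LOC_MASK
    if kind == LOC_REG:
        if location == LOC_NOWHERE:
            return "nowhere"
        return "callee-saved register #%d" % ((location >> 2) - 1)
    offset = location & ~ LOC_MASK
    if kind == LOC_ESP_PLUS:
        return "at %d(%%esp)" % offset
    elif kind == LOC_EBP_PLUS:
        return "at %d(%%ebp)" % offset
    else:
        return "at -%d(%%ebp)" % offset
assert _describe_location(LOC_EBP_PLUS | 8) == "at 8(%ebp)"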
# ____________________________________________________________
sizeofaddr = llmemory.sizeof(llmemory.Address)
arrayitemsize = 2 * sizeofaddr
def binary_search(start, end, addr1):
"""Search for an element in a sorted array.
The interval from the start address (included) to the end address
(excluded) is assumed to be a sorted arrays of pairs (addr1, addr2).
This searches for the item with a given addr1 and returns its
address. If not found exactly, it tries to return the address
of the item left of addr1 (i.e. such that result.address[0] < addr1).
"""
count = (end - start) // arrayitemsize
while count > 1:
middleindex = count // 2
middle = start + middleindex * arrayitemsize
if addr1 < middle.address[0]:
count = middleindex
else:
start = middle
count -= middleindex
return start
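# Minimal list-based analogue of binary_search() above, illustrating the
# "fall back to the entry on the left" behaviour on a sorted list of
# (addr1, addr2) pairs (toy helper, not used by the real code):
def _binary_search_pairs(pairs, key):
    start, count = 0, len(pairs)
    while count > 1:
        middleindex = count // 2
        if key < pairs[start + middleindex][0]:
            count = middleindex
        else:
            start += middleindex
            count -= middleindex
    return start
assert _binary_search_pairs([(10, 0), (20, 0), (30, 0)], 25) == 1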
def search_in_gcmap(gcmapstart, gcmapend, retaddr):
item = binary_search(gcmapstart, gcmapend, retaddr)
if item.address[0] == retaddr:
return item # found
# 'retaddr' not exactly found. Check that 'item' is the start of a
# compressed range that includes 'retaddr'.
if retaddr > item.address[0] and item.signed[1] < 0:
return item # ok
else:
return llmemory.NULL # failed
def search_in_gcmap2(gcmapstart, gcmapend, retaddr):
# same as 'search_in_gcmap', but without range checking support
# (item.signed[1] is an address in this case, not a signed at all!)
item = binary_search(gcmapstart, gcmapend, retaddr)
if item.address[0] == retaddr:
return item.address[1] # found
else:
return llmemory.NULL # failed
def sort_gcmap(gcmapstart, gcmapend):
count = (gcmapend - gcmapstart) // arrayitemsize
qsort(gcmapstart,
rffi.cast(rffi.SIZE_T, count),
rffi.cast(rffi.SIZE_T, arrayitemsize),
llhelper(QSORT_CALLBACK_PTR, _compare_gcmap_entries))
def replace_dead_entries_with_nulls(start, end):
# replace the dead entries (null value) with a null key.
count = (end - start) // arrayitemsize - 1
while count >= 0:
item = start + count * arrayitemsize
if item.address[1] == llmemory.NULL:
item.address[0] = llmemory.NULL
count -= 1
if sys.platform == 'win32':
def win32_follow_gcmap_jmp(start, end):
# The initial gcmap table contains addresses to a JMP
# instruction that jumps indirectly to the real code.
# Replace them with the target addresses.
assert rffi.SIGNEDP is rffi.LONGP, "win64 support missing"
while start < end:
code = rffi.cast(rffi.CCHARP, start.address[0])[0]
if code == '\xe9': # jmp
rel32 = rffi.cast(rffi.SIGNEDP, start.address[0]+1)[0]
target = start.address[0] + (rel32 + 5)
start.address[0] = target
start += arrayitemsize
else:
def win32_follow_gcmap_jmp(start, end):
pass
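# Illustrative check of the JMP-following arithmetic above: an 0xE9-style
# jmp at address A with displacement rel32 targets A + rel32 + 5, the +5
# accounting for the 5-byte length of the instruction itself
# (toy helper, not used by the real code):
def _jmp_target(jmp_addr, rel32):
    return jmp_addr + rel32 + 5
assert _jmp_target(0x1000, 0x20) == 0x1025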
def _compare_gcmap_entries(addr1, addr2):
key1 = addr1.address[0]
key2 = addr2.address[0]
if key1 < key2:
result = -1
elif key1 == key2:
result = 0
else:
result = 1
return rffi.cast(rffi.INT, result)
# ____________________________________________________________
class ShapeDecompressor:
_alloc_flavor_ = "raw"
sorted = False
def setpos(self, pos):
if pos < 0:
pos = ~ pos # can ignore this "range" marker here
gccallshapes = llop.gc_asmgcroot_static(llmemory.Address, 2)
self.addr = gccallshapes + pos
self.jit_index = -1
def setjitframe(self, extra_stack_depth):
self.jit_index = 0
self.extra_stack_depth = extra_stack_depth
def next(self):
index = self.jit_index
if index < 0:
# case "outside the jit"
addr = self.addr
value = 0
while True:
b = ord(addr.char[0])
addr += 1
value += b
if b < 0x80:
break
value = (value - 0x80) << 7
self.addr = addr
return value
else:
# case "in the jit"
from rpython.jit.backend.x86.arch import FRAME_FIXED_SIZE
from rpython.jit.backend.x86.arch import PASS_ON_MY_FRAME
self.jit_index = index + 1
if index == 0:
# the jitframe is an object in EBP
return LOC_REG | ((INDEX_OF_EBP + 1) << 2)
if index == 1:
return 0
# the remaining returned values should be:
# saved %rbp
# saved %r15 or on 32bit:
# saved %r14 saved %ebp
# saved %r13 saved %edi
# saved %r12 saved %esi
# saved %rbx saved %ebx
# return addr return addr
stack_depth = PASS_ON_MY_FRAME + self.extra_stack_depth
if IS_64_BITS:
if index == 2: # rbp
return LOC_ESP_PLUS | (stack_depth << 2)
if index == 3: # r15
return LOC_ESP_PLUS | ((stack_depth + 5) << 2)
if index == 4: # r14
return LOC_ESP_PLUS | ((stack_depth + 4) << 2)
if index == 5: # r13
return LOC_ESP_PLUS | ((stack_depth + 3) << 2)
if index == 6: # r12
return LOC_ESP_PLUS | ((stack_depth + 2) << 2)
if index == 7: # rbx
return LOC_ESP_PLUS | ((stack_depth + 1) << 2)
if index == 8: # return addr
return (LOC_ESP_PLUS |
((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2))
else:
if index == 2: # ebp
return LOC_ESP_PLUS | (stack_depth << 2)
if index == 3: # edi
return LOC_ESP_PLUS | ((stack_depth + 3) << 2)
if index == 4: # esi
return LOC_ESP_PLUS | ((stack_depth + 2) << 2)
if index == 5: # ebx
return LOC_ESP_PLUS | ((stack_depth + 1) << 2)
if index == 6: # return addr
return (LOC_ESP_PLUS |
((FRAME_FIXED_SIZE + self.extra_stack_depth) << 2))
llop.debug_fatalerror(lltype.Void, "asmgcroot: invalid index")
return 0 # annotator fix
def get_threadlocal_loc(self):
index = self.jit_index
if index < 0:
return LOC_NOWHERE # case "outside the jit"
else:
# case "in the jit"
from rpython.jit.backend.x86.arch import THREADLOCAL_OFS, WORD
return (LOC_ESP_PLUS |
((THREADLOCAL_OFS // WORD + self.extra_stack_depth) << 2))
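# Illustrative encoder for the compressed shape integers that next()
# decodes above: values are stored big-endian in 7-bit groups, with bit
# 0x80 set on every byte except the last (toy helper, not used here):
def _encode_shape_int(value):
    chunks = [value & 0x7f]
    value >>= 7
    while value:
        chunks.append((value & 0x7f) | 0x80)
        value >>= 7
    chunks.reverse()
    return ''.join(chr(c) for c in chunks)
assert _encode_shape_int(133) == '\x81\x05'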
# ____________________________________________________________
#
# The special pypy_asm_stackwalk(), implemented directly in
# assembler, fills information about the current stack top in an
# ASM_FRAMEDATA array and invokes an RPython callback with it.
# An ASM_FRAMEDATA is an array of 5 values that describe everything
# we need to know about a stack frame:
#
# - the value that %ebx had when the current function started
# - the value that %esi had when the current function started
# - the value that %edi had when the current function started
# - the value that %ebp had when the current function started
# - frame address (actually the addr of the retaddr of the current function;
# that's the last word of the frame in memory)
#
# On 64 bits, it is an array of 7 values instead of 5:
#
# - %rbx, %r12, %r13, %r14, %r15, %rbp; and the frame address
#
if IS_64_BITS:
CALLEE_SAVED_REGS = 6
INDEX_OF_EBP = 5
FRAME_PTR = CALLEE_SAVED_REGS
else:
CALLEE_SAVED_REGS = 4 # there are 4 callee-saved registers
INDEX_OF_EBP = 3
FRAME_PTR = CALLEE_SAVED_REGS # the frame is at index 4 in the array
JIT_USE_WORDS = 2 + FRAME_PTR + 1
ASM_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([], lltype.Void))
# used internally by walk_stack_from()
WALKFRAME = lltype.Struct('WALKFRAME',
('regs_stored_at', # address of where the registers have been saved
lltype.FixedSizeArray(llmemory.Address, CALLEE_SAVED_REGS)),
('frame_address',
llmemory.Address),
)
# We have a circular doubly-linked list of all the ASM_FRAMEDATAs currently
# alive. The list's starting point is given by 'gcrootanchor', which is not
# a full ASM_FRAMEDATA but only contains the prev/next pointers:
ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference())
ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD',
('prev', ASM_FRAMEDATA_HEAD_PTR),
('next', ASM_FRAMEDATA_HEAD_PTR)
))
gcrootanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO, immortal=True)
gcrootanchor.prev = gcrootanchor
gcrootanchor.next = gcrootanchor
c_gcrootanchor = Constant(gcrootanchor, ASM_FRAMEDATA_HEAD_PTR)
eci = ExternalCompilationInfo(compile_extra=['-DPYPY_USE_ASMGCC'])
pypy_asm_stackwalk = rffi.llexternal('pypy_asm_stackwalk',
[ASM_CALLBACK_PTR,
ASM_FRAMEDATA_HEAD_PTR],
lltype.Signed,
sandboxsafe=True,
_nowrapper=True,
random_effects_on_gcobjs=True,
compilation_info=eci)
c_asm_stackwalk = Constant(pypy_asm_stackwalk,
lltype.typeOf(pypy_asm_stackwalk))
pypy_asm_gcroot = rffi.llexternal('pypy_asm_gcroot',
[llmemory.Address],
llmemory.Address,
sandboxsafe=True,
_nowrapper=True)
c_asm_gcroot = Constant(pypy_asm_gcroot, lltype.typeOf(pypy_asm_gcroot))
pypy_asm_nocollect = rffi.llexternal('pypy_asm_gc_nocollect',
[rffi.CCHARP], lltype.Void,
sandboxsafe=True,
_nowrapper=True)
c_asm_nocollect = Constant(pypy_asm_nocollect, lltype.typeOf(pypy_asm_nocollect))
QSORT_CALLBACK_PTR = lltype.Ptr(lltype.FuncType([llmemory.Address,
llmemory.Address], rffi.INT))
qsort = rffi.llexternal('qsort',
[llmemory.Address,
rffi.SIZE_T,
rffi.SIZE_T,
QSORT_CALLBACK_PTR],
lltype.Void,
sandboxsafe=True,
random_effects_on_gcobjs=False, # but has a callback
_nowrapper=True)
```
#### File: memory/test/test_transformed_gc.py
```python
import py
import inspect
from rpython.rlib.objectmodel import compute_hash, compute_identity_hash
from rpython.translator.c import gc
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
from rpython.memory.gctransform import framework, shadowstack
from rpython.rtyper.lltypesystem.lloperation import llop, void
from rpython.rlib.objectmodel import compute_unique_id, we_are_translated
from rpython.rlib.debug import ll_assert
from rpython.rlib import rgc
from rpython.conftest import option
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import LONG_BIT
WORD = LONG_BIT // 8
def rtype(func, inputtypes, specialize=True, gcname='ref',
backendopt=False, **extraconfigopts):
from rpython.translator.translator import TranslationContext
t = TranslationContext()
# XXX XXX XXX mess
t.config.translation.gc = gcname
t.config.translation.gcremovetypeptr = True
t.config.set(**extraconfigopts)
ann = t.buildannotator()
ann.build_types(func, inputtypes)
if specialize:
t.buildrtyper().specialize()
if backendopt:
from rpython.translator.backendopt.all import backend_optimizations
backend_optimizations(t)
if option.view:
t.viewcg()
return t
ARGS = lltype.FixedSizeArray(lltype.Signed, 3)
class GCTest(object):
gcpolicy = None
GC_CAN_MOVE = False
taggedpointers = False
def setup_class(cls):
cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1,
flavor='raw', zero=True)
funcs0 = []
funcs2 = []
cleanups = []
name_to_func = {}
mixlevelstuff = []
for fullname in dir(cls):
if not fullname.startswith('define'):
continue
definefunc = getattr(cls, fullname)
_, name = fullname.split('_', 1)
func_fixup = definefunc.im_func(cls)
cleanup = None
if isinstance(func_fixup, tuple):
func, cleanup, fixup = func_fixup
mixlevelstuff.append(fixup)
else:
func = func_fixup
func.func_name = "f_%s" % name
if cleanup:
cleanup.func_name = "clean_%s" % name
nargs = len(inspect.getargspec(func)[0])
name_to_func[name] = len(funcs0)
if nargs == 2:
funcs2.append(func)
funcs0.append(None)
elif nargs == 0:
funcs0.append(func)
funcs2.append(None)
else:
raise NotImplementedError(
"defined test functions should have 0/2 arguments")
# used to let test cleanup static root pointing to runtime
# allocated stuff
cleanups.append(cleanup)
def entrypoint(args):
num = args[0]
func = funcs0[num]
if func:
res = func()
else:
func = funcs2[num]
res = func(args[1], args[2])
cleanup = cleanups[num]
if cleanup:
cleanup()
return res
from rpython.translator.c.genc import CStandaloneBuilder
s_args = SomePtr(lltype.Ptr(ARGS))
t = rtype(entrypoint, [s_args], gcname=cls.gcname,
taggedpointers=cls.taggedpointers)
for fixup in mixlevelstuff:
if fixup:
fixup(t)
cbuild = CStandaloneBuilder(t, entrypoint, config=t.config,
gcpolicy=cls.gcpolicy)
db = cbuild.generate_graphs_for_llinterp()
entrypointptr = cbuild.getentrypointptr()
entrygraph = entrypointptr._obj.graph
if option.view:
t.viewcg()
cls.name_to_func = name_to_func
cls.entrygraph = entrygraph
cls.rtyper = t.rtyper
cls.db = db
def runner(self, name, transformer=False):
db = self.db
name_to_func = self.name_to_func
entrygraph = self.entrygraph
from rpython.rtyper.llinterp import LLInterpreter
llinterp = LLInterpreter(self.rtyper)
gct = db.gctransformer
if self.__class__.__dict__.get('_used', False):
teardowngraph = gct.frameworkgc__teardown_ptr.value._obj.graph
llinterp.eval_graph(teardowngraph, [])
self.__class__._used = True
# FIIIIISH
setupgraph = gct.frameworkgc_setup_ptr.value._obj.graph
# setup => resets the gc
llinterp.eval_graph(setupgraph, [])
def run(args):
ll_args = lltype.malloc(ARGS, immortal=True)
ll_args[0] = name_to_func[name]
for i in range(len(args)):
ll_args[1+i] = args[i]
res = llinterp.eval_graph(entrygraph, [ll_args])
return res
if transformer:
return run, gct
else:
return run
class GenericGCTests(GCTest):
GC_CAN_SHRINK_ARRAY = False
def define_instances(cls):
class A(object):
pass
class B(A):
def __init__(self, something):
self.something = something
def malloc_a_lot():
i = 0
first = None
while i < 10:
i += 1
a = somea = A()
a.last = first
first = a
j = 0
while j < 30:
b = B(somea)
b.last = first
j += 1
return 0
return malloc_a_lot
def test_instances(self):
run = self.runner("instances")
run([])
def define_llinterp_lists(cls):
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = [1] * 10
j = 0
while j < 30:
j += 1
a.append(j)
return 0
return malloc_a_lot
def test_llinterp_lists(self):
run = self.runner("llinterp_lists")
run([])
def define_llinterp_tuples(cls):
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = (1, 2, i)
b = [a] * 10
j = 0
while j < 20:
j += 1
b.append((1, j, i))
return 0
return malloc_a_lot
def test_llinterp_tuples(self):
run = self.runner("llinterp_tuples")
run([])
def define_llinterp_dict(self):
class A(object):
pass
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = (1, 2, i)
b = {a: A()}
j = 0
while j < 20:
j += 1
b[1, j, i] = A()
return 0
return malloc_a_lot
def test_llinterp_dict(self):
run = self.runner("llinterp_dict")
run([])
def skipdefine_global_list(cls):
gl = []
class Box:
def __init__(self):
self.lst = gl
box = Box()
def append_to_list(i, j):
box.lst.append([i] * 50)
llop.gc__collect(lltype.Void)
return box.lst[j][0]
return append_to_list, None, None
def test_global_list(self):
py.test.skip("doesn't fit in the model, tested elsewhere too")
run = self.runner("global_list")
res = run([0, 0])
assert res == 0
for i in range(1, 5):
res = run([i, i - 1])
assert res == i - 1 # crashes if constants are not considered roots
def define_string_concatenation(cls):
def concat(j, dummy):
lst = []
for i in range(j):
lst.append(str(i))
return len("".join(lst))
return concat
def test_string_concatenation(self):
run = self.runner("string_concatenation")
res = run([100, 0])
assert res == len(''.join([str(x) for x in range(100)]))
def define_nongc_static_root(cls):
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
def f():
t1 = lltype.malloc(T1)
t1.x = 42
static.p = t1
llop.gc__collect(lltype.Void)
return static.p.x
def cleanup():
static.p = lltype.nullptr(T1)
return f, cleanup, None
def test_nongc_static_root(self):
run = self.runner("nongc_static_root")
res = run([])
assert res == 42
def define_finalizer(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
def f(x, y):
a = A()
i = 0
while i < x:
i += 1
a = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
return f
def test_finalizer(self):
run = self.runner("finalizer")
        res = run([5, 42]) #XXX pure laziness here too
assert res == 6
def define_finalizer_calls_malloc(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class AAA(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
C()
class C(AAA):
def __del__(self):
b.num_deleted += 1
def f(x, y):
a = AAA()
i = 0
while i < x:
i += 1
a = AAA()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
return f
def test_finalizer_calls_malloc(self):
run = self.runner("finalizer_calls_malloc")
        res = run([5, 42]) #XXX pure laziness here too
assert res == 12
def define_finalizer_resurrects(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
b.a = self
def f(x, y):
a = A()
i = 0
while i < x:
i += 1
a = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
aid = b.a.id
b.a = None
# check that __del__ is not called again
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted * 10 + aid + 100 * (b.a is None)
return f
def test_finalizer_resurrects(self):
run = self.runner("finalizer_resurrects")
        res = run([5, 42]) #XXX pure laziness here too
assert 160 <= res <= 165
def define_custom_trace(cls):
#
S = lltype.GcStruct('S', ('x', llmemory.Address))
T = lltype.GcStruct('T', ('z', lltype.Signed))
offset_of_x = llmemory.offsetof(S, 'x')
def customtrace(gc, obj, callback, arg):
gc._trace_callback(callback, arg, obj + offset_of_x)
lambda_customtrace = lambda: customtrace
#
def setup():
rgc.register_custom_trace_hook(S, lambda_customtrace)
tx = lltype.malloc(T)
tx.z = 4243
s1 = lltype.malloc(S)
s1.x = llmemory.cast_ptr_to_adr(tx)
return s1
def f():
s1 = setup()
llop.gc__collect(lltype.Void)
return llmemory.cast_adr_to_ptr(s1.x, lltype.Ptr(T)).z
return f
def test_custom_trace(self):
run = self.runner("custom_trace")
res = run([])
assert res == 4243
def define_weakref(cls):
import weakref, gc
class A(object):
pass
def g():
a = A()
return weakref.ref(a)
def f():
a = A()
ref = weakref.ref(a)
result = ref() is a
ref = g()
llop.gc__collect(lltype.Void)
result = result and (ref() is None)
# check that a further collection is fine
llop.gc__collect(lltype.Void)
result = result and (ref() is None)
return result
return f
def test_weakref(self):
run = self.runner("weakref")
res = run([])
assert res
def define_weakref_to_object_with_finalizer(cls):
import weakref, gc
class A(object):
count = 0
a = A()
class B(object):
def __del__(self):
a.count += 1
def g():
b = B()
return weakref.ref(b)
def f():
ref = g()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
result = a.count == 1 and (ref() is None)
return result
return f
def test_weakref_to_object_with_finalizer(self):
run = self.runner("weakref_to_object_with_finalizer")
res = run([])
assert res
def define_collect_during_collect(cls):
class B(object):
pass
b = B()
b.nextid = 1
b.num_deleted = 0
b.num_deleted_c = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
llop.gc__collect(lltype.Void)
b.num_deleted += 1
C()
C()
class C(A):
def __del__(self):
b.num_deleted += 1
b.num_deleted_c += 1
def f(x, y):
persistent_a1 = A()
persistent_a2 = A()
i = 0
while i < x:
i += 1
a = A()
persistent_a3 = A()
persistent_a4 = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
b.bla = persistent_a1.id + persistent_a2.id + persistent_a3.id + persistent_a4.id
# NB print would create a static root!
llop.debug_print(lltype.Void, b.num_deleted_c)
return b.num_deleted
return f
def test_collect_during_collect(self):
run = self.runner("collect_during_collect")
# runs collect recursively 4 times
        res = run([4, 42]) #XXX pure laziness here too
assert res == 12
def define_collect_0(cls):
def concat(j, dummy):
lst = []
for i in range(j):
lst.append(str(i))
result = len("".join(lst))
if we_are_translated():
llop.gc__collect(lltype.Void, 0)
return result
return concat
def test_collect_0(self):
run = self.runner("collect_0")
res = run([100, 0])
assert res == len(''.join([str(x) for x in range(100)]))
def define_interior_ptrs(cls):
from rpython.rtyper.lltypesystem.lltype import Struct, GcStruct, GcArray
from rpython.rtyper.lltypesystem.lltype import Array, Signed, malloc
S1 = Struct("S1", ('x', Signed))
T1 = GcStruct("T1", ('s', S1))
def f1():
t = malloc(T1)
t.s.x = 1
return t.s.x
S2 = Struct("S2", ('x', Signed))
T2 = GcArray(S2)
def f2():
t = malloc(T2, 1)
t[0].x = 1
return t[0].x
S3 = Struct("S3", ('x', Signed))
T3 = GcStruct("T3", ('items', Array(S3)))
def f3():
t = malloc(T3, 1)
t.items[0].x = 1
return t.items[0].x
S4 = Struct("S4", ('x', Signed))
T4 = Struct("T4", ('s', S4))
U4 = GcArray(T4)
def f4():
u = malloc(U4, 1)
u[0].s.x = 1
return u[0].s.x
S5 = Struct("S5", ('x', Signed))
T5 = GcStruct("T5", ('items', Array(S5)))
def f5():
t = malloc(T5, 1)
return len(t.items)
T6 = GcStruct("T6", ('s', Array(Signed)))
def f6():
t = malloc(T6, 1)
t.s[0] = 1
return t.s[0]
def func():
return (f1() * 100000 +
f2() * 10000 +
f3() * 1000 +
f4() * 100 +
f5() * 10 +
f6())
assert func() == 111111
return func
def test_interior_ptrs(self):
run = self.runner("interior_ptrs")
res = run([])
assert res == 111111
def define_id(cls):
class A(object):
pass
a1 = A()
def func():
a2 = A()
a3 = A()
id1 = compute_unique_id(a1)
id2 = compute_unique_id(a2)
id3 = compute_unique_id(a3)
llop.gc__collect(lltype.Void)
error = 0
if id1 != compute_unique_id(a1): error += 1
if id2 != compute_unique_id(a2): error += 2
if id3 != compute_unique_id(a3): error += 4
return error
return func
def test_id(self):
run = self.runner("id")
res = run([])
assert res == 0
def define_can_move(cls):
TP = lltype.GcArray(lltype.Float)
def func():
return rgc.can_move(lltype.malloc(TP, 1))
return func
def test_can_move(self):
run = self.runner("can_move")
res = run([])
assert res == self.GC_CAN_MOVE
def define_shrink_array(cls):
from rpython.rtyper.lltypesystem.rstr import STR
def f():
ptr = lltype.malloc(STR, 3)
ptr.hash = 0x62
ptr.chars[0] = '0'
ptr.chars[1] = 'B'
ptr.chars[2] = 'C'
ptr2 = rgc.ll_shrink_array(ptr, 2)
return ((ptr == ptr2) +
ord(ptr2.chars[0]) +
(ord(ptr2.chars[1]) << 8) +
(len(ptr2.chars) << 16) +
(ptr2.hash << 24))
return f
def test_shrink_array(self):
run = self.runner("shrink_array")
if self.GC_CAN_SHRINK_ARRAY:
expected = 0x62024231
else:
expected = 0x62024230
assert run([]) == expected
def define_string_builder_over_allocation(cls):
import gc
def fn():
s = StringBuilder(4)
s.append("abcd")
s.append("defg")
s.append("rty")
s.append_multiple_char('y', 1000)
gc.collect()
s.append_multiple_char('y', 1000)
res = s.build()[1000]
gc.collect()
return ord(res)
return fn
def test_string_builder_over_allocation(self):
fn = self.runner("string_builder_over_allocation")
res = fn([])
assert res == ord('y')
class GenericMovingGCTests(GenericGCTests):
GC_CAN_MOVE = True
GC_CAN_TEST_ID = False
def define_many_ids(cls):
class A(object):
pass
def f():
from rpython.rtyper.lltypesystem import rffi
alist = [A() for i in range(50)]
idarray = lltype.malloc(rffi.SIGNEDP.TO, len(alist), flavor='raw')
# Compute the id of all the elements of the list. The goal is
# to not allocate memory, so that if the GC needs memory to
# remember the ids, it will trigger some collections itself
i = 0
while i < len(alist):
idarray[i] = compute_unique_id(alist[i])
i += 1
j = 0
while j < 2:
if j == 1: # allocate some stuff between the two iterations
[A() for i in range(20)]
i = 0
while i < len(alist):
assert idarray[i] == compute_unique_id(alist[i])
i += 1
j += 1
lltype.free(idarray, flavor='raw')
return 0
return f
def test_many_ids(self):
if not self.GC_CAN_TEST_ID:
py.test.skip("fails for bad reasons in lltype.py :-(")
run = self.runner("many_ids")
run([])
@classmethod
def ensure_layoutbuilder(cls, translator):
jit2gc = getattr(translator, '_jit2gc', None)
if jit2gc:
assert 'invoke_after_minor_collection' in jit2gc
return jit2gc['layoutbuilder']
marker = cls.marker
GCClass = cls.gcpolicy.transformerclass.GCClass
layoutbuilder = framework.TransformerLayoutBuilder(translator, GCClass)
layoutbuilder.delay_encoding()
def seeme():
marker[0] += 1
translator._jit2gc = {
'layoutbuilder': layoutbuilder,
'invoke_after_minor_collection': seeme,
}
return layoutbuilder
def define_do_malloc_operations(cls):
P = lltype.GcStruct('P', ('x', lltype.Signed))
def g():
r = lltype.malloc(P)
r.x = 1
p = llop.do_malloc_fixedsize(llmemory.GCREF) # placeholder
p = lltype.cast_opaque_ptr(lltype.Ptr(P), p)
p.x = r.x
return p.x
def f():
i = 0
while i < 40:
g()
i += 1
return 0
if cls.gcname == 'incminimark':
marker = cls.marker
def cleanup():
assert marker[0] > 0
marker[0] = 0
else:
cleanup = None
def fix_graph_of_g(translator):
from rpython.translator.translator import graphof
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import rffi
layoutbuilder = cls.ensure_layoutbuilder(translator)
type_id = layoutbuilder.get_type_id(P)
#
# now fix the do_malloc_fixedsize in the graph of g
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
assert 0, "oups, not found"
return f, cleanup, fix_graph_of_g
def test_do_malloc_operations(self):
run = self.runner("do_malloc_operations")
run([])
def define_do_malloc_operations_in_call(cls):
P = lltype.GcStruct('P', ('x', lltype.Signed))
def g():
llop.do_malloc_fixedsize(llmemory.GCREF) # placeholder
def f():
q = lltype.malloc(P)
q.x = 1
i = 0
while i < 40:
g()
i += q.x
return 0
def fix_graph_of_g(translator):
from rpython.translator.translator import graphof
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import rffi
layoutbuilder = cls.ensure_layoutbuilder(translator)
type_id = layoutbuilder.get_type_id(P)
#
# now fix the do_malloc_fixedsize in the graph of g
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
assert 0, "oups, not found"
return f, None, fix_graph_of_g
def test_do_malloc_operations_in_call(self):
run = self.runner("do_malloc_operations_in_call")
run([])
def define_gc_heap_stats(cls):
S = lltype.GcStruct('S', ('x', lltype.Signed))
l1 = []
l2 = []
l3 = []
l4 = []
def f():
for i in range(10):
s = lltype.malloc(S)
l1.append(s)
l2.append(s)
if i < 3:
l3.append(s)
l4.append(s)
            # We cheat here and only read the table, which we later
            # process ourselves; otherwise this test takes ages
llop.gc__collect(lltype.Void)
tb = rgc._heap_stats()
a = 0
nr = 0
b = 0
c = 0
d = 0
e = 0
for i in range(len(tb)):
if tb[i].count == 10:
a += 1
nr = i
if tb[i].count > 50:
d += 1
for i in range(len(tb)):
if tb[i].count == 4:
b += 1
c += tb[i].links[nr]
e += tb[i].size
return d * 1000 + c * 100 + b * 10 + a
return f
def test_gc_heap_stats(self):
py.test.skip("this test makes the following test crash. Investigate.")
run = self.runner("gc_heap_stats")
res = run([])
assert res % 10000 == 2611
totsize = (res / 10000)
size_of_int = rffi.sizeof(lltype.Signed)
assert (totsize - 26 * size_of_int) % 4 == 0
# ^^^ a crude assumption that totsize - varsize would be dividable by 4
# (and give fixedsize)
def define_writebarrier_before_copy(cls):
S = lltype.GcStruct('S', ('x', lltype.Char))
TP = lltype.GcArray(lltype.Ptr(S))
def fn():
l = lltype.malloc(TP, 100)
l2 = lltype.malloc(TP, 100)
for i in range(100):
l[i] = lltype.malloc(S)
rgc.ll_arraycopy(l, l2, 50, 0, 50)
# force nursery collect
x = []
for i in range(20):
x.append((1, lltype.malloc(S)))
for i in range(50):
assert l2[i] == l[50 + i]
return 0
return fn
def test_writebarrier_before_copy(self):
run = self.runner("writebarrier_before_copy")
run([])
# ________________________________________________________________
class TestSemiSpaceGC(GenericMovingGCTests):
gcname = "semispace"
GC_CAN_SHRINK_ARRAY = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.semispace import SemiSpaceGC as GCClass
GC_PARAMS = {'space_size': 512*WORD,
'translated_to_c': False}
root_stack_depth = 200
class TestGenerationGC(GenericMovingGCTests):
gcname = "generation"
GC_CAN_SHRINK_ARRAY = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC as \
GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_weakref_across_minor_collection(cls):
import weakref
class A:
pass
def f():
x = 20 # for GenerationGC, enough for a minor collection
a = A()
a.foo = x
ref = weakref.ref(a)
all = [None] * x
i = 0
while i < x:
all[i] = [i] * i
i += 1
assert ref() is a
llop.gc__collect(lltype.Void)
assert ref() is a
return a.foo + len(all)
return f
def test_weakref_across_minor_collection(self):
run = self.runner("weakref_across_minor_collection")
res = run([])
assert res == 20 + 20
def define_nongc_static_root_minor_collect(cls):
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
def f():
t1 = lltype.malloc(T1)
t1.x = 42
static.p = t1
x = 20
all = [None] * x
i = 0
while i < x: # enough to cause a minor collect
all[i] = [i] * i
i += 1
i = static.p.x
llop.gc__collect(lltype.Void)
return static.p.x + i
def cleanup():
static.p = lltype.nullptr(T1)
return f, cleanup, None
def test_nongc_static_root_minor_collect(self):
run = self.runner("nongc_static_root_minor_collect")
res = run([])
assert res == 84
def define_static_root_minor_collect(cls):
class A:
pass
class B:
pass
static = A()
static.p = None
def f():
t1 = B()
t1.x = 42
static.p = t1
x = 20
all = [None] * x
i = 0
while i < x: # enough to cause a minor collect
all[i] = [i] * i
i += 1
i = static.p.x
llop.gc__collect(lltype.Void)
return static.p.x + i
def cleanup():
static.p = None
return f, cleanup, None
def test_static_root_minor_collect(self):
run = self.runner("static_root_minor_collect")
res = run([])
assert res == 84
def define_many_weakrefs(cls):
# test for the case where allocating the weakref itself triggers
# a collection
import weakref
class A:
pass
def f():
a = A()
i = 0
while i < 17:
ref = weakref.ref(a)
assert ref() is a
i += 1
return 0
return f
def test_many_weakrefs(self):
run = self.runner("many_weakrefs")
run([])
def define_immutable_to_old_promotion(cls):
T_CHILD = lltype.Ptr(lltype.GcStruct('Child', ('field', lltype.Signed)))
T_PARENT = lltype.Ptr(lltype.GcStruct('Parent', ('sub', T_CHILD)))
child = lltype.malloc(T_CHILD.TO)
child2 = lltype.malloc(T_CHILD.TO)
parent = lltype.malloc(T_PARENT.TO)
parent2 = lltype.malloc(T_PARENT.TO)
parent.sub = child
child.field = 3
parent2.sub = child2
child2.field = 8
T_ALL = lltype.Ptr(lltype.GcArray(T_PARENT))
all = lltype.malloc(T_ALL.TO, 2)
all[0] = parent
all[1] = parent2
def f(x, y):
res = all[x]
#all[x] = lltype.nullptr(T_PARENT.TO)
return res.sub.field
return f
def test_immutable_to_old_promotion(self):
run, transformer = self.runner("immutable_to_old_promotion", transformer=True)
run([1, 4])
if not transformer.GCClass.prebuilt_gc_objects_are_static_roots:
assert len(transformer.layoutbuilder.addresses_of_static_ptrs) == 0
else:
assert len(transformer.layoutbuilder.addresses_of_static_ptrs) >= 4
# NB. Remember that the number above does not count
# the number of prebuilt GC objects, but the number of locations
# within prebuilt GC objects that are of type Ptr(Gc).
# At the moment we get additional_roots_sources == 6:
# * all[0]
# * all[1]
# * parent.sub
# * parent2.sub
# * the GcArray pointer from gc.wr_to_objects_with_id
# * the GcArray pointer from gc.object_id_dict.
def define_adr_of_nursery(cls):
class A(object):
pass
def f():
# we need at least 1 obj to allocate a nursery
a = A()
nf_a = llop.gc_adr_of_nursery_free(llmemory.Address)
nt_a = llop.gc_adr_of_nursery_top(llmemory.Address)
nf0 = nf_a.address[0]
nt0 = nt_a.address[0]
a0 = A()
a1 = A()
nf1 = nf_a.address[0]
nt1 = nt_a.address[0]
assert nf1 > nf0
assert nt1 > nf1
assert nt1 == nt0
return 0
return f
def test_adr_of_nursery(self):
run = self.runner("adr_of_nursery")
res = run([])
class TestGenerationalNoFullCollectGC(GCTest):
# test that nursery is doing its job and that no full collection
# is needed when most allocated objects die quickly
gcname = "generation"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC
class GCClass(GenerationGC):
__ready = False
def setup(self):
from rpython.memory.gc.generation import GenerationGC
GenerationGC.setup(self)
self.__ready = True
def semispace_collect(self, size_changing=False):
ll_assert(not self.__ready,
"no full collect should occur in this test")
def _teardown(self):
self.__ready = False # collecting here is expected
GenerationGC._teardown(self)
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 128*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_working_nursery(cls):
def f():
total = 0
i = 0
while i < 40:
lst = []
j = 0
while j < 5:
lst.append(i*j)
j += 1
total += len(lst)
i += 1
return total
return f
def test_working_nursery(self):
run = self.runner("working_nursery")
res = run([])
assert res == 40 * 5
class TestHybridGC(TestGenerationGC):
gcname = "hybrid"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.hybrid import HybridGC as GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'large_object': 8*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_ref_from_rawmalloced_to_regular(cls):
import gc
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcStruct('A', ('p', lltype.Ptr(S)),
('a', lltype.Array(lltype.Char)))
def setup(j):
p = lltype.malloc(S)
p.x = j*2
lst = lltype.malloc(A, j)
# the following line generates a write_barrier call at the moment,
# which is important because 'lst' can be allocated directly
# in generation 2. This can only occur with varsized mallocs.
lst.p = p
return lst
def f(i, j):
lst = setup(j)
gc.collect()
return lst.p.x
return f
def test_ref_from_rawmalloced_to_regular(self):
run = self.runner("ref_from_rawmalloced_to_regular")
res = run([100, 100])
assert res == 200
def define_write_barrier_direct(cls):
from rpython.rlib import rgc
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
('x', lltype.Signed),
('prev', lltype.Ptr(S)),
('next', lltype.Ptr(S))))
s0 = lltype.malloc(S, immortal=True)
def f():
s = lltype.malloc(S)
s.x = 42
llop.bare_setfield(lltype.Void, s0, void('next'), s)
llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s0))
rgc.collect(0)
return s0.next.x
def cleanup():
s0.next = lltype.nullptr(S)
return f, cleanup, None
def test_write_barrier_direct(self):
run = self.runner("write_barrier_direct")
res = run([])
assert res == 42
class TestMiniMarkGC(TestHybridGC):
gcname = "minimark"
GC_CAN_TEST_ID = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.minimark import MiniMarkGC as GCClass
GC_PARAMS = {'nursery_size': 32*WORD,
'page_size': 16*WORD,
'arena_size': 64*WORD,
'small_request_threshold': 5*WORD,
'large_object': 8*WORD,
'card_page_indices': 4,
'translated_to_c': False,
}
root_stack_depth = 200
def define_no_clean_setarrayitems(cls):
# The optimization find_clean_setarrayitems() in
# gctransformer/framework.py does not work with card marking.
# Check that it is turned off.
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcArray(lltype.Ptr(S))
def sub(lst):
lst[15] = lltype.malloc(S) # 'lst' gets the single card mark "12-15"
lst[15].x = 123
lst[0] = lst[15] # that would be a "clean_setarrayitem"
def f():
lst = lltype.malloc(A, 16) # 16 > 10
rgc.collect()
sub(lst)
null = lltype.nullptr(S)
lst[15] = null # clear, so that A() is only visible via lst[0]
rgc.collect() # -> crash
return lst[0].x
return f
def test_no_clean_setarrayitems(self):
run = self.runner("no_clean_setarrayitems")
res = run([])
assert res == 123
def define_nursery_hash_base(cls):
class A:
pass
def fn():
objects = []
hashes = []
for i in range(200):
rgc.collect(0) # nursery-only collection, if possible
obj = A()
objects.append(obj)
hashes.append(compute_identity_hash(obj))
unique = {}
for i in range(len(objects)):
assert compute_identity_hash(objects[i]) == hashes[i]
unique[hashes[i]] = None
return len(unique)
return fn
def test_nursery_hash_base(self):
res = self.runner('nursery_hash_base')
assert res([]) >= 195
def define_instantiate_nonmovable(cls):
from rpython.rlib import objectmodel
from rpython.rtyper import annlowlevel
class A:
pass
def fn():
a1 = A()
a = objectmodel.instantiate(A, nonmovable=True)
a.next = a1 # 'a' is known young here, so no write barrier emitted
res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a))
rgc.collect()
objectmodel.keepalive_until_here(a)
return res
return fn
def test_instantiate_nonmovable(self):
res = self.runner('instantiate_nonmovable')
assert res([]) == 0
class TestIncrementalMiniMarkGC(TestMiniMarkGC):
gcname = "incminimark"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC \
as GCClass
GC_PARAMS = {'nursery_size': 32*WORD,
'page_size': 16*WORD,
'arena_size': 64*WORD,
'small_request_threshold': 5*WORD,
'large_object': 8*WORD,
'card_page_indices': 4,
'translated_to_c': False,
}
root_stack_depth = 200
def define_malloc_array_of_gcptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcArray(lltype.Ptr(S))
def f():
lst = lltype.malloc(A, 5)
return (lst[0] == lltype.nullptr(S)
and lst[1] == lltype.nullptr(S)
and lst[2] == lltype.nullptr(S)
and lst[3] == lltype.nullptr(S)
and lst[4] == lltype.nullptr(S))
return f
def test_malloc_array_of_gcptr(self):
run = self.runner('malloc_array_of_gcptr')
res = run([])
assert res
def define_malloc_struct_of_gcptr(cls):
S1 = lltype.GcStruct('S', ('x', lltype.Signed))
S = lltype.GcStruct('S',
('x', lltype.Signed),
('field1', lltype.Ptr(S1)),
('field2', lltype.Ptr(S1)))
s0 = lltype.malloc(S)
def f():
return (s0.field1 == lltype.nullptr(S1) and s0.field2 == lltype.nullptr(S1))
return f
def test_malloc_struct_of_gcptr(self):
run = self.runner("malloc_struct_of_gcptr")
res = run([])
assert res
# ________________________________________________________________
# tagged pointers
class TaggedPointerGCTests(GCTest):
taggedpointers = True
def define_tagged_simple(cls):
class Unrelated(object):
pass
u = Unrelated()
u.x = UnboxedObject(47)
def fn(n):
rgc.collect() # check that a prebuilt tagged pointer doesn't explode
if n > 0:
x = BoxedObject(n)
else:
x = UnboxedObject(n)
u.x = x # invoke write barrier
rgc.collect()
return x.meth(100)
def func():
return fn(1000) + fn(-1000)
assert func() == 205
return func
def test_tagged_simple(self):
func = self.runner("tagged_simple")
res = func([])
assert res == 205
def define_tagged_prebuilt(cls):
class F:
pass
f = F()
f.l = [UnboxedObject(10)]
def fn(n):
if n > 0:
x = BoxedObject(n)
else:
x = UnboxedObject(n)
f.l.append(x)
rgc.collect()
return f.l[-1].meth(100)
def func():
return fn(1000) ^ fn(-1000)
assert func() == -1999
return func
def test_tagged_prebuilt(self):
func = self.runner("tagged_prebuilt")
res = func([])
assert res == -1999
def define_gettypeid(cls):
class A(object):
pass
def fn():
a = A()
return rgc.get_typeid(a)
return fn
def test_gettypeid(self):
func = self.runner("gettypeid")
res = func([])
print res
from rpython.rlib.objectmodel import UnboxedValue
class TaggedBase(object):
__slots__ = ()
def meth(self, x):
raise NotImplementedError
class BoxedObject(TaggedBase):
attrvalue = 66
def __init__(self, normalint):
self.normalint = normalint
def meth(self, x):
return self.normalint + x + 2
class UnboxedObject(TaggedBase, UnboxedValue):
__slots__ = 'smallint'
def meth(self, x):
return self.smallint + x + 3
class TestHybridTaggedPointerGC(TaggedPointerGCTests):
gcname = "hybrid"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC as \
GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'translated_to_c': False}
root_stack_depth = 200
def test_gettypeid(self):
py.test.skip("fails for obscure reasons")
```
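The tagged-pointer tests above lean on `rpython.rlib.objectmodel.UnboxedValue`, which, once translated with the `taggedpointers` option, packs a small integer straight into the pointer bits instead of allocating a box. A minimal sketch of the mechanism, assuming an RPython checkout is importable; untranslated, such a class still behaves like an ordinary instance, which is what lets the tests run on plain CPython:

```python
# Hypothetical stand-alone illustration, not part of the test file above.
from rpython.rlib.objectmodel import UnboxedValue

class Tagged(UnboxedValue):
    __slots__ = 'smallint'   # exactly one slot: the value packed into the pointer

t = Tagged(21)
assert t.smallint == 21     # reads back the tagged value, untranslated too
```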
#### File: rpython/rlib/rcomplex.py
```python
import math
from math import fabs
from rpython.rlib.rfloat import copysign, asinh, log1p, isfinite, isinf, isnan
from rpython.rlib.constant import DBL_MIN, CM_SCALE_UP, CM_SCALE_DOWN
from rpython.rlib.constant import CM_LARGE_DOUBLE, DBL_MANT_DIG
from rpython.rlib.constant import M_LN2, M_LN10
from rpython.rlib.constant import CM_SQRT_LARGE_DOUBLE, CM_SQRT_DBL_MIN
from rpython.rlib.constant import CM_LOG_LARGE_DOUBLE
from rpython.rlib.special_value import special_type, INF, NAN
from rpython.rlib.special_value import sqrt_special_values
from rpython.rlib.special_value import acos_special_values
from rpython.rlib.special_value import acosh_special_values
from rpython.rlib.special_value import asinh_special_values
from rpython.rlib.special_value import atanh_special_values
from rpython.rlib.special_value import log_special_values
from rpython.rlib.special_value import exp_special_values
from rpython.rlib.special_value import cosh_special_values
from rpython.rlib.special_value import sinh_special_values
from rpython.rlib.special_value import tanh_special_values
from rpython.rlib.special_value import rect_special_values
#binary
def c_add(x, y):
(r1, i1), (r2, i2) = x, y
r = r1 + r2
i = i1 + i2
return (r, i)
def c_sub(x, y):
(r1, i1), (r2, i2) = x, y
r = r1 - r2
i = i1 - i2
return (r, i)
def c_mul(x, y):
(r1, i1), (r2, i2) = x, y
r = r1 * r2 - i1 * i2
i = r1 * i2 + i1 * r2
return (r, i)
def c_div(x, y): #x/y
(r1, i1), (r2, i2) = x, y
if r2 < 0:
abs_r2 = -r2
else:
abs_r2 = r2
if i2 < 0:
abs_i2 = -i2
else:
abs_i2 = i2
if abs_r2 >= abs_i2:
if abs_r2 == 0.0:
raise ZeroDivisionError
else:
ratio = i2 / r2
denom = r2 + i2 * ratio
rr = (r1 + i1 * ratio) / denom
ir = (i1 - r1 * ratio) / denom
elif isnan(r2):
rr = NAN
ir = NAN
else:
ratio = r2 / i2
denom = r2 * ratio + i2
assert i2 != 0.0
rr = (r1 * ratio + i1) / denom
ir = (i1 * ratio - r1) / denom
return (rr, ir)
def c_pow(x, y):
(r1, i1), (r2, i2) = x, y
if i1 == 0 and i2 == 0 and r1 > 0:
rr = math.pow(r1, r2)
ir = 0.
elif r2 == 0.0 and i2 == 0.0:
rr, ir = 1, 0
elif r1 == 1.0 and i1 == 0.0:
rr, ir = (1.0, 0.0)
elif r1 == 0.0 and i1 == 0.0:
if i2 != 0.0 or r2 < 0.0:
raise ZeroDivisionError
rr, ir = (0.0, 0.0)
else:
vabs = math.hypot(r1, i1)
mag = math.pow(vabs, r2)  # 'mag' instead of 'len', which shadows the builtin
at = math.atan2(i1, r1)
phase = at * r2
if i2 != 0.0:
mag /= math.exp(at * i2)
phase += i2 * math.log(vabs)
try:
rr = mag * math.cos(phase)
ir = mag * math.sin(phase)
except ValueError:
rr = NAN
ir = NAN
return (rr, ir)
#unary
def c_neg(r, i):
return (-r, -i)
def c_sqrt(x, y):
'''
Method: use symmetries to reduce to the case when x = z.real and y
= z.imag are nonnegative. Then the real part of the result is
given by
s = sqrt((x + hypot(x, y))/2)
and the imaginary part is
d = (y/2)/s
If either x or y is very large then there's a risk of overflow in
computation of the expression x + hypot(x, y). We can avoid this
by rewriting the formula for s as:
s = 2*sqrt(x/8 + hypot(x/8, y/8))
This costs us two extra multiplications/divisions, but avoids the
overhead of checking for x and y large.
If both x and y are subnormal then hypot(x, y) may also be
subnormal, so will lack full precision. We solve this by rescaling
x and y by a sufficiently large power of 2 to ensure that x and y
are normal.
'''
if not isfinite(x) or not isfinite(y):
return sqrt_special_values[special_type(x)][special_type(y)]
if x == 0. and y == 0.:
return (0., y)
ax = fabs(x)
ay = fabs(y)
if ax < DBL_MIN and ay < DBL_MIN and (ax > 0. or ay > 0.):
# here we catch cases where hypot(ax, ay) is subnormal
ax = math.ldexp(ax, CM_SCALE_UP)
ay1 = math.ldexp(ay, CM_SCALE_UP)
s = math.ldexp(math.sqrt(ax + math.hypot(ax, ay1)),
CM_SCALE_DOWN)
else:
ax /= 8.
s = 2.*math.sqrt(ax + math.hypot(ax, ay/8.))
d = ay/(2.*s)
if x >= 0.:
return (s, copysign(d, y))
else:
return (d, copysign(s, y))
def c_acos(x, y):
if not isfinite(x) or not isfinite(y):
return acos_special_values[special_type(x)][special_type(y)]
if fabs(x) > CM_LARGE_DOUBLE or fabs(y) > CM_LARGE_DOUBLE:
# avoid unnecessary overflow for large arguments
real = math.atan2(fabs(y), x)
# split into cases to make sure that the branch cut has the
# correct continuity on systems with unsigned zeros
if x < 0.:
imag = -copysign(math.log(math.hypot(x/2., y/2.)) +
M_LN2*2., y)
else:
imag = copysign(math.log(math.hypot(x/2., y/2.)) +
M_LN2*2., -y)
else:
s1x, s1y = c_sqrt(1.-x, -y)
s2x, s2y = c_sqrt(1.+x, y)
real = 2.*math.atan2(s1x, s2x)
imag = asinh(s2x*s1y - s2y*s1x)
return (real, imag)
def c_acosh(x, y):
# XXX the following two lines seem unnecessary at least on Linux;
# the tests pass fine without them
if not isfinite(x) or not isfinite(y):
return acosh_special_values[special_type(x)][special_type(y)]
if fabs(x) > CM_LARGE_DOUBLE or fabs(y) > CM_LARGE_DOUBLE:
# avoid unnecessary overflow for large arguments
real = math.log(math.hypot(x/2., y/2.)) + M_LN2*2.
imag = math.atan2(y, x)
else:
s1x, s1y = c_sqrt(x - 1., y)
s2x, s2y = c_sqrt(x + 1., y)
real = asinh(s1x*s2x + s1y*s2y)
imag = 2.*math.atan2(s1y, s2x)
return (real, imag)
def c_asin(x, y):
# asin(z) = -i asinh(iz)
sx, sy = c_asinh(-y, x)
return (sy, -sx)
def c_asinh(x, y):
if not isfinite(x) or not isfinite(y):
return asinh_special_values[special_type(x)][special_type(y)]
if fabs(x) > CM_LARGE_DOUBLE or fabs(y) > CM_LARGE_DOUBLE:
if y >= 0.:
real = copysign(math.log(math.hypot(x/2., y/2.)) +
M_LN2*2., x)
else:
real = -copysign(math.log(math.hypot(x/2., y/2.)) +
M_LN2*2., -x)
imag = math.atan2(y, fabs(x))
else:
s1x, s1y = c_sqrt(1.+y, -x)
s2x, s2y = c_sqrt(1.-y, x)
real = asinh(s1x*s2y - s2x*s1y)
imag = math.atan2(y, s1x*s2x - s1y*s2y)
return (real, imag)
def c_atan(x, y):
# atan(z) = -i atanh(iz)
sx, sy = c_atanh(-y, x)
return (sy, -sx)
def c_atanh(x, y):
if not isfinite(x) or not isfinite(y):
return atanh_special_values[special_type(x)][special_type(y)]
# Reduce to case where x >= 0., using atanh(z) = -atanh(-z).
if x < 0.:
return c_neg(*c_atanh(*c_neg(x, y)))
ay = fabs(y)
if x > CM_SQRT_LARGE_DOUBLE or ay > CM_SQRT_LARGE_DOUBLE:
# if abs(z) is large then we use the approximation
# atanh(z) ~ 1/z +/- i*pi/2 (+/- depending on the sign
# of y)
h = math.hypot(x/2., y/2.) # safe from overflow
real = x/4./h/h
# the two negations in the next line cancel each other out
# except when working with unsigned zeros: they're there to
# ensure that the branch cut has the correct continuity on
# systems that don't support signed zeros
imag = -copysign(math.pi/2., -y)
elif x == 1. and ay < CM_SQRT_DBL_MIN:
# C99 standard says: atanh(1+/-0.) should be inf +/- 0i
if ay == 0.:
raise ValueError("math domain error")
#real = INF
#imag = y
else:
real = -math.log(math.sqrt(ay)/math.sqrt(math.hypot(ay, 2.)))
imag = copysign(math.atan2(2., -ay) / 2, y)
else:
real = log1p(4.*x/((1-x)*(1-x) + ay*ay))/4.
imag = -math.atan2(-2.*y, (1-x)*(1+x) - ay*ay) / 2.
return (real, imag)
def c_log(x, y):
# The usual formula for the real part is log(hypot(z.real, z.imag)).
# There are four situations where this formula is potentially
# problematic:
#
# (1) the absolute value of z is subnormal. Then hypot is subnormal,
# so has fewer than the usual number of bits of accuracy, hence may
# have large relative error. This then gives a large absolute error
# in the log. This can be solved by rescaling z by a suitable power
# of 2.
#
# (2) the absolute value of z is greater than DBL_MAX (e.g. when both
# z.real and z.imag are within a factor of 1/sqrt(2) of DBL_MAX)
# Again, rescaling solves this.
#
# (3) the absolute value of z is close to 1. In this case it's
# difficult to achieve good accuracy, at least in part because a
# change of 1ulp in the real or imaginary part of z can result in a
# change of billions of ulps in the correctly rounded answer.
#
# (4) z = 0. The simplest thing to do here is to call the
# floating-point log with an argument of 0, and let its behaviour
# (returning -infinity, signaling a floating-point exception, setting
# errno, or whatever) determine that of c_log. So the usual formula
# is fine here.
# XXX the following two lines seem unnecessary at least on Linux;
# the tests pass fine without them
if not isfinite(x) or not isfinite(y):
return log_special_values[special_type(x)][special_type(y)]
ax = fabs(x)
ay = fabs(y)
if ax > CM_LARGE_DOUBLE or ay > CM_LARGE_DOUBLE:
real = math.log(math.hypot(ax/2., ay/2.)) + M_LN2
elif ax < DBL_MIN and ay < DBL_MIN:
if ax > 0. or ay > 0.:
# catch cases where hypot(ax, ay) is subnormal
real = math.log(math.hypot(math.ldexp(ax, DBL_MANT_DIG),
math.ldexp(ay, DBL_MANT_DIG)))
real -= DBL_MANT_DIG*M_LN2
else:
# log(+/-0. +/- 0i)
raise ValueError("math domain error")
#real = -INF
#imag = atan2(y, x)
else:
h = math.hypot(ax, ay)
if 0.71 <= h and h <= 1.73:
am = max(ax, ay)
an = min(ax, ay)
real = log1p((am-1)*(am+1) + an*an) / 2.
else:
real = math.log(h)
imag = math.atan2(y, x)
return (real, imag)
def c_log10(x, y):
rx, ry = c_log(x, y)
return (rx / M_LN10, ry / M_LN10)
def c_exp(x, y):
if not isfinite(x) or not isfinite(y):
if isinf(x) and isfinite(y) and y != 0.:
if x > 0:
real = copysign(INF, math.cos(y))
imag = copysign(INF, math.sin(y))
else:
real = copysign(0., math.cos(y))
imag = copysign(0., math.sin(y))
r = (real, imag)
else:
r = exp_special_values[special_type(x)][special_type(y)]
# need to raise ValueError if y is +/- infinity and x is not
# a NaN and not -infinity
if isinf(y) and (isfinite(x) or (isinf(x) and x > 0)):
raise ValueError("math domain error")
return r
if x > CM_LOG_LARGE_DOUBLE:
l = math.exp(x-1.)
real = l * math.cos(y) * math.e
imag = l * math.sin(y) * math.e
else:
l = math.exp(x)
real = l * math.cos(y)
imag = l * math.sin(y)
if isinf(real) or isinf(imag):
raise OverflowError("math range error")
return real, imag
def c_cosh(x, y):
if not isfinite(x) or not isfinite(y):
if isinf(x) and isfinite(y) and y != 0.:
if x > 0:
real = copysign(INF, math.cos(y))
imag = copysign(INF, math.sin(y))
else:
real = copysign(INF, math.cos(y))
imag = -copysign(INF, math.sin(y))
r = (real, imag)
else:
r = cosh_special_values[special_type(x)][special_type(y)]
# need to raise ValueError if y is +/- infinity and x is not
# a NaN
if isinf(y) and not isnan(x):
raise ValueError("math domain error")
return r
if fabs(x) > CM_LOG_LARGE_DOUBLE:
# deal correctly with cases where cosh(x) overflows but
# cosh(z) does not.
x_minus_one = x - copysign(1., x)
real = math.cos(y) * math.cosh(x_minus_one) * math.e
imag = math.sin(y) * math.sinh(x_minus_one) * math.e
else:
real = math.cos(y) * math.cosh(x)
imag = math.sin(y) * math.sinh(x)
if isinf(real) or isinf(imag):
raise OverflowError("math range error")
return real, imag
def c_sinh(x, y):
# special treatment for sinh(+/-inf + iy) if y is finite and nonzero
if not isfinite(x) or not isfinite(y):
if isinf(x) and isfinite(y) and y != 0.:
if x > 0:
real = copysign(INF, math.cos(y))
imag = copysign(INF, math.sin(y))
else:
real = -copysign(INF, math.cos(y))
imag = copysign(INF, math.sin(y))
r = (real, imag)
else:
r = sinh_special_values[special_type(x)][special_type(y)]
# need to raise ValueError if y is +/- infinity and x is not
# a NaN
if isinf(y) and not isnan(x):
raise ValueError("math domain error")
return r
if fabs(x) > CM_LOG_LARGE_DOUBLE:
x_minus_one = x - copysign(1., x)
real = math.cos(y) * math.sinh(x_minus_one) * math.e
imag = math.sin(y) * math.cosh(x_minus_one) * math.e
else:
real = math.cos(y) * math.sinh(x)
imag = math.sin(y) * math.cosh(x)
if isinf(real) or isinf(imag):
raise OverflowError("math range error")
return real, imag
def c_tanh(x, y):
# Formula:
#
# tanh(x+iy) = (tanh(x)(1+tan(y)^2) + i tan(y)(1-tanh(x))^2) /
# (1+tan(y)^2 tanh(x)^2)
#
# To avoid excessive roundoff error, 1-tanh(x)^2 is better computed
# as 1/cosh(x)^2. When abs(x) is large, we approximate 1-tanh(x)^2
# by 4 exp(-2*x) instead, to avoid possible overflow in the
# computation of cosh(x).
if not isfinite(x) or not isfinite(y):
if isinf(x) and isfinite(y) and y != 0.:
if x > 0:
real = 1.0 # vv XXX why is the 2. there?
imag = copysign(0., 2. * math.sin(y) * math.cos(y))
else:
real = -1.0
imag = copysign(0., 2. * math.sin(y) * math.cos(y))
r = (real, imag)
else:
r = tanh_special_values[special_type(x)][special_type(y)]
# need to raise ValueError if y is +/-infinity and x is finite
if isinf(y) and isfinite(x):
raise ValueError("math domain error")
return r
if fabs(x) > CM_LOG_LARGE_DOUBLE:
real = copysign(1., x)
imag = 4. * math.sin(y) * math.cos(y) * math.exp(-2.*fabs(x))
else:
tx = math.tanh(x)
ty = math.tan(y)
cx = 1. / math.cosh(x)
txty = tx * ty
denom = 1. + txty * txty
real = tx * (1. + ty*ty) / denom
imag = ((ty / denom) * cx) * cx
return real, imag
def c_cos(r, i):
# cos(z) = cosh(iz)
return c_cosh(-i, r)
def c_sin(r, i):
# sin(z) = -i sinh(iz)
sr, si = c_sinh(-i, r)
return si, -sr
def c_tan(r, i):
# tan(z) = -i tanh(iz)
sr, si = c_tanh(-i, r)
return si, -sr
def c_rect(r, phi):
if not isfinite(r) or not isfinite(phi):
# if r is +/-infinity and phi is finite but nonzero then
# result is (+-INF +-INF i), but we need to compute cos(phi)
# and sin(phi) to figure out the signs.
if isinf(r) and isfinite(phi) and phi != 0.:
if r > 0:
real = copysign(INF, math.cos(phi))
imag = copysign(INF, math.sin(phi))
else:
real = -copysign(INF, math.cos(phi))
imag = -copysign(INF, math.sin(phi))
z = (real, imag)
else:
z = rect_special_values[special_type(r)][special_type(phi)]
# need to raise ValueError if r is a nonzero number and phi
# is infinite
if r != 0. and not isnan(r) and isinf(phi):
raise ValueError("math domain error")
return z
real = r * math.cos(phi)
imag = r * math.sin(phi)
return real, imag
def c_phase(x, y):
# Windows screws up atan2 for inf and nan, and alpha Tru64 5.1 doesn't
# follow C99 for atan2(0., 0.).
if isnan(x) or isnan(y):
return NAN
if isinf(y):
if isinf(x):
if copysign(1., x) == 1.:
# atan2(+-inf, +inf) == +-pi/4
return copysign(0.25 * math.pi, y)
else:
# atan2(+-inf, -inf) == +-pi*3/4
return copysign(0.75 * math.pi, y)
# atan2(+-inf, x) == +-pi/2 for finite x
return copysign(0.5 * math.pi, y)
if isinf(x) or y == 0.:
if copysign(1., x) == 1.:
# atan2(+-y, +inf) = atan2(+-0, +x) = +-0.
return copysign(0., y)
else:
# atan2(+-y, -inf) = atan2(+-0., -x) = +-pi.
return copysign(math.pi, y)
return math.atan2(y, x)
def c_abs(r, i):
if not isfinite(r) or not isfinite(i):
# C99 rules: if either the real or the imaginary part is an
# infinity, return infinity, even if the other part is a NaN.
if isinf(r):
return INF
if isinf(i):
return INF
# either the real or imaginary part is a NaN,
# and neither is infinite. Result should be NaN.
return NAN
result = math.hypot(r, i)
if not isfinite(result):
raise OverflowError("math range error")
return result
def c_polar(r, i):
real = c_abs(r, i)
phi = c_phase(r, i)
return real, phi
def c_isinf(r, i):
return isinf(r) or isinf(i)
def c_isnan(r, i):
return isnan(r) or isnan(i)
def c_isfinite(r, i):
return isfinite(r) and isfinite(i)
```
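A quick sanity check of the tuple-based helpers above, assuming an RPython source checkout on `sys.path`. Each `c_*` function takes and returns `(real, imag)` pairs rather than `complex` objects, so results can be compared component-wise against CPython's `cmath`:

```python
import cmath
from rpython.rlib.rcomplex import c_mul, c_sqrt, c_log

def agrees(pair, z, eps=1e-12):
    # compare a (real, imag) tuple against a builtin complex value
    return abs(pair[0] - z.real) < eps and abs(pair[1] - z.imag) < eps

assert agrees(c_mul((1., 2.), (3., -4.)), (1+2j) * (3-4j))   # (11.0, 2.0)
assert agrees(c_sqrt(-1., 0.), cmath.sqrt(-1))               # (0.0, 1.0)
assert agrees(c_log(1., 1.), cmath.log(1+1j))
```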
#### File: rpython/rlib/rposix.py
```python
import os
from rpython.rtyper.lltypesystem.rffi import CConstant, CExternVariable, INT
from rpython.rtyper.lltypesystem import ll2ctypes, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rlib.rarithmetic import intmask
from rpython.rlib.objectmodel import specialize
from rpython.rlib import jit
from rpython.translator.platform import platform
WIN32 = os.name == "nt"
class CConstantErrno(CConstant):
# these accessors are used when calling get_errno() or set_errno()
# on top of CPython
def __getitem__(self, index):
assert index == 0
try:
return ll2ctypes.TLS.errno
except AttributeError:
raise ValueError("no C function call occurred so far, "
"errno is undefined")
def __setitem__(self, index, value):
assert index == 0
ll2ctypes.TLS.errno = value
if os.name == 'nt':
if platform.name == 'msvc':
includes=['errno.h','stdio.h']
else:
includes=['errno.h','stdio.h', 'stdint.h']
separate_module_sources =['''
/* Lifted completely from CPython 3.3 Modules/posix_module.c */
#include <malloc.h> /* for _msize */
typedef struct {
intptr_t osfhnd;
char osfile;
} my_ioinfo;
extern __declspec(dllimport) char * __pioinfo[];
#define IOINFO_L2E 5
#define IOINFO_ARRAY_ELTS (1 << IOINFO_L2E)
#define IOINFO_ARRAYS 64
#define _NHANDLE_ (IOINFO_ARRAYS * IOINFO_ARRAY_ELTS)
#define FOPEN 0x01
#define _NO_CONSOLE_FILENO (intptr_t)-2
/* This function emulates what the windows CRT
does to validate file handles */
RPY_EXTERN int
_PyVerify_fd(int fd)
{
const int i1 = fd >> IOINFO_L2E;
const int i2 = fd & ((1 << IOINFO_L2E) - 1);
static size_t sizeof_ioinfo = 0;
/* Determine the actual size of the ioinfo structure,
* as used by the CRT loaded in memory
*/
if (sizeof_ioinfo == 0 && __pioinfo[0] != NULL) {
sizeof_ioinfo = _msize(__pioinfo[0]) / IOINFO_ARRAY_ELTS;
}
if (sizeof_ioinfo == 0) {
/* This should not happen... */
goto fail;
}
/* See that it isn't a special CLEAR fileno */
if (fd != _NO_CONSOLE_FILENO) {
/* Microsoft CRT would check that 0<=fd<_nhandle but we can't do that. Instead
* we check pointer validity and other info
*/
if (0 <= i1 && i1 < IOINFO_ARRAYS && __pioinfo[i1] != NULL) {
/* finally, check that the file is open */
my_ioinfo* info = (my_ioinfo*)(__pioinfo[i1] + i2 * sizeof_ioinfo);
if (info->osfile & FOPEN) {
return 1;
}
}
}
fail:
errno = EBADF;
return 0;
}
''',]
else:
separate_module_sources = []
includes=['errno.h','stdio.h']
errno_eci = ExternalCompilationInfo(
includes=includes,
separate_module_sources=separate_module_sources,
)
# Direct getters/setters, don't use directly!
_get_errno, _set_errno = CExternVariable(INT, 'errno', errno_eci,
CConstantErrno, sandboxsafe=True,
_nowrapper=True, c_type='int')
def get_saved_errno():
"""Return the value of the "saved errno".
This value is saved after a call to a C function, if it was declared
with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO).
Functions without that flag don't change the saved errno.
"""
from rpython.rlib import rthread
return intmask(rthread.tlfield_rpy_errno.getraw())
def set_saved_errno(errno):
"""Set the value of the saved errno. This value will be used to
initialize the real errno just before calling the following C function,
provided it was declared llexternal(..., save_err=RFFI_READSAVED_ERRNO).
Note also that it is more common to want the real errno to be initially
zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE)
and then you don't need set_saved_errno(0).
"""
from rpython.rlib import rthread
rthread.tlfield_rpy_errno.setraw(rffi.cast(INT, errno))
def get_saved_alterrno():
"""Return the value of the "saved alterrno".
This value is saved after a call to a C function, if it was declared
with the flag llexternal(..., save_err=rffi.RFFI_SAVE_ERRNO | rffi.RFFI_ALT_ERRNO).
Functions without that flag don't change the saved errno.
"""
from rpython.rlib import rthread
return intmask(rthread.tlfield_alt_errno.getraw())
def set_saved_alterrno(errno):
"""Set the value of the saved alterrno. This value will be used to
initialize the real errno just before calling the following C function,
provided it was declared llexternal(..., save_err=RFFI_READSAVED_ERRNO | rffi.RFFI_ALT_ERRNO).
Note also that it is more common to want the real errno to be initially
zero; for that case, use llexternal(..., save_err=RFFI_ZERO_ERRNO_BEFORE)
and then you don't need set_saved_errno(0).
"""
from rpython.rlib import rthread
rthread.tlfield_alt_errno.setraw(rffi.cast(INT, errno))
# These are not posix specific, but where should they move to?
@specialize.call_location()
def _errno_before(save_err):
if save_err & rffi.RFFI_READSAVED_ERRNO:
from rpython.rlib import rthread
if save_err & rffi.RFFI_ALT_ERRNO:
_set_errno(rthread.tlfield_alt_errno.getraw())
else:
_set_errno(rthread.tlfield_rpy_errno.getraw())
elif save_err & rffi.RFFI_ZERO_ERRNO_BEFORE:
_set_errno(rffi.cast(rffi.INT, 0))
if WIN32 and (save_err & rffi.RFFI_READSAVED_LASTERROR):
from rpython.rlib import rthread, rwin32
if save_err & rffi.RFFI_ALT_ERRNO:
err = rthread.tlfield_alt_lasterror.getraw()
else:
err = rthread.tlfield_rpy_lasterror.getraw()
# careful, getraw() overwrites GetLastError.
# We must assign it with _SetLastError() as the last
# operation, i.e. after the errno handling.
rwin32._SetLastError(err)
@specialize.call_location()
def _errno_after(save_err):
if WIN32:
if save_err & rffi.RFFI_SAVE_LASTERROR:
from rpython.rlib import rthread, rwin32
err = rwin32._GetLastError()
# careful, setraw() overwrites GetLastError.
# We must read it first, before the errno handling.
if save_err & rffi.RFFI_ALT_ERRNO:
rthread.tlfield_alt_lasterror.setraw(err)
else:
rthread.tlfield_rpy_lasterror.setraw(err)
elif save_err & rffi.RFFI_SAVE_WSALASTERROR:
from rpython.rlib import rthread, _rsocket_rffi
err = _rsocket_rffi._WSAGetLastError()
if save_err & rffi.RFFI_ALT_ERRNO:
rthread.tlfield_alt_lasterror.setraw(err)
else:
rthread.tlfield_rpy_lasterror.setraw(err)
if save_err & rffi.RFFI_SAVE_ERRNO:
from rpython.rlib import rthread
if save_err & rffi.RFFI_ALT_ERRNO:
rthread.tlfield_alt_errno.setraw(_get_errno())
else:
rthread.tlfield_rpy_errno.setraw(_get_errno())
if os.name == 'nt':
is_valid_fd = jit.dont_look_inside(rffi.llexternal(
"_PyVerify_fd", [rffi.INT], rffi.INT,
compilation_info=errno_eci,
))
def validate_fd(fd):
if not is_valid_fd(fd):
from errno import EBADF
raise OSError(EBADF, 'Bad file descriptor')
else:
def is_valid_fd(fd):
return 1
def validate_fd(fd):
pass
def closerange(fd_low, fd_high):
# this behaves like os.closerange() from Python 2.6.
for fd in xrange(fd_low, fd_high):
try:
if is_valid_fd(fd):
os.close(fd)
except OSError:
pass
#___________________________________________________________________
# Wrappers around posix functions, that accept either strings, or
# instances with a "as_bytes()" method.
# - pypy.modules.posix.interp_posix passes an object containing a unicode path
# which can encode itself with sys.filesystemencoding.
# - but rpython.rtyper.module.ll_os.py on Windows will replace these functions
# with other wrappers that directly handle unicode strings.
@specialize.argtype(0)
def _as_bytes(path):
assert path is not None
if isinstance(path, str):
return path
else:
return path.as_bytes()
@specialize.argtype(0)
def open(path, flags, mode):
return os.open(_as_bytes(path), flags, mode)
@specialize.argtype(0)
def stat(path):
return os.stat(_as_bytes(path))
@specialize.argtype(0)
def lstat(path):
return os.lstat(_as_bytes(path))
@specialize.argtype(0)
def statvfs(path):
return os.statvfs(_as_bytes(path))
@specialize.argtype(0)
def unlink(path):
return os.unlink(_as_bytes(path))
@specialize.argtype(0, 1)
def rename(path1, path2):
return os.rename(_as_bytes(path1), _as_bytes(path2))
@specialize.argtype(0)
def listdir(dirname):
return os.listdir(_as_bytes(dirname))
@specialize.argtype(0)
def access(path, mode):
return os.access(_as_bytes(path), mode)
@specialize.argtype(0)
def chmod(path, mode):
return os.chmod(_as_bytes(path), mode)
@specialize.argtype(0, 1)
def utime(path, times):
return os.utime(_as_bytes(path), times)
@specialize.argtype(0)
def chdir(path):
return os.chdir(_as_bytes(path))
@specialize.argtype(0)
def mkdir(path, mode=0777):
return os.mkdir(_as_bytes(path), mode)
@specialize.argtype(0)
def rmdir(path):
return os.rmdir(_as_bytes(path))
@specialize.argtype(0)
def mkfifo(path, mode):
os.mkfifo(_as_bytes(path), mode)
@specialize.argtype(0)
def mknod(path, mode, device):
os.mknod(_as_bytes(path), mode, device)
@specialize.argtype(0, 1)
def symlink(src, dest):
os.symlink(_as_bytes(src), _as_bytes(dest))
if os.name == 'nt':
import nt
@specialize.argtype(0)
def _getfullpathname(path):
return nt._getfullpathname(_as_bytes(path))
@specialize.argtype(0, 1)
def putenv(name, value):
os.environ[_as_bytes(name)] = _as_bytes(value)
@specialize.argtype(0)
def unsetenv(name):
del os.environ[_as_bytes(name)]
if os.name == 'nt':
from rpython.rlib import rwin32
os_kill = rwin32.os_kill
else:
os_kill = os.kill
```
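The path wrappers above all funnel through `_as_bytes()`, so any object exposing an `as_bytes()` method can stand in for a byte string. A small sketch of that convention on a POSIX system; `FakePath` is a hypothetical stand-in for the path objects that `pypy.module.posix` actually passes:

```python
from rpython.rlib import rposix

class FakePath(object):
    def __init__(self, s):
        self._s = s
    def as_bytes(self):
        return self._s

# both calls resolve to os.stat('/tmp') via _as_bytes()
assert rposix.stat('/tmp').st_mode == rposix.stat(FakePath('/tmp')).st_mode
```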
#### File: rsre/test/test_zjit.py
```python
import py
from rpython.jit.metainterp.test import support
from rpython.rlib.rsre.test.test_match import get_code
from rpython.rlib.rsre import rsre_core
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llstr, hlstr
def entrypoint1(r, string, repeat):
r = array2list(r)
string = hlstr(string)
match = None
for i in range(repeat):
match = rsre_core.match(r, string)
if match is None:
return -1
else:
return match.match_end
def entrypoint2(r, string, repeat):
r = array2list(r)
string = hlstr(string)
match = None
for i in range(repeat):
match = rsre_core.search(r, string)
if match is None:
return -1
else:
return match.match_start
def list2array(lst):
a = lltype.malloc(lltype.GcArray(lltype.Signed), len(lst))
for i, x in enumerate(lst):
a[i] = int(x)
return a
def array2list(a):
return [a[i] for i in range(len(a))]
def test_jit_unroll_safe():
# test that the decorators are applied in the right order
assert not hasattr(rsre_core.sre_match, '_jit_unroll_safe_')
for m in rsre_core.sre_match._specialized_methods_:
assert m._jit_unroll_safe_
class TestJitRSre(support.LLJitMixin):
def meta_interp_match(self, pattern, string, repeat=1):
r = get_code(pattern)
return self.meta_interp(entrypoint1, [list2array(r), llstr(string),
repeat],
listcomp=True, backendopt=True)
def meta_interp_search(self, pattern, string, repeat=1):
r = get_code(pattern)
return self.meta_interp(entrypoint2, [list2array(r), llstr(string),
repeat],
listcomp=True, backendopt=True)
def test_simple_match_1(self):
res = self.meta_interp_match(r"ab*bbbbbbbc", "abbbbbbbbbcdef")
assert res == 11
def test_simple_match_2(self):
res = self.meta_interp_match(r".*abc", "xxabcyyyyyyyyyyyyy")
assert res == 5
def test_simple_match_repeated(self):
res = self.meta_interp_match(r"abcdef", "abcdef", repeat=10)
assert res == 6
self.check_trace_count(1)
self.check_jitcell_token_count(1)
def test_match_minrepeat_1(self):
res = self.meta_interp_match(r".*?abc", "xxxxxxxxxxxxxxabc")
assert res == 17
#def test_match_maxuntil_1(self):
# res = self.meta_interp_match(r"(ab)*c", "ababababababababc")
# assert res == 17
def test_branch_1(self):
res = self.meta_interp_match(r".*?(ab|x)c", "xxxxxxxxxxxxxxabc")
assert res == 17
def test_match_minrepeat_2(self):
s = ("xxxxxxxxxxabbbbbbbbbb" +
"xxxxxxxxxxabbbbbbbbbb" +
"xxxxxxxxxxabbbbbbbbbb" +
"xxxxxxxxxxabbbbbbbbbbc")
res = self.meta_interp_match(r".*?ab+?c", s)
assert res == len(s)
def test_fast_search(self):
res = self.meta_interp_search(r"<foo\w+>", "e<f<f<foxd<f<fh<foobar>ua")
assert res == 15
self.check_resops(guard_value=0)
def test_regular_search(self):
res = self.meta_interp_search(r"<\w+>", "eiofweoxdiwhdoh<foobar>ua")
assert res == 15
def test_regular_search_upcase(self):
res = self.meta_interp_search(r"<\w+>", "EIOFWEOXDIWHDOH<FOOBAR>UA")
assert res == 15
def test_max_until_1(self):
res = self.meta_interp_match(r"(ab)*abababababc",
"ababababababababababc")
assert res == 21
def test_example_1(self):
res = self.meta_interp_search(
r"Active\s+20\d\d-\d\d-\d\d\s+[[]\d+[]]([^[]+)",
"Active"*20 + "Active 2010-04-07 [42] Foobar baz boz blah[43]")
assert res == 6*20
def test_aorbstar(self):
res = self.meta_interp_match("(a|b)*a", "a" * 100)
assert res == 100
self.check_resops(guard_value=0)
# group guards tests
def test_group_range(self):
res = self.meta_interp_match(r"<[^b-c]+>", "<aeaeaea>")
assert res == 9
self.check_enter_count(1)
def test_group_single_chars(self):
res = self.meta_interp_match(r"<[ae]+>", "<aeaeaea>")
assert res == 9
self.check_enter_count(1)
def test_group_digit(self):
res = self.meta_interp_match(r"<[^\d]+>", "<..a..aa>")
assert res == 9
self.check_enter_count(1)
def test_group_space(self):
res = self.meta_interp_match(r"<\S+>", "<..a..aa>")
assert res == 9
self.check_enter_count(1)
def test_group_word(self):
res = self.meta_interp_match(r"<\w+>", "<ab09_a1>")
assert res == 9
self.check_enter_count(1)
def test_group_complex(self):
res = self.meta_interp_match(r"<[a@h\d\s]+>", "<a93919a @ a23>")
assert res == 15
self.check_enter_count(1)
@py.test.mark.xfail
def test_group_space_but_not_space(self):
res = self.meta_interp_match(r"<[\S ]+>", "<..a .. aa>")
assert res == 13
self.check_enter_count(1)
def test_find_repetition_end_fastpath(self):
res = self.meta_interp_search(r"b+", "a"*30 + "b")
assert res == 30
self.check_resops(call=0)
```
#### File: rstruct/test/test_ieee.py
```python
import py
import sys
import random
import struct
from rpython.rlib.rstruct import ieee
from rpython.rlib.rfloat import isnan, NAN, INFINITY
from rpython.translator.c.test.test_genc import compile
class TestFloatSpecific:
def test_halffloat_exact(self):
#testcases generated from numpy.float16(x).view('uint16')
cases = [[0, 0], [10, 18688], [-10, 51456], [10e3, 28898],
[float('inf'), 31744], [-float('inf'), 64512]]
for c, h in cases:
hbit = ieee.float_pack(c, 2)
assert hbit == h
assert c == ieee.float_unpack(h, 2)
def test_halffloat_inexact(self):
#testcases generated from numpy.float16(x).view('uint16')
cases = [[10.001, 18688, 10.], [-10.001, 51456, -10],
[0.027588, 10000, 0.027587890625],
[22001, 30047, 22000]]
for c, h, f in cases:
hbit = ieee.float_pack(c, 2)
assert hbit == h
assert f == ieee.float_unpack(h, 2)
def test_halffloat_overunderflow(self):
import math
cases = [[670000, float('inf')], [-67000, -float('inf')],
[1e-08, 0], [-1e-8, -0.]]
for f1, f2 in cases:
try:
f_out = ieee.float_unpack(ieee.float_pack(f1, 2), 2)
except OverflowError:
f_out = math.copysign(float('inf'), f1)
assert f_out == f2
assert math.copysign(1., f_out) == math.copysign(1., f2)
def test_float80_exact(self):
s = []
ieee.pack_float80(s, -1., 16, False)
assert repr(s[-1]) == repr('\x00\x00\x00\x00\x00\x00\x00\x80\xff\xbf\x00\x00\x00\x00\x00\x00')
ieee.pack_float80(s, -1., 16, True)
assert repr(s[-1]) == repr('\x00\x00\x00\x00\x00\x00\xbf\xff\x80\x00\x00\x00\x00\x00\x00\x00')
ieee.pack_float80(s, -123.456, 16, False)
assert repr(s[-1]) == repr('\x00\xb8\xf3\xfd\xd4x\xe9\xf6\x05\xc0\x00\x00\x00\x00\x00\x00')
ieee.pack_float80(s, -123.456, 16, True)
assert repr(s[-1]) == repr('\x00\x00\x00\x00\x00\x00\xc0\x05\xf6\xe9x\xd4\xfd\xf3\xb8\x00')
x = ieee.unpack_float80('\x00\x00\x00\x00\x00\x00\x00\x80\xff?\xc8\x01\x00\x00\x00\x00', False)
assert x == 1.0
x = ieee.unpack_float80('\x00\x00\x7f\x83\xe1\x91?\xff\x80\x00\x00\x00\x00\x00\x00\x00', True)
assert x == 1.0
class TestFloatPacking:
def setup_class(cls):
if sys.version_info < (2, 6):
py.test.skip("the 'struct' module of this old CPython is broken")
def check_float(self, x):
# check roundtrip
for size in [10, 12, 16]:
for be in [False, True]:
Q = []
ieee.pack_float80(Q, x, size, be)
Q = Q[0]
y = ieee.unpack_float80(Q, be)
assert repr(x) == repr(y), '%r != %r, Q=%r' % (x, y, Q)
for be in [False, True]:
Q = []
ieee.pack_float(Q, x, 8, be)
Q = Q[0]
y = ieee.unpack_float(Q, be)
assert repr(x) == repr(y), '%r != %r, Q=%r' % (x, y, Q)
# check that packing agrees with the struct module
struct_pack8 = struct.unpack('<Q', struct.pack('<d', x))[0]
float_pack8 = ieee.float_pack(x, 8)
assert struct_pack8 == float_pack8
# check that packing agrees with the struct module
try:
struct_pack4 = struct.unpack('<L', struct.pack('<f', x))[0]
except OverflowError:
struct_pack4 = "overflow"
try:
float_pack4 = ieee.float_pack(x, 4)
except OverflowError:
float_pack4 = "overflow"
assert struct_pack4 == float_pack4
if float_pack4 == "overflow":
return
# if we didn't overflow, try round-tripping the binary32 value
roundtrip = ieee.float_pack(ieee.float_unpack(float_pack4, 4), 4)
assert float_pack4 == roundtrip
try:
float_pack2 = ieee.float_pack(x, 2)
except OverflowError:
return
roundtrip = ieee.float_pack(ieee.float_unpack(float_pack2, 2), 2)
assert (float_pack2, x) == (roundtrip, x)
def test_infinities(self):
self.check_float(float('inf'))
self.check_float(float('-inf'))
def test_zeros(self):
self.check_float(0.0)
self.check_float(-0.0)
def test_nans(self):
self.check_float(float('nan'))
def test_simple(self):
test_values = [1e-10, 0.00123, 0.5, 0.7, 1.0, 123.456, 1e10]
for value in test_values:
self.check_float(value)
self.check_float(-value)
def test_subnormal(self):
# special boundaries
self.check_float(2**-1074)
self.check_float(2**-1022)
self.check_float(2**-1021)
self.check_float((2**53-1)*2**-1074)
self.check_float((2**52-1)*2**-1074)
self.check_float((2**52+1)*2**-1074)
# other subnormals
self.check_float(1e-309)
self.check_float(1e-320)
def test_powers_of_two(self):
# exact powers of 2
for k in range(-1074, 1024):
self.check_float(2.**k)
# and values near powers of 2
for k in range(-1074, 1024):
self.check_float((2 - 2**-52) * 2.**k)
def test_float4_boundaries(self):
# Exercise IEEE 754 binary32 boundary cases.
self.check_float(2**128.)
# largest representable finite binary32 value
self.check_float((1 - 2.**-24) * 2**128.)
# halfway case: rounds up to an overflowing value
self.check_float((1 - 2.**-25) * 2**128.)
self.check_float(2**-125)
# smallest normal
self.check_float(2**-126)
# smallest positive binary32 value (subnormal)
self.check_float(2**-149)
# 2**-150 should round down to 0
self.check_float(2**-150)
# but anything even a tiny bit larger should round up to 2**-149
self.check_float((1 + 2**-52) * 2**-150)
def test_random(self):
# construct a Python float from random integer, using struct
for _ in xrange(10000):
Q = random.randrange(2**64)
x = struct.unpack('<d', struct.pack('<Q', Q))[0]
# nans are tricky: we can't hope to reproduce the bit
# pattern exactly, so check_float will fail for a random nan.
if isnan(x):
continue
self.check_float(x)
class TestCompiled:
def test_pack_float(self):
def pack(x, size):
result = []
ieee.pack_float(result, x, size, False)
l = []
for x in result:
for c in x:
l.append(str(ord(c)))
return ','.join(l)
c_pack = compile(pack, [float, int])
def unpack(s):
l = s.split(',')
s = ''.join([chr(int(x)) for x in l])
return ieee.unpack_float(s, False)
c_unpack = compile(unpack, [str])
def check_roundtrip(x, size):
s = c_pack(x, size)
if not isnan(x):
# pack uses copysign which is ambiguous for NAN
assert s == pack(x, size)
assert unpack(s) == x
assert c_unpack(s) == x
else:
assert isnan(unpack(s))
assert isnan(c_unpack(s))
for size in [2, 4, 8]:
check_roundtrip(123.4375, size)
check_roundtrip(-123.4375, size)
check_roundtrip(INFINITY, size)
check_roundtrip(NAN, size)
```
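For reference, the round-trip that `check_float` exercises can be reproduced directly against the `struct` module for the 4- and 8-byte sizes, since `float_pack` returns the raw bit pattern as an integer. A minimal sketch, assuming an RPython checkout on `sys.path`:

```python
import struct
from rpython.rlib.rstruct import ieee

bits = ieee.float_pack(1.5, 4)   # binary32 bit pattern as an int
assert bits == struct.unpack('<L', struct.pack('<f', 1.5))[0]
assert ieee.float_unpack(bits, 4) == 1.5   # exact: 1.5 is representable in binary32
```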
#### File: rstruct/test/test_runpack.py
```python
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib.rstruct.runpack import runpack
from rpython.rlib.rarithmetic import LONG_BIT
import struct
class TestRStruct(BaseRtypingTest):
def test_unpack(self):
pad = '\x00' * (LONG_BIT//8-1) # 3 or 7 null bytes
def fn():
return runpack('sll', 'a'+pad+'\x03'+pad+'\x04'+pad)[1]
assert fn() == 3
assert self.interpret(fn, []) == 3
def test_unpack_2(self):
data = struct.pack('iiii', 0, 1, 2, 4)
def fn():
a, b, c, d = runpack('iiii', data)
return a * 1000 + b * 100 + c * 10 + d
assert fn() == 124
assert self.interpret(fn, []) == 124
def test_unpack_single(self):
data = struct.pack('i', 123)
def fn():
return runpack('i', data)
assert fn() == 123
assert self.interpret(fn, []) == 123
def test_unpack_big_endian(self):
def fn():
return runpack(">i", "\x01\x02\x03\x04")
assert fn() == 0x01020304
assert self.interpret(fn, []) == 0x01020304
def test_unpack_double_big_endian(self):
def fn():
return runpack(">d", "testtest")
assert fn() == struct.unpack(">d", "testtest")[0]
assert self.interpret(fn, []) == struct.unpack(">d", "testtest")[0]
```
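`runpack` is the RPython-friendly counterpart of `struct.unpack`: the format string must be a compile-time constant so the decoder can be specialized per format during translation, and untranslated it falls back to a plain-Python implementation. A small usage sketch:

```python
import struct
from rpython.rlib.rstruct.runpack import runpack

data = struct.pack('>ih', 7, -2)
assert runpack('>ih', data) == (7, -2)   # several codes yield a tuple
assert runpack('>i', data[:4]) == 7      # a single code yields the bare value
```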
#### File: rlib/rvmprof/cintf.py
```python
import py
import sys
from rpython.tool.udir import udir
from rpython.tool.version import rpythonroot
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
from rpython.jit.backend import detect_cpu
class VMProfPlatformUnsupported(Exception):
pass
def setup():
if not detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64):
raise VMProfPlatformUnsupported("rvmprof only supports"
" x86-64 CPUs for now")
ROOT = py.path.local(rpythonroot).join('rpython', 'rlib', 'rvmprof')
SRC = ROOT.join('src')
if sys.platform.startswith('linux'):
libs = ['dl']
else:
libs = []
eci_kwds = dict(
include_dirs = [SRC],
includes = ['rvmprof.h'],
libraries = libs,
separate_module_files = [SRC.join('rvmprof.c')],
post_include_bits=['#define RPYTHON_VMPROF\n'],
)
eci = ExternalCompilationInfo(**eci_kwds)
platform.verify_eci(ExternalCompilationInfo(
compile_extra=['-DRPYTHON_LL2CTYPES'],
**eci_kwds))
vmprof_init = rffi.llexternal("vmprof_init",
[rffi.INT, rffi.DOUBLE, rffi.CCHARP],
rffi.CCHARP, compilation_info=eci)
vmprof_enable = rffi.llexternal("vmprof_enable", [], rffi.INT,
compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO)
vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT,
compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO)
vmprof_register_virtual_function = rffi.llexternal(
"vmprof_register_virtual_function",
[rffi.CCHARP, rffi.LONG, rffi.INT],
rffi.INT, compilation_info=eci)
vmprof_ignore_signals = rffi.llexternal("vmprof_ignore_signals",
[rffi.INT], lltype.Void,
compilation_info=eci)
return CInterface(locals())
class CInterface(object):
def __init__(self, namespace):
for k, v in namespace.iteritems():
setattr(self, k, v)
def _freeze_(self):
return True
def token2lltype(tok):
if tok == 'i':
return lltype.Signed
if tok == 'r':
return llmemory.GCREF
raise NotImplementedError(repr(tok))
def make_trampoline_function(name, func, token, restok):
from rpython.jit.backend import detect_cpu
cont_name = 'rpyvmprof_f_%s_%s' % (name, token)
tramp_name = 'rpyvmprof_t_%s_%s' % (name, token)
orig_tramp_name = tramp_name
func.c_name = cont_name
func._dont_inline_ = True
if sys.platform == 'darwin':
# according to internet "At the time UNIX was written in 1974...."
# "... all C functions are prefixed with _"
cont_name = '_' + cont_name
tramp_name = '_' + tramp_name
PLT = ""
size_decl = ""
type_decl = ""
extra_align = ""
else:
PLT = "@PLT"
type_decl = "\t.type\t%s, @function" % (tramp_name,)
size_decl = "\t.size\t%s, .-%s" % (
tramp_name, tramp_name)
extra_align = "\t.cfi_def_cfa_offset 8"
assert detect_cpu.autodetect().startswith(detect_cpu.MODEL_X86_64), (
"rvmprof only supports x86-64 CPUs for now")
# mapping of argument count (not counting the final uid argument) to
# the register that holds this uid argument
reg = {0: '%rdi',
1: '%rsi',
2: '%rdx',
3: '%rcx',
4: '%r8',
5: '%r9',
}
try:
reg = reg[len(token)]
except KeyError:
raise NotImplementedError(
"not supported: %r takes more than 5 arguments" % (func,))
target = udir.join('module_cache')
target.ensure(dir=1)
target = target.join('trampoline_%s_%s.vmprof.s' % (name, token))
# NOTE! the tabs in this file are absolutely essential, things
# that don't start with \t are silently ignored (<arigato>: WAT!?)
target.write("""\
\t.text
\t.globl\t%(tramp_name)s
%(type_decl)s
%(tramp_name)s:
\t.cfi_startproc
\tpushq\t%(reg)s
\t.cfi_def_cfa_offset 16
\tcall %(cont_name)s%(PLT)s
\taddq\t$8, %%rsp
%(extra_align)s
\tret
\t.cfi_endproc
%(size_decl)s
""" % locals())
def tok2cname(tok):
if tok == 'i':
return 'long'
if tok == 'r':
return 'void *'
raise NotImplementedError(repr(tok))
header = 'RPY_EXTERN %s %s(%s);\n' % (
tok2cname(restok),
orig_tramp_name,
', '.join([tok2cname(tok) for tok in token] + ['long']))
header += """\
static int cmp_%s(void *addr) {
if (addr == %s) return 1;
#ifdef VMPROF_ADDR_OF_TRAMPOLINE
return VMPROF_ADDR_OF_TRAMPOLINE(addr);
#undef VMPROF_ADDR_OF_TRAMPOLINE
#else
return 0;
#endif
#define VMPROF_ADDR_OF_TRAMPOLINE cmp_%s
}
""" % (tramp_name, orig_tramp_name, tramp_name)
eci = ExternalCompilationInfo(
post_include_bits = [header],
separate_module_files = [str(target)],
)
return rffi.llexternal(
orig_tramp_name,
[token2lltype(tok) for tok in token] + [lltype.Signed],
token2lltype(restok),
compilation_info=eci,
_nowrapper=True, sandboxsafe=True,
random_effects_on_gcobjs=True)
```
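The `reg` table in `make_trampoline_function` mirrors the System V AMD64 calling convention: integer and pointer arguments travel in `%rdi, %rsi, %rdx, %rcx, %r8, %r9` in that order, so the trailing uid argument appended after N real arguments lands in register N+1, which is also why more than 5 real arguments raise `NotImplementedError`. A tiny illustration of the same lookup (`uid_register` is a hypothetical helper, not part of the module):

```python
ARG_REGS = ['%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9']

def uid_register(token):
    # token has one character per real argument, e.g. 'ri' = (gcref, int)
    return ARG_REGS[len(token)]

assert uid_register('') == '%rdi'    # uid is the only argument
assert uid_register('ri') == '%rdx'  # two real arguments, uid comes third
```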
#### File: rpython/rlib/_stacklet_asmgcc.py
```python
from rpython.rlib.debug import ll_assert
from rpython.rlib import rgc
from rpython.rlib.objectmodel import specialize
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi
from rpython.rtyper.lltypesystem.lloperation import llop
from rpython.rtyper.annlowlevel import llhelper, MixLevelHelperAnnotator
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.rlib import _rffi_stacklet as _c
_asmstackrootwalker = None # BIG HACK: monkey-patched by asmgcroot.py
_stackletrootwalker = None
def get_stackletrootwalker():
# XXX this is too complicated now; we don't need a StackletRootWalker
# instance to store global state. We could rewrite it all in one big
# function. We don't care enough for now.
# lazily called, to make the following imports lazy
global _stackletrootwalker
if _stackletrootwalker is not None:
return _stackletrootwalker
from rpython.memory.gctransform.asmgcroot import (
WALKFRAME, CALLEE_SAVED_REGS, INDEX_OF_EBP, sizeofaddr)
assert _asmstackrootwalker is not None, "should have been monkey-patched"
basewalker = _asmstackrootwalker
class StackletRootWalker(object):
_alloc_flavor_ = "raw"
def setup(self, obj):
# initialization: read the SUSPSTACK object
p = llmemory.cast_adr_to_ptr(obj, lltype.Ptr(SUSPSTACK))
if not p.handle:
return False
self.context = llmemory.cast_ptr_to_adr(p.handle)
self.next_callback_piece = p.callback_pieces
anchor = p.anchor
del p
self.curframe = lltype.malloc(WALKFRAME, flavor='raw')
self.otherframe = lltype.malloc(WALKFRAME, flavor='raw')
self.fill_initial_frame(self.curframe, anchor)
return True
def fill_initial_frame(self, curframe, initialframedata):
# Copy&paste :-(
initialframedata += 2*sizeofaddr
reg = 0
while reg < CALLEE_SAVED_REGS:
curframe.regs_stored_at[reg] = initialframedata+reg*sizeofaddr
reg += 1
retaddraddr = initialframedata + CALLEE_SAVED_REGS * sizeofaddr
retaddraddr = self.translateptr(retaddraddr)
curframe.frame_address = retaddraddr.address[0]
def fetch_next_stack_piece(self):
if self.next_callback_piece == llmemory.NULL:
lltype.free(self.curframe, flavor='raw')
lltype.free(self.otherframe, flavor='raw')
self.context = llmemory.NULL
return False
else:
anchor = self.next_callback_piece
nextaddr = anchor + sizeofaddr
nextaddr = self.translateptr(nextaddr)
self.next_callback_piece = nextaddr.address[0]
self.fill_initial_frame(self.curframe, anchor)
return True
@specialize.arg(3)
def customtrace(self, gc, obj, callback, arg):
#
# Pointers to the stack can be "translated" or not:
#
# * Non-translated pointers point to where the data would be
# if the stack was installed and running.
#
# * Translated pointers correspond to where the data
# is now really in memory.
#
# Note that 'curframe' contains non-translated pointers, and
# of course the stack itself is full of non-translated pointers.
#
if not self.setup(obj):
return
while True:
callee = self.curframe
retaddraddr = self.translateptr(callee.frame_address)
retaddr = retaddraddr.address[0]
ebp_in_caller = callee.regs_stored_at[INDEX_OF_EBP]
ebp_in_caller = self.translateptr(ebp_in_caller)
ebp_in_caller = ebp_in_caller.address[0]
basewalker.locate_caller_based_on_retaddr(retaddr,
ebp_in_caller)
# see asmgcroot for similarity:
while True:
location = basewalker._shape_decompressor.next()
if location == 0:
break
addr = basewalker.getlocation(callee, ebp_in_caller,
location)
# yield the translated addr of the next GCREF in the stack
addr = self.translateptr(addr)
gc._trace_callback(callback, arg, addr)
caller = self.otherframe
reg = CALLEE_SAVED_REGS - 1
while reg >= 0:
location = basewalker._shape_decompressor.next()
addr = basewalker.getlocation(callee, ebp_in_caller,
location)
caller.regs_stored_at[reg] = addr # non-translated
reg -= 1
location = basewalker._shape_decompressor.next()
caller.frame_address = basewalker.getlocation(callee,
ebp_in_caller,
location)
# ^^^ non-translated
if caller.frame_address == llmemory.NULL:
# completely done with this piece of stack
if not self.fetch_next_stack_piece():
return
continue
#
self.otherframe = callee
self.curframe = caller
# loop back
def translateptr(self, addr):
return _c._translate_pointer(self.context, addr)
_stackletrootwalker = StackletRootWalker()
return _stackletrootwalker
get_stackletrootwalker._annspecialcase_ = 'specialize:memo'
def complete_destrptr(gctransformer):
translator = gctransformer.translator
mixlevelannotator = MixLevelHelperAnnotator(translator.rtyper)
args_s = [lltype_to_annotation(lltype.Ptr(SUSPSTACK))]
s_result = annmodel.s_None
destrptr = mixlevelannotator.delayedfunction(suspstack_destructor,
args_s, s_result)
mixlevelannotator.finish()
lltype.attachRuntimeTypeInfo(SUSPSTACK, destrptr=destrptr)
def customtrace(gc, obj, callback, arg):
stackletrootwalker = get_stackletrootwalker()
stackletrootwalker.customtrace(gc, obj, callback, arg)
lambda_customtrace = lambda: customtrace
def suspstack_destructor(suspstack):
h = suspstack.handle
if h:
_c.destroy(h)
SUSPSTACK = lltype.GcStruct('SuspStack',
('handle', _c.handle),
('anchor', llmemory.Address),
('callback_pieces', llmemory.Address),
rtti=True)
NULL_SUSPSTACK = lltype.nullptr(SUSPSTACK)
ASM_FRAMEDATA_HEAD_PTR = lltype.Ptr(lltype.ForwardReference())
ASM_FRAMEDATA_HEAD_PTR.TO.become(lltype.Struct('ASM_FRAMEDATA_HEAD',
('prev', ASM_FRAMEDATA_HEAD_PTR),
('next', ASM_FRAMEDATA_HEAD_PTR)
))
alternateanchor = lltype.malloc(ASM_FRAMEDATA_HEAD_PTR.TO,
immortal=True)
alternateanchor.prev = alternateanchor
alternateanchor.next = alternateanchor
FUNCNOARG_P = lltype.Ptr(lltype.FuncType([], _c.handle))
pypy_asm_stackwalk2 = rffi.llexternal('pypy_asm_stackwalk',
[FUNCNOARG_P,
ASM_FRAMEDATA_HEAD_PTR],
lltype.Signed, sandboxsafe=True,
_nowrapper=True)
def _new_callback():
# Here, we just closed the stack. Get the stack anchor, store
# it in the gcrootfinder.suspstack.anchor, and create a new
# stacklet with stacklet_new(). If this call fails, then we
# are just returning NULL.
_stack_just_closed()
#
return _c.new(gcrootfinder.newthrd, llhelper(_c.run_fn, _new_runfn),
llmemory.NULL)
def _stack_just_closed():
# Immediately unlink the new stackanchor from the doubly-linked
# chained list. When returning from pypy_asm_stackwalk2, the
# assembler code will try to unlink it again, which should be
# a no-op given that the doubly-linked list is empty.
stackanchor = llmemory.cast_ptr_to_adr(alternateanchor.next)
gcrootfinder.suspstack.anchor = stackanchor
alternateanchor.prev = alternateanchor
alternateanchor.next = alternateanchor
def _new_runfn(h, _):
# Here, we are in a fresh new stacklet.
llop.gc_stack_bottom(lltype.Void) # marker for trackgcroot.py
#
# There is a fresh suspstack object waiting on the gcrootfinder,
# so populate it with data that represents the parent suspended
# stacklet and detach the suspstack object from gcrootfinder.
suspstack = gcrootfinder.attach_handle_on_suspstack(h)
#
# Call the main function provided by the (RPython) user.
suspstack = gcrootfinder.runfn(suspstack, gcrootfinder.arg)
#
# Here, suspstack points to the target stacklet to which we want
# to jump to next. Read the 'handle' and forget about the
# suspstack object.
return _consume_suspstack(suspstack)
def _consume_suspstack(suspstack):
h = suspstack.handle
ll_assert(bool(h), "_consume_suspstack: null handle")
suspstack.handle = _c.null_handle
return h
def _switch_callback():
# Here, we just closed the stack. Get the stack anchor, store
# it in the gcrootfinder.suspstack.anchor, and switch to this
# suspstack with stacklet_switch(). If this call fails, then we
# are just returning NULL.
oldanchor = gcrootfinder.suspstack.anchor
_stack_just_closed()
h = _consume_suspstack(gcrootfinder.suspstack)
#
# gcrootfinder.suspstack.anchor is left with the anchor of the
# previous place (i.e. before the call to switch()).
h2 = _c.switch(h)
#
if not h2: # MemoryError: restore
gcrootfinder.suspstack.anchor = oldanchor
gcrootfinder.suspstack.handle = h
return h2
class StackletGcRootFinder(object):
suspstack = NULL_SUSPSTACK
def new(self, thrd, callback, arg):
self.newthrd = thrd._thrd
self.runfn = callback
self.arg = arg
# make a fresh new clean SUSPSTACK
rgc.register_custom_trace_hook(SUSPSTACK, lambda_customtrace)
newsuspstack = lltype.malloc(SUSPSTACK)
newsuspstack.handle = _c.null_handle
self.suspstack = newsuspstack
# Invoke '_new_callback' by closing the stack
#
callback_pieces = llop.gc_detach_callback_pieces(llmemory.Address)
newsuspstack.callback_pieces = callback_pieces
#
h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _new_callback),
alternateanchor)
h = rffi.cast(_c.handle, h)
#
llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces)
return self.get_result_suspstack(h)
def switch(self, suspstack):
# Immediately before the switch, 'suspstack' describes the suspended
# state of the *target* of the switch. Then it is theoretically
# freed. In fact what occurs is that we reuse the same 'suspstack'
# object in the target, just after the switch, to store the
# description of where we came from. Then that "other" 'suspstack'
# object is returned.
self.suspstack = suspstack
#
callback_pieces = llop.gc_detach_callback_pieces(llmemory.Address)
old_callback_pieces = suspstack.callback_pieces
suspstack.callback_pieces = callback_pieces
#
h = pypy_asm_stackwalk2(llhelper(FUNCNOARG_P, _switch_callback),
alternateanchor)
h = rffi.cast(_c.handle, h)
#
llop.gc_reattach_callback_pieces(lltype.Void, callback_pieces)
if not h:
self.suspstack.callback_pieces = old_callback_pieces
#
return self.get_result_suspstack(h)
def attach_handle_on_suspstack(self, handle):
s = self.suspstack
self.suspstack = NULL_SUSPSTACK
ll_assert(bool(s.anchor), "s.anchor should not be null")
s.handle = handle
llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s))
return s
def get_result_suspstack(self, h):
#
# Return from a new() or a switch(): 'h' is a handle, possibly
# an empty one, that says from where we switched to.
if not h:
raise MemoryError
elif _c.is_empty_handle(h):
return NULL_SUSPSTACK
else:
# This is a return that gave us a real handle. Store it.
return self.attach_handle_on_suspstack(h)
def is_empty_handle(self, suspstack):
return not suspstack
def get_null_handle(self):
return NULL_SUSPSTACK
gcrootfinder = StackletGcRootFinder()
```
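A note on the pattern above: a SUSPSTACK's handle is read and immediately nulled out before every switch, so a suspended stack can never be resumed twice. A minimal pure-Python sketch of that consume-once discipline (the class and constant below are hypothetical stand-ins, not the RPython types):

```python
# Consume-once handle pattern, as in _consume_suspstack() above.
NULL_HANDLE = 0

class SuspStackModel(object):
    def __init__(self, handle):
        self.handle = handle

def consume(suspstack):
    # Read the handle, then null it so this object can never be
    # switched to a second time.
    h = suspstack.handle
    assert h, "consume: null handle"
    suspstack.handle = NULL_HANDLE
    return h

s = SuspStackModel(42)
assert consume(s) == 42 and s.handle == NULL_HANDLE
```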
#### File: rlib/test/test_buffer.py
```python
from rpython.rlib.buffer import *
from rpython.annotator.annrpython import RPythonAnnotator
from rpython.annotator.model import SomeInteger
def test_string_buffer():
buf = StringBuffer('hello world')
assert buf.getitem(4) == 'o'
assert buf.getitem(4) == buf[4]
assert buf.getlength() == 11
assert buf.getlength() == len(buf)
assert buf.getslice(1, 6, 1, 5) == 'ello '
assert buf.getslice(1, 6, 1, 5) == buf[1:6]
assert buf.getslice(1, 6, 2, 3) == 'el '
assert buf.as_str() == 'hello world'
def test_len_nonneg():
    # This test needs a buffer subclass whose getlength() isn't guaranteed to
    # return a non-negative integer.
class DummyBuffer(Buffer):
def __init__(self, s):
self.size = s
def getlength(self):
return self.size
def func(n):
buf = DummyBuffer(n)
return len(buf)
a = RPythonAnnotator()
s = a.build_types(func, [int])
assert s == SomeInteger(nonneg=True)
```
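For readers unfamiliar with the four-argument getslice(start, stop, step, size) signature exercised above, it can be approximated with ordinary Python slicing; a rough model only, not the RPython implementation:

```python
# Approximate model of Buffer.getslice(start, stop, step, size):
# 'size' is the number of items actually returned, which differs from
# stop - start whenever step > 1.
def model_getslice(s, start, stop, step, size):
    result = s[start:stop:step]
    assert len(result) == size
    return result

assert model_getslice('hello world', 1, 6, 1, 5) == 'ello '
assert model_getslice('hello world', 1, 6, 2, 3) == 'el '
```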
#### File: rlib/test/test_rawstorage.py
```python
import py
import sys
from rpython.rtyper.lltypesystem import lltype
from rpython.rlib import rawstorage
from rpython.rlib.rawstorage import alloc_raw_storage, free_raw_storage,\
raw_storage_setitem, raw_storage_getitem, AlignmentError,\
raw_storage_setitem_unaligned, raw_storage_getitem_unaligned
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.translator.c.test.test_genc import compile
def test_untranslated_storage():
r = alloc_raw_storage(37)
raw_storage_setitem(r, 8, 1<<30)
res = raw_storage_getitem(lltype.Signed, r, 8)
assert res == 1<<30
raw_storage_setitem(r, 8, 3.14)
res = raw_storage_getitem(lltype.Float, r, 8)
assert res == 3.14
py.test.raises(AlignmentError, raw_storage_getitem, lltype.Signed, r, 3)
py.test.raises(AlignmentError, raw_storage_setitem, r, 3, 42.5)
free_raw_storage(r)
def test_untranslated_storage_unaligned(monkeypatch):
monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
r = alloc_raw_storage(15)
raw_storage_setitem_unaligned(r, 3, 1<<30)
res = raw_storage_getitem_unaligned(lltype.Signed, r, 3)
assert res == 1<<30
raw_storage_setitem_unaligned(r, 3, 3.14)
res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
assert res == 3.14
free_raw_storage(r)
class TestRawStorage(BaseRtypingTest):
def test_storage_int(self):
def f(i):
r = alloc_raw_storage(24)
raw_storage_setitem(r, 8, i)
res = raw_storage_getitem(lltype.Signed, r, 8)
free_raw_storage(r)
return res
x = self.interpret(f, [1<<30])
assert x == 1 << 30
def test_storage_float_unaligned(self, monkeypatch):
def f(v):
r = alloc_raw_storage(24)
raw_storage_setitem_unaligned(r, 3, v)
res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
free_raw_storage(r)
return res
monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
x = self.interpret(f, [3.14])
assert x == 3.14
class TestCBackend(object):
def test_backend_int(self):
def f(i):
r = alloc_raw_storage(24)
raw_storage_setitem(r, 8, i)
res = raw_storage_getitem(lltype.Signed, r, 8)
free_raw_storage(r)
return res != i
fc = compile(f, [int])
x = fc(-sys.maxint // 3)
assert x == 0
def test_backend_float_unaligned(self, monkeypatch):
def f(v):
r = alloc_raw_storage(24)
raw_storage_setitem_unaligned(r, 3, v)
res = raw_storage_getitem_unaligned(lltype.Float, r, 3)
free_raw_storage(r)
return res != v
if monkeypatch is not None:
monkeypatch.setattr(rawstorage, 'misaligned_is_fine', False)
fc = compile(f, [float])
x = fc(-3.14)
assert x == 0
def test_backend_float_unaligned_allow_misalign(self):
self.test_backend_float_unaligned(monkeypatch=None)
```
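The aligned/unaligned split these tests exercise can be pictured with the struct module: an unaligned write just places bytes at the given offset, while an aligned-only API must reject offsets that are not multiples of the item size. A loose analogy only, since raw storage is real untyped memory, not a Python bytearray:

```python
import struct

buf = bytearray(15)
# Like raw_storage_setitem_unaligned: offset 3 is fine for an 8-byte double.
struct.pack_into('d', buf, 3, 3.14)
assert struct.unpack_from('d', buf, 3)[0] == 3.14

# An aligned-only variant would raise instead, as raw_storage_setitem does:
def checked_pack_into(fmt, buf, offset, value):
    if offset % struct.calcsize(fmt) != 0:
        raise ValueError("AlignmentError (model)")
    struct.pack_into(fmt, buf, offset, value)
```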
#### File: rlib/test/test_rerased.py
```python
import py
import sys
import copy
from rpython.rlib.rerased import *
from rpython.annotator import model as annmodel
from rpython.annotator.annrpython import RPythonAnnotator
from rpython.rtyper.rclass import OBJECTPTR
from rpython.rtyper.lltypesystem import lltype, llmemory
from rpython.rtyper.test.tool import BaseRtypingTest
def make_annotator():
a = RPythonAnnotator()
a.translator.config.translation.taggedpointers = True
return a
class X(object):
pass
class Y(X):
pass
class Z(X):
pass
eraseX, uneraseX = new_erasing_pair("X")
erase_list_X, unerase_list_X = new_erasing_pair("list of X")
def test_simple():
x1 = X()
e = eraseX(x1)
#assert is_integer(e) is False
assert uneraseX(e) is x1
def test_simple_none():
e = eraseX(None)
assert uneraseX(e) is None
def test_simple_int():
e = erase_int(15)
#assert is_integer(e) is True
assert unerase_int(e) == 15
def test_simple_int_overflow():
erase_int(sys.maxint//2)
py.test.raises(OverflowError, erase_int, sys.maxint//2 + 1)
py.test.raises(OverflowError, erase_int, sys.maxint)
py.test.raises(OverflowError, erase_int, sys.maxint-1)
py.test.raises(OverflowError, erase_int, -sys.maxint)
py.test.raises(OverflowError, erase_int, -sys.maxint-1)
def test_list():
l = [X()]
e = erase_list_X(l)
#assert is_integer(e) is False
assert unerase_list_X(e) is l
def test_deepcopy():
x = "hello"
e = eraseX(x)
e2 = copy.deepcopy(e)
assert uneraseX(e) is x
assert uneraseX(e2) is x
def test_annotate_1():
def f():
return eraseX(X())
a = make_annotator()
s = a.build_types(f, [])
assert isinstance(s, SomeErased)
def test_annotate_2():
def f():
x1 = X()
e = eraseX(x1)
#assert not is_integer(e)
x2 = uneraseX(e)
return x2
a = make_annotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(X)
def test_annotate_3():
def f():
e = erase_int(16)
#assert is_integer(e)
x2 = unerase_int(e)
return x2
a = make_annotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInteger)
def test_annotate_erasing_pair():
erase, unerase = new_erasing_pair("test1")
erase2, unerase2 = new_erasing_pair("test2")
class Foo:
pass
#
def make(n):
if n > 5:
return erase([5, 6, n-6])
else:
foo = Foo()
foo.bar = n+1
return erase2(foo)
def check(x, n):
if n > 5:
return unerase(x)[2]
else:
return unerase2(x).bar
def f(n):
x = make(n)
return check(x, n)
#
a = make_annotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
def test_annotate_reflowing():
erase, unerase = new_erasing_pair("test1")
class A: pass
class B(A): pass
class C(B): pass
class D(C): pass
def f():
x = erase(None)
while True:
inst = unerase(x)
if inst is None:
inst = D()
x = erase(inst)
elif isinstance(inst, D):
inst = C()
x = erase(inst)
elif isinstance(inst, C):
inst = B()
x = erase(inst)
elif isinstance(inst, B):
inst = A()
x = erase(inst)
else:
return inst
#
a = make_annotator()
s = a.build_types(f, [])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(A)
def test_annotate_prebuilt():
erase, unerase = new_erasing_pair("test1")
class X(object):
pass
x1 = X()
e1 = erase(x1)
e2 = erase(None)
def f(i):
if i:
e = e1
else:
e = e2
return unerase(e)
#
a = make_annotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInstance)
assert s.classdef == a.bookkeeper.getuniqueclassdef(X)
assert s.can_be_none()
def test_annotate_prebuilt_int():
e1 = erase_int(42)
def f(i):
return unerase_int(e1)
a = make_annotator()
s = a.build_types(f, [int])
assert isinstance(s, annmodel.SomeInteger)
class TestRErased(BaseRtypingTest):
ERASED_TYPE = llmemory.GCREF
UNERASED_TYPE = OBJECTPTR
def castable(self, TO, var):
return lltype.castable(TO, lltype.typeOf(var)) > 0
def interpret(self, *args, **kwargs):
kwargs["taggedpointers"] = True
return BaseRtypingTest.interpret(self, *args, **kwargs)
def test_rtype_1(self):
def f():
return eraseX(X())
x = self.interpret(f, [])
assert lltype.typeOf(x) == self.ERASED_TYPE
def test_rtype_2(self):
def f():
x1 = X()
e = eraseX(x1)
#assert not is_integer(e)
x2 = uneraseX(e)
return x2
x = self.interpret(f, [])
assert self.castable(self.UNERASED_TYPE, x)
def test_rtype_3(self):
def f():
e = erase_int(16)
#assert is_integer(e)
x2 = unerase_int(e)
return x2
x = self.interpret(f, [])
assert x == 16
def test_prebuilt_erased(self):
e1 = erase_int(16)
x1 = X()
x1.foobar = 42
e2 = eraseX(x1)
def f():
#assert is_integer(e1)
#assert not is_integer(e2)
x1.foobar += 1
x2 = unerase_int(e1) + uneraseX(e2).foobar
return x2
x = self.interpret(f, [])
assert x == 16 + 42 + 1
def test_prebuilt_erased_in_instance(self):
erase_empty, unerase_empty = new_erasing_pair("empty")
class FakeList(object):
pass
x1 = X()
x1.foobar = 42
l1 = FakeList()
l1.storage = eraseX(x1)
l2 = FakeList()
l2.storage = erase_empty(None)
def f():
#assert is_integer(e1)
#assert not is_integer(e2)
x1.foobar += 1
x2 = uneraseX(l1.storage).foobar + (unerase_empty(l2.storage) is None)
return x2
x = self.interpret(f, [])
assert x == 43 + True
def test_overflow(self):
def f(i):
try:
e = erase_int(i)
except OverflowError:
return -1
#assert is_integer(e)
return unerase_int(e)
x = self.interpret(f, [16])
assert x == 16
x = self.interpret(f, [sys.maxint])
assert x == -1
def test_none(self):
def foo():
return uneraseX(eraseX(None))
assert foo() is None
res = self.interpret(foo, [])
assert not res
#
def foo():
eraseX(X())
return uneraseX(eraseX(None))
assert foo() is None
res = self.interpret(foo, [])
assert not res
def test_rtype_list(self):
prebuilt_l = [X()]
prebuilt_e = erase_list_X(prebuilt_l)
def l(flag):
if flag == 1:
l = [X()]
e = erase_list_X(l)
elif flag == 2:
l = prebuilt_l
e = erase_list_X(l)
else:
l = prebuilt_l
e = prebuilt_e
#assert is_integer(e) is False
assert unerase_list_X(e) is l
self.interpret(l, [0])
self.interpret(l, [1])
self.interpret(l, [2])
def test_union():
s_e1 = SomeErased()
s_e1.const = 1
s_e2 = SomeErased()
s_e2.const = 3
assert not annmodel.pair(s_e1, s_e2).union().is_constant()
# ____________________________________________________________
def test_erasing_pair():
erase, unerase = new_erasing_pair("test1")
class X:
pass
x = X()
erased = erase(x)
assert unerase(erased) is x
#
erase2, unerase2 = new_erasing_pair("test2")
py.test.raises(AssertionError, unerase2, erased)
```
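Conceptually, an erasing pair gives an opaque box whose contents only the matching unerase function may open. The sketch below models the identity-preserving round trip and the cross-pair AssertionError checked in test_erasing_pair; it is a model, not rerased's actual untranslated implementation:

```python
def new_erasing_pair_model(name):
    class Erased(object):          # a fresh class per pair
        def __init__(self, x):
            self._x = x
    def erase(x):
        return Erased(x)
    def unerase(y):
        assert type(y) is Erased   # rejects values from other pairs
        return y._x
    return erase, unerase

erase, unerase = new_erasing_pair_model("test1")
erase2, unerase2 = new_erasing_pair_model("test2")
obj = object()
assert unerase(erase(obj)) is obj
try:
    unerase2(erase(obj))           # wrong pair, as in the last test above
except AssertionError:
    pass
```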
#### File: rlib/test/test_rsignal.py
```python
import os, py
from rpython.translator.c.test.test_genc import compile
from rpython.rlib import rsignal
def setup_module(mod):
if not hasattr(os, 'kill') or not hasattr(os, 'getpid'):
py.test.skip("requires os.kill() and os.getpid()")
if not hasattr(rsignal, 'SIGUSR1'):
py.test.skip("requires SIGUSR1 in signal")
def check(expected):
res = rsignal.pypysig_poll()
os.write(1, "poll() => %d, expected %d\n" % (res, expected))
assert res == expected
def test_simple():
import os
check(-1)
check(-1)
for i in range(3):
rsignal.pypysig_setflag(rsignal.SIGUSR1)
os.kill(os.getpid(), rsignal.SIGUSR1)
check(rsignal.SIGUSR1)
check(-1)
check(-1)
rsignal.pypysig_ignore(rsignal.SIGUSR1)
os.kill(os.getpid(), rsignal.SIGUSR1)
check(-1)
check(-1)
rsignal.pypysig_default(rsignal.SIGUSR1)
check(-1)
def test_compile():
fn = compile(test_simple, [])
fn()
```
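The setflag/poll split has a rough CPython analogue: record delivered signals in a queue instead of acting on them, then drain the queue later. A POSIX-only sketch (mirroring the test's own skip conditions), not how pypysig_poll is implemented:

```python
import os, signal

pending = []
signal.signal(signal.SIGUSR1, lambda signum, frame: pending.append(signum))
os.kill(os.getpid(), signal.SIGUSR1)

def poll():
    # Like rsignal.pypysig_poll(): report one pending signal, or -1.
    return pending.pop(0) if pending else -1

assert poll() == signal.SIGUSR1
assert poll() == -1
```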
#### File: rlib/test/test_rthread.py
```python
import gc, time
from rpython.rlib.rthread import *
from rpython.rlib.rarithmetic import r_longlong
from rpython.translator.c.test.test_boehm import AbstractGCTestClass
from rpython.rtyper.lltypesystem import lltype, rffi
import py
def setup_module(mod):
# Hack to avoid a deadlock if the module is run after other test files :-(
# In this module, we assume that rthread.start_new_thread() is not
# providing us with a GIL equivalent, except in test_gc_locking
# which installs its own aroundstate.
rffi.aroundstate._cleanup_()
def test_lock():
l = allocate_lock()
ok1 = l.acquire(True)
ok2 = l.acquire(False)
l.release()
ok3 = l.acquire(False)
res = ok1 and not ok2 and ok3
assert res == 1
def test_thread_error():
l = allocate_lock()
try:
l.release()
except error:
pass
else:
py.test.fail("Did not raise")
def test_tlref_untranslated():
class FooBar(object):
pass
t = ThreadLocalReference(FooBar)
results = []
def subthread():
x = FooBar()
results.append(t.get() is None)
t.set(x)
results.append(t.get() is x)
time.sleep(0.2)
results.append(t.get() is x)
for i in range(5):
start_new_thread(subthread, ())
time.sleep(0.5)
assert results == [True] * 15
def test_get_ident():
import thread
assert get_ident() == thread.get_ident()
def test_threadlocalref_on_llinterp():
from rpython.rtyper.test.test_llinterp import interpret
tlfield = ThreadLocalField(lltype.Signed, "rthread_test_")
#
def f():
x = tlfield.setraw(42)
return tlfield.getraw()
#
res = interpret(f, [])
assert res == 42
class AbstractThreadTests(AbstractGCTestClass):
use_threads = True
def test_start_new_thread(self):
import time
class State:
pass
state = State()
def bootstrap1():
state.my_thread_ident1 = get_ident()
def bootstrap2():
state.my_thread_ident2 = get_ident()
def f():
state.my_thread_ident1 = get_ident()
state.my_thread_ident2 = get_ident()
start_new_thread(bootstrap1, ())
start_new_thread(bootstrap2, ())
willing_to_wait_more = 1000
while (state.my_thread_ident1 == get_ident() or
state.my_thread_ident2 == get_ident()):
willing_to_wait_more -= 1
if not willing_to_wait_more:
raise Exception("thread didn't start?")
time.sleep(0.01)
return 42
fn = self.getcompiled(f, [])
res = fn()
assert res == 42
def test_gc_locking(self):
import time
from rpython.rlib.objectmodel import invoke_around_extcall
from rpython.rlib.debug import ll_assert
class State:
pass
state = State()
class Z:
def __init__(self, i, j):
self.i = i
self.j = j
def run(self):
j = self.j
if self.i > 1:
g(self.i-1, self.j * 2)
ll_assert(j == self.j, "1: bad j")
g(self.i-2, self.j * 2 + 1)
else:
if len(state.answers) % 7 == 5:
gc.collect()
state.answers.append(self.j)
ll_assert(j == self.j, "2: bad j")
run._dont_inline_ = True
def before_extcall():
release_NOAUTO(state.gil)
before_extcall._gctransformer_hint_cannot_collect_ = True
# ^^^ see comments in gil.py about this hint
def after_extcall():
acquire_NOAUTO(state.gil, True)
gc_thread_run()
after_extcall._gctransformer_hint_cannot_collect_ = True
# ^^^ see comments in gil.py about this hint
def bootstrap():
# after_extcall() is called before we arrive here.
# We can't just acquire and release the GIL manually here,
            # because it is unsafe: bootstrap() is called from an rffi
# callback which checks for and reports exceptions after
# bootstrap() returns. The exception checking code must be
# protected by the GIL too.
z = state.z
state.z = None
state.bootstrapping.release()
z.run()
gc_thread_die()
# before_extcall() is called after we leave here
def g(i, j):
state.bootstrapping.acquire(True)
state.z = Z(i, j)
start_new_thread(bootstrap, ())
def f():
state.gil = allocate_ll_lock()
acquire_NOAUTO(state.gil, True)
state.bootstrapping = allocate_lock()
state.answers = []
state.finished = 0
# the next line installs before_extcall() and after_extcall()
# to be called automatically around external function calls.
invoke_around_extcall(before_extcall, after_extcall)
g(10, 1)
done = False
willing_to_wait_more = 2000
while not done:
if not willing_to_wait_more:
break
willing_to_wait_more -= 1
done = len(state.answers) == expected
time.sleep(0.01)
time.sleep(0.1)
return len(state.answers)
expected = 89
try:
fn = self.getcompiled(f, [])
finally:
rffi.aroundstate._cleanup_()
answers = fn()
assert answers == expected
def test_acquire_timed(self):
import time
def f():
l = allocate_lock()
l.acquire(True)
t1 = time.time()
ok = l.acquire_timed(1000001)
t2 = time.time()
delay = t2 - t1
if ok == 0: # RPY_LOCK_FAILURE
return -delay
elif ok == 2: # RPY_LOCK_INTR
return delay
else: # RPY_LOCK_ACQUIRED
return 0.0
fn = self.getcompiled(f, [])
res = fn()
assert res < -1.0
def test_acquire_timed_huge_timeout(self):
t = r_longlong(2 ** 61)
def f():
l = allocate_lock()
return l.acquire_timed(t)
fn = self.getcompiled(f, [])
res = fn()
assert res == 1 # RPY_LOCK_ACQUIRED
def test_acquire_timed_alarm(self):
import sys
if not sys.platform.startswith('linux'):
py.test.skip("skipped on non-linux")
import time
from rpython.rlib import rsignal
def f():
l = allocate_lock()
l.acquire(True)
#
rsignal.pypysig_setflag(rsignal.SIGALRM)
rsignal.c_alarm(1)
#
t1 = time.time()
ok = l.acquire_timed(2500000)
t2 = time.time()
delay = t2 - t1
if ok == 0: # RPY_LOCK_FAILURE
return -delay
elif ok == 2: # RPY_LOCK_INTR
return delay
else: # RPY_LOCK_ACQUIRED
return 0.0
fn = self.getcompiled(f, [])
res = fn()
assert res >= 0.95
def test_tlref(self):
class FooBar(object):
pass
t = ThreadLocalReference(FooBar)
def f():
x1 = FooBar()
t.set(x1)
import gc; gc.collect()
assert t.get() is x1
return 42
fn = self.getcompiled(f, [])
res = fn()
assert res == 42
#class TestRunDirectly(AbstractThreadTests):
# def getcompiled(self, f, argtypes):
# return f
# These are disabled because they crash occasionally for bad reasons
# related to the fact that ll2ctypes is not at all thread-safe
class TestUsingBoehm(AbstractThreadTests):
gcpolicy = 'boehm'
class TestUsingFramework(AbstractThreadTests):
gcpolicy = 'minimark'
```
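ThreadLocalReference, as exercised in test_tlref_untranslated, corresponds closely to CPython's threading.local; a minimal analogue:

```python
import threading

local = threading.local()
results = []

def subthread():
    x = object()
    results.append(getattr(local, 'value', None) is None)  # fresh per thread
    local.value = x
    results.append(local.value is x)

threads = [threading.Thread(target=subthread) for _ in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
assert results == [True] * 10
```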
#### File: rpython/rlib/types.py
```python
from rpython.annotator import model
from rpython.annotator.listdef import ListDef
from rpython.annotator.dictdef import DictDef
def none():
return model.s_None
def impossible():
return model.s_ImpossibleValue
def float():
return model.SomeFloat()
def singlefloat():
return model.SomeSingleFloat()
def longfloat():
return model.SomeLongFloat()
def int():
return model.SomeInteger()
def bool():
return model.SomeBool()
def unicode():
return model.SomeUnicodeString()
def unicode0():
return model.SomeUnicodeString(no_nul=True)
def str(can_be_None=False):
return model.SomeString(can_be_None=can_be_None)
def bytearray():
return model.SomeByteArray()
def str0():
return model.SomeString(no_nul=True)
def char():
return model.SomeChar()
def ptr(ll_type):
from rpython.rtyper.lltypesystem.lltype import Ptr
from rpython.rtyper.llannotation import SomePtr
return SomePtr(Ptr(ll_type))
def list(element):
listdef = ListDef(None, element, mutated=True, resized=True)
return model.SomeList(listdef)
def array(element):
listdef = ListDef(None, element, mutated=True, resized=False)
return model.SomeList(listdef)
def dict(keytype, valuetype):
dictdef = DictDef(None, keytype, valuetype)
return model.SomeDict(dictdef)
def instance(cls):
return lambda bookkeeper: model.SomeInstance(bookkeeper.getuniqueclassdef(cls))
class SelfTypeMarker(object):
pass
def self():
return SelfTypeMarker()
class AnyTypeMarker(object):
pass
def any():
return AnyTypeMarker()
```
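These factory functions are consumed by the @signature decorator from rpython.rlib.signature, which pins down a function's annotations up front; typical usage, sketched from how the decorator is used elsewhere in the codebase:

```python
from rpython.rlib.signature import signature
from rpython.rlib import types

@signature(types.int(), types.str(can_be_None=True), returns=types.bool())
def is_named(n, name):
    # The annotator now enforces both argument annotations and the
    # declared bool return type.
    return name is not None and n > 0
```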
#### File: rpython/rtyper/callparse.py
```python
from rpython.annotator.argument import ArgumentsForTranslation, ArgErr
from rpython.annotator import model as annmodel
from rpython.rtyper import rtuple
from rpython.rtyper.error import TyperError
from rpython.rtyper.lltypesystem import lltype
class ArgumentsForRtype(ArgumentsForTranslation):
def newtuple(self, items):
return NewTupleHolder(items)
def unpackiterable(self, it):
assert it.is_tuple()
items = it.items()
return list(items)
def getrinputs(rtyper, graph):
"""Return the list of reprs of the input arguments to the 'graph'."""
return [rtyper.bindingrepr(v) for v in graph.getargs()]
def getrresult(rtyper, graph):
"""Return the repr of the result variable of the 'graph'."""
if graph.getreturnvar().annotation is not None:
return rtyper.bindingrepr(graph.getreturnvar())
else:
return lltype.Void
def getsig(rtyper, graph):
"""Return the complete 'signature' of the graph."""
return (graph.signature,
graph.defaults,
getrinputs(rtyper, graph),
getrresult(rtyper, graph))
def callparse(rtyper, graph, hop, r_self=None):
"""Parse the arguments of 'hop' when calling the given 'graph'.
"""
rinputs = getrinputs(rtyper, graph)
def args_h(start):
return [VarHolder(i, hop.args_s[i])
for i in range(start, hop.nb_args)]
if r_self is None:
start = 1
else:
start = 0
rinputs[0] = r_self
opname = hop.spaceop.opname
if opname == "simple_call":
arguments = ArgumentsForRtype(args_h(start))
elif opname == "call_args":
arguments = ArgumentsForRtype.fromshape(
hop.args_s[start].const, # shape
args_h(start+1))
# parse the arguments according to the function we are calling
signature = graph.signature
defs_h = []
if graph.defaults:
for x in graph.defaults:
defs_h.append(ConstHolder(x))
try:
holders = arguments.match_signature(signature, defs_h)
except ArgErr, e:
raise TyperError("signature mismatch: %s" % e.getmsg(graph.name))
assert len(holders) == len(rinputs), "argument parsing mismatch"
vlist = []
for h,r in zip(holders, rinputs):
v = h.emit(r, hop)
vlist.append(v)
return vlist
class Holder(object):
def is_tuple(self):
return False
def emit(self, repr, hop):
try:
cache = self._cache
except AttributeError:
cache = self._cache = {}
try:
return cache[repr]
except KeyError:
v = self._emit(repr, hop)
cache[repr] = v
return v
class VarHolder(Holder):
def __init__(self, num, s_obj):
self.num = num
self.s_obj = s_obj
def is_tuple(self):
return isinstance(self.s_obj, annmodel.SomeTuple)
def items(self):
assert self.is_tuple()
n = len(self.s_obj.items)
return tuple([ItemHolder(self, i) for i in range(n)])
def _emit(self, repr, hop):
return hop.inputarg(repr, arg=self.num)
def access(self, hop):
repr = hop.args_r[self.num]
return repr, self.emit(repr, hop)
class ConstHolder(Holder):
def __init__(self, value):
self.value = value
def is_tuple(self):
return type(self.value) is tuple
def items(self):
assert self.is_tuple()
return self.value
def _emit(self, repr, hop):
return hop.inputconst(repr, self.value)
class NewTupleHolder(Holder):
def __new__(cls, holders):
for h in holders:
if not isinstance(h, ItemHolder) or not h.holder == holders[0].holder:
break
else:
if 0 < len(holders) == len(holders[0].holder.items()):
return holders[0].holder
inst = Holder.__new__(cls)
inst.holders = tuple(holders)
return inst
def is_tuple(self):
return True
def items(self):
return self.holders
def _emit(self, repr, hop):
assert isinstance(repr, rtuple.TupleRepr)
tupleitems_v = []
for h in self.holders:
v = h.emit(repr.items_r[len(tupleitems_v)], hop)
tupleitems_v.append(v)
vtuple = repr.newtuple(hop.llops, repr, tupleitems_v)
return vtuple
class ItemHolder(Holder):
def __init__(self, holder, index):
self.holder = holder
self.index = index
def _emit(self, repr, hop):
index = self.index
r_tup, v_tuple = self.holder.access(hop)
v = r_tup.getitem_internal(hop, v_tuple, index)
return hop.llops.convertvar(v, r_tup.items_r[index], repr)
```
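The shape constant unpacked from a call_args operation is a plain tuple describing how the call site passed its arguments: positional count, keyword names, and a *args flag. Schematically (an illustration of the data, not an API):

```python
# Shape recorded for the call site  f(a, b, x=1, y=2):
shape = (2, ('x', 'y'), False)     # 2 positional, keywords x/y, no *args
shape_cnt, shape_keys, shape_star = shape
assert shape_cnt + len(shape_keys) == 4   # total number of arguments
```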
#### File: rtyper/lltypesystem/rbytearray.py
```python
from rpython.rtyper.rbytearray import AbstractByteArrayRepr
from rpython.rtyper.lltypesystem import lltype, rstr
from rpython.rlib.debug import ll_assert
BYTEARRAY = lltype.GcForwardReference()
def mallocbytearray(size):
return lltype.malloc(BYTEARRAY, size)
_, _, copy_bytearray_contents = rstr._new_copy_contents_fun(BYTEARRAY, BYTEARRAY,
lltype.Char,
'bytearray')
_, _, copy_bytearray_contents_from_str = rstr._new_copy_contents_fun(rstr.STR,
BYTEARRAY,
lltype.Char,
'bytearray_from_str')
def _empty_bytearray():
return empty
BYTEARRAY.become(lltype.GcStruct('rpy_bytearray',
('chars', lltype.Array(lltype.Char)), adtmeths={
'malloc' : lltype.staticAdtMethod(mallocbytearray),
'copy_contents' : lltype.staticAdtMethod(copy_bytearray_contents),
'copy_contents_from_str': lltype.staticAdtMethod(
copy_bytearray_contents_from_str),
'length': rstr.LLHelpers.ll_length,
'empty': lltype.staticAdtMethod(_empty_bytearray),
}))
empty = lltype.malloc(BYTEARRAY, 0, immortal=True)
class LLHelpers(rstr.LLHelpers):
@classmethod
def ll_strsetitem(cls, s, i, item):
if i < 0:
i += s.length()
cls.ll_strsetitem_nonneg(s, i, item)
@staticmethod
def ll_strsetitem_nonneg(s, i, item):
chars = s.chars
        ll_assert(i >= 0, "negative str setitem index")
        ll_assert(i < len(chars), "str setitem index out of bound")
chars[i] = chr(item)
@staticmethod
def ll_stritem_nonneg(s, i):
return ord(rstr.LLHelpers.ll_stritem_nonneg(s, i))
class ByteArrayRepr(AbstractByteArrayRepr):
lowleveltype = lltype.Ptr(BYTEARRAY)
def __init__(self, *args):
AbstractByteArrayRepr.__init__(self, *args)
self.ll = LLHelpers
self.repr = self
def convert_const(self, value):
if value is None:
return lltype.nullptr(BYTEARRAY)
p = lltype.malloc(BYTEARRAY, len(value))
for i, c in enumerate(value):
p.chars[i] = chr(c)
return p
def ll_str(self, ll_b):
from rpython.rtyper.lltypesystem.rstr import mallocstr, STR
if ll_b:
lgt = ll_b.length()
ll_s = mallocstr(lgt)
for i in range(lgt):
ll_s.chars[i] = ll_b.chars[i]
return ll_s
else:
return lltype.nullptr(STR)
bytearray_repr = ByteArrayRepr()
def hlbytearray(ll_b):
b = bytearray()
for i in range(ll_b.length()):
b.append(ll_b.chars[i])
return b
```
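The repr stores bytearray items as Chars and converts at the boundaries: chr() on the way in (ll_strsetitem_nonneg, convert_const) and ord() on the way out (ll_stritem_nonneg). In plain Python terms:

```python
# Plain-Python model of the Char-based storage convention above.
storage = [chr(0)] * 4        # like BYTEARRAY.chars
storage[1] = chr(65)          # setitem stores chr(item)
assert ord(storage[1]) == 65  # getitem returns ord(char)
```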
#### File: rtyper/lltypesystem/rtagged.py
```python
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.rclass import (
InstanceRepr, CLASSTYPE, ll_inst_type, MissingRTypeAttribute,
ll_issubclass_const, getclassrepr, getinstancerepr, get_type_repr)
from rpython.rtyper.rmodel import TyperError, inputconst
class TaggedInstanceRepr(InstanceRepr):
def __init__(self, rtyper, classdef, unboxedclassdef):
InstanceRepr.__init__(self, rtyper, classdef)
self.unboxedclassdef = unboxedclassdef
self.is_parent = unboxedclassdef is not classdef
def _setup_repr(self):
InstanceRepr._setup_repr(self)
flds = self.allinstancefields.keys()
flds.remove('__class__')
if self.is_parent:
if flds:
raise TyperError("%r is a base class of an UnboxedValue,"
"so it cannot have fields: %r" % (
self.classdef, flds))
else:
if len(flds) != 1:
raise TyperError("%r must have exactly one field: %r" % (
self.classdef, flds))
self.specialfieldname = flds[0]
def new_instance(self, llops, classcallhop=None, nonmovable=False):
assert not nonmovable
if self.is_parent:
raise TyperError("don't instantiate %r, it is a parent of an "
"UnboxedValue class" % (self.classdef,))
if classcallhop is None:
raise TyperError("must instantiate %r by calling the class" % (
self.classdef,))
hop = classcallhop
if not (hop.spaceop.opname == 'simple_call' and hop.nb_args == 2):
raise TyperError("must instantiate %r with a simple class call" % (
self.classdef,))
v_value = hop.inputarg(lltype.Signed, arg=1)
c_one = hop.inputconst(lltype.Signed, 1)
hop.exception_is_here()
v2 = hop.genop('int_add_ovf', [v_value, v_value],
resulttype = lltype.Signed)
v2p1 = hop.genop('int_add', [v2, c_one],
resulttype = lltype.Signed)
v_instance = hop.genop('cast_int_to_ptr', [v2p1],
resulttype = self.lowleveltype)
return v_instance, False # don't call __init__
def convert_const_exact(self, value):
self.setup()
number = value.get_untagged_value()
return ll_int_to_unboxed(self.lowleveltype, number)
def getvalue_from_unboxed(self, llops, vinst):
assert not self.is_parent
v2 = llops.genop('cast_ptr_to_int', [vinst], resulttype=lltype.Signed)
c_one = inputconst(lltype.Signed, 1)
return llops.genop('int_rshift', [v2, c_one], resulttype=lltype.Signed)
def gettype_from_unboxed(self, llops, vinst, can_be_none=False):
unboxedclass_repr = getclassrepr(self.rtyper, self.unboxedclassdef)
cunboxedcls = inputconst(CLASSTYPE, unboxedclass_repr.getvtable())
if self.is_parent:
# If the lltype of vinst shows that it cannot be a tagged value,
# we can directly read the typeptr. Otherwise, call a helper that
# checks if the tag bit is set in the pointer.
unboxedinstance_repr = getinstancerepr(self.rtyper,
self.unboxedclassdef)
try:
lltype.castable(unboxedinstance_repr.lowleveltype,
vinst.concretetype)
except lltype.InvalidCast:
can_be_tagged = False
else:
can_be_tagged = True
vinst = llops.genop('cast_pointer', [vinst],
resulttype=self.common_repr())
if can_be_tagged:
if can_be_none:
func = ll_unboxed_getclass_canbenone
else:
func = ll_unboxed_getclass
return llops.gendirectcall(func, vinst,
cunboxedcls)
elif can_be_none:
return llops.gendirectcall(ll_inst_type, vinst)
else:
ctypeptr = inputconst(lltype.Void, 'typeptr')
return llops.genop('getfield', [vinst, ctypeptr],
resulttype = CLASSTYPE)
else:
return cunboxedcls
def getfield(self, vinst, attr, llops, force_cast=False, flags={}):
if not self.is_parent and attr == self.specialfieldname:
return self.getvalue_from_unboxed(llops, vinst)
elif attr == '__class__':
return self.gettype_from_unboxed(llops, vinst)
else:
raise MissingRTypeAttribute(attr)
def rtype_type(self, hop):
[vinst] = hop.inputargs(self)
return self.gettype_from_unboxed(
hop.llops, vinst, can_be_none=hop.args_s[0].can_be_none())
def rtype_setattr(self, hop):
# only for UnboxedValue.__init__(), which is not actually called
hop.genop('UnboxedValue_setattr', [])
def ll_str(self, i):
if lltype.cast_ptr_to_int(i) & 1:
from rpython.rtyper.lltypesystem import rstr
from rpython.rtyper.rint import signed_repr
llstr1 = signed_repr.ll_str(ll_unboxed_to_int(i))
return rstr.ll_strconcat(rstr.unboxed_instance_str_prefix,
rstr.ll_strconcat(llstr1,
rstr.unboxed_instance_str_suffix))
else:
return InstanceRepr.ll_str(self, i)
def rtype_isinstance(self, hop):
if not hop.args_s[1].is_constant():
raise TyperError("isinstance() too complicated")
[classdesc] = hop.args_s[1].descriptions
classdef = classdesc.getuniqueclassdef()
class_repr = get_type_repr(self.rtyper)
instance_repr = self.common_repr()
v_obj, v_cls = hop.inputargs(instance_repr, class_repr)
cls = v_cls.value
answer = self.unboxedclassdef.issubclass(classdef)
c_answer_if_unboxed = hop.inputconst(lltype.Bool, answer)
minid = hop.inputconst(lltype.Signed, cls.subclassrange_min)
maxid = hop.inputconst(lltype.Signed, cls.subclassrange_max)
return hop.gendirectcall(ll_unboxed_isinstance_const, v_obj,
minid, maxid, c_answer_if_unboxed)
def ll_int_to_unboxed(PTRTYPE, value):
return lltype.cast_int_to_ptr(PTRTYPE, value*2+1)
def ll_unboxed_to_int(p):
return lltype.cast_ptr_to_int(p) >> 1
def ll_unboxed_getclass_canbenone(instance, class_if_unboxed):
if instance:
return ll_unboxed_getclass(instance, class_if_unboxed)
return lltype.nullptr(lltype.typeOf(instance).TO.typeptr.TO)
def ll_unboxed_getclass(instance, class_if_unboxed):
if lltype.cast_ptr_to_int(instance) & 1:
return class_if_unboxed
return instance.typeptr
def ll_unboxed_isinstance_const(obj, minid, maxid, answer_if_unboxed):
if not obj:
return False
if lltype.cast_ptr_to_int(obj) & 1:
return answer_if_unboxed
else:
return ll_issubclass_const(obj.typeptr, minid, maxid)
```
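The arithmetic behind the tagging is compact: encoding is value*2+1, decoding is an arithmetic right shift, and bit 0 distinguishes tagged integers from real (at least word-aligned, hence even) pointers. Demonstrated on plain integers:

```python
# ll_int_to_unboxed / ll_unboxed_to_int, stripped of the pointer casts.
def encode(value):
    return value * 2 + 1      # always odd, so never a valid aligned pointer

def decode(tagged):
    return tagged >> 1        # arithmetic shift restores the value, sign too

for v in (0, 1, -1, 12345, -99999):
    t = encode(v)
    assert t & 1              # the tag bit ll_unboxed_getclass tests
    assert decode(t) == v
```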
#### File: rtyper/module/ll_win32file.py
```python
from __future__ import with_statement
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.tool import rffi_platform as platform
from rpython.tool.sourcetools import func_renamer
from rpython.rlib.objectmodel import specialize
def make_win32_traits(traits):
from rpython.rlib import rwin32
if traits.str is unicode:
suffix = 'W'
else:
suffix = 'A'
class CConfig:
_compilation_info_ = ExternalCompilationInfo(
includes = ['windows.h', 'winbase.h', 'sys/stat.h'],
)
WIN32_FIND_DATA = platform.Struct(
'struct _WIN32_FIND_DATA' + suffix,
# Only interesting fields
[('dwFileAttributes', rwin32.DWORD),
('nFileSizeHigh', rwin32.DWORD),
('nFileSizeLow', rwin32.DWORD),
('ftCreationTime', rwin32.FILETIME),
('ftLastAccessTime', rwin32.FILETIME),
('ftLastWriteTime', rwin32.FILETIME),
('cFileName', lltype.FixedSizeArray(traits.CHAR, 250))])
ERROR_FILE_NOT_FOUND = platform.ConstantInteger(
'ERROR_FILE_NOT_FOUND')
ERROR_NO_MORE_FILES = platform.ConstantInteger(
'ERROR_NO_MORE_FILES')
GetFileExInfoStandard = platform.ConstantInteger(
'GetFileExInfoStandard')
FILE_ATTRIBUTE_DIRECTORY = platform.ConstantInteger(
'FILE_ATTRIBUTE_DIRECTORY')
FILE_ATTRIBUTE_READONLY = platform.ConstantInteger(
'FILE_ATTRIBUTE_READONLY')
INVALID_FILE_ATTRIBUTES = platform.ConstantInteger(
'INVALID_FILE_ATTRIBUTES')
ERROR_SHARING_VIOLATION = platform.ConstantInteger(
'ERROR_SHARING_VIOLATION')
_S_IFDIR = platform.ConstantInteger('_S_IFDIR')
_S_IFREG = platform.ConstantInteger('_S_IFREG')
_S_IFCHR = platform.ConstantInteger('_S_IFCHR')
_S_IFIFO = platform.ConstantInteger('_S_IFIFO')
FILE_TYPE_UNKNOWN = platform.ConstantInteger('FILE_TYPE_UNKNOWN')
FILE_TYPE_CHAR = platform.ConstantInteger('FILE_TYPE_CHAR')
FILE_TYPE_PIPE = platform.ConstantInteger('FILE_TYPE_PIPE')
FILE_WRITE_ATTRIBUTES = platform.ConstantInteger(
'FILE_WRITE_ATTRIBUTES')
OPEN_EXISTING = platform.ConstantInteger(
'OPEN_EXISTING')
FILE_FLAG_BACKUP_SEMANTICS = platform.ConstantInteger(
'FILE_FLAG_BACKUP_SEMANTICS')
VOLUME_NAME_DOS = platform.ConstantInteger('VOLUME_NAME_DOS')
VOLUME_NAME_NT = platform.ConstantInteger('VOLUME_NAME_NT')
WIN32_FILE_ATTRIBUTE_DATA = platform.Struct(
'WIN32_FILE_ATTRIBUTE_DATA',
[('dwFileAttributes', rwin32.DWORD),
('nFileSizeHigh', rwin32.DWORD),
('nFileSizeLow', rwin32.DWORD),
('ftCreationTime', rwin32.FILETIME),
('ftLastAccessTime', rwin32.FILETIME),
('ftLastWriteTime', rwin32.FILETIME)])
BY_HANDLE_FILE_INFORMATION = platform.Struct(
'BY_HANDLE_FILE_INFORMATION',
[('dwFileAttributes', rwin32.DWORD),
('ftCreationTime', rwin32.FILETIME),
('ftLastAccessTime', rwin32.FILETIME),
('ftLastWriteTime', rwin32.FILETIME),
('dwVolumeSerialNumber', rwin32.DWORD),
('nFileSizeHigh', rwin32.DWORD),
('nFileSizeLow', rwin32.DWORD),
('nNumberOfLinks', rwin32.DWORD),
('nFileIndexHigh', rwin32.DWORD),
('nFileIndexLow', rwin32.DWORD)])
config = platform.configure(CConfig)
def external(*args, **kwargs):
kwargs['compilation_info'] = CConfig._compilation_info_
llfunc = rffi.llexternal(calling_conv='win', *args, **kwargs)
return staticmethod(llfunc)
class Win32Traits:
apisuffix = suffix
for name in '''WIN32_FIND_DATA WIN32_FILE_ATTRIBUTE_DATA BY_HANDLE_FILE_INFORMATION
GetFileExInfoStandard
FILE_ATTRIBUTE_DIRECTORY FILE_ATTRIBUTE_READONLY
INVALID_FILE_ATTRIBUTES
_S_IFDIR _S_IFREG _S_IFCHR _S_IFIFO
FILE_TYPE_UNKNOWN FILE_TYPE_CHAR FILE_TYPE_PIPE
FILE_WRITE_ATTRIBUTES OPEN_EXISTING FILE_FLAG_BACKUP_SEMANTICS
VOLUME_NAME_DOS VOLUME_NAME_NT
ERROR_FILE_NOT_FOUND ERROR_NO_MORE_FILES
ERROR_SHARING_VIOLATION
'''.split():
locals()[name] = config[name]
LPWIN32_FIND_DATA = lltype.Ptr(WIN32_FIND_DATA)
GET_FILEEX_INFO_LEVELS = rffi.ULONG # an enumeration
FindFirstFile = external('FindFirstFile' + suffix,
[traits.CCHARP, LPWIN32_FIND_DATA],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
FindNextFile = external('FindNextFile' + suffix,
[rwin32.HANDLE, LPWIN32_FIND_DATA],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
FindClose = external('FindClose',
[rwin32.HANDLE],
rwin32.BOOL)
GetFileAttributes = external(
'GetFileAttributes' + suffix,
[traits.CCHARP],
rwin32.DWORD,
save_err=rffi.RFFI_SAVE_LASTERROR)
SetFileAttributes = external(
'SetFileAttributes' + suffix,
[traits.CCHARP, rwin32.DWORD],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
GetFileAttributesEx = external(
'GetFileAttributesEx' + suffix,
[traits.CCHARP, GET_FILEEX_INFO_LEVELS,
lltype.Ptr(WIN32_FILE_ATTRIBUTE_DATA)],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
GetFileInformationByHandle = external(
'GetFileInformationByHandle',
[rwin32.HANDLE, lltype.Ptr(BY_HANDLE_FILE_INFORMATION)],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
GetFileType = external(
'GetFileType',
[rwin32.HANDLE],
rwin32.DWORD,
save_err=rffi.RFFI_SAVE_LASTERROR)
LPSTRP = rffi.CArrayPtr(traits.CCHARP)
GetFullPathName = external(
'GetFullPathName' + suffix,
[traits.CCHARP, rwin32.DWORD,
traits.CCHARP, LPSTRP],
rwin32.DWORD,
save_err=rffi.RFFI_SAVE_LASTERROR)
GetCurrentDirectory = external(
'GetCurrentDirectory' + suffix,
[rwin32.DWORD, traits.CCHARP],
rwin32.DWORD,
save_err=rffi.RFFI_SAVE_LASTERROR)
SetCurrentDirectory = external(
'SetCurrentDirectory' + suffix,
[traits.CCHARP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
CreateDirectory = external(
'CreateDirectory' + suffix,
[traits.CCHARP, rffi.VOIDP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
SetEnvironmentVariable = external(
'SetEnvironmentVariable' + suffix,
[traits.CCHARP, traits.CCHARP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
CreateFile = external(
'CreateFile' + apisuffix,
[traits.CCHARP, rwin32.DWORD, rwin32.DWORD,
rwin32.LPSECURITY_ATTRIBUTES, rwin32.DWORD, rwin32.DWORD,
rwin32.HANDLE],
rwin32.HANDLE,
save_err=rffi.RFFI_SAVE_LASTERROR)
DeleteFile = external(
'DeleteFile' + suffix,
[traits.CCHARP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
MoveFile = external(
'MoveFile' + suffix,
[traits.CCHARP, traits.CCHARP],
rwin32.BOOL,
save_err=rffi.RFFI_SAVE_LASTERROR)
return Win32Traits
#_______________________________________________________________
# listdir
def make_listdir_impl(traits):
from rpython.rlib import rwin32
win32traits = make_win32_traits(traits)
if traits.str is unicode:
def make_listdir_mask(path):
if path and path[-1] not in (u'/', u'\\', u':'):
path += u'/'
return path + u'*.*'
def skip_listdir(name):
return name == u"." or name == u".."
else:
def make_listdir_mask(path):
if path and path[-1] not in ('/', '\\', ':'):
path += '/'
return path + '*.*'
def skip_listdir(name):
return name == "." or name == ".."
@func_renamer('listdir_llimpl_%s' % traits.str.__name__)
def listdir_llimpl(path):
mask = make_listdir_mask(path)
filedata = lltype.malloc(win32traits.WIN32_FIND_DATA, flavor='raw')
try:
result = []
hFindFile = win32traits.FindFirstFile(mask, filedata)
if hFindFile == rwin32.INVALID_HANDLE_VALUE:
error = rwin32.GetLastError_saved()
if error == win32traits.ERROR_FILE_NOT_FOUND:
return result
else:
raise WindowsError(error, "FindFirstFile failed")
while True:
name = traits.charp2str(rffi.cast(traits.CCHARP,
filedata.c_cFileName))
if not skip_listdir(name):
result.append(name)
if not win32traits.FindNextFile(hFindFile, filedata):
break
# FindNextFile sets error to ERROR_NO_MORE_FILES if
# it got to the end of the directory
error = rwin32.GetLastError_saved()
win32traits.FindClose(hFindFile)
if error == win32traits.ERROR_NO_MORE_FILES:
return result
else:
raise WindowsError(error, "FindNextFile failed")
finally:
lltype.free(filedata, flavor='raw')
return listdir_llimpl
#_______________________________________________________________
# chdir
def make_chdir_impl(traits):
from rpython.rlib import rwin32
win32traits = make_win32_traits(traits)
if traits.str is unicode:
def isUNC(path):
return path[0] == u'\\' or path[0] == u'/'
def magic_envvar(path):
return u'=' + path[0] + u':'
else:
def isUNC(path):
return path[0] == '\\' or path[0] == '/'
def magic_envvar(path):
return '=' + path[0] + ':'
@func_renamer('chdir_llimpl_%s' % traits.str.__name__)
def chdir_llimpl(path):
"""This is a reimplementation of the C library's chdir function,
but one that produces Win32 errors instead of DOS error codes.
chdir is essentially a wrapper around SetCurrentDirectory; however,
it also needs to set "magic" environment variables indicating
the per-drive current directory, which are of the form =<drive>:
"""
if not win32traits.SetCurrentDirectory(path):
raise rwin32.lastSavedWindowsError()
MAX_PATH = rwin32.MAX_PATH
assert MAX_PATH > 0
with traits.scoped_alloc_buffer(MAX_PATH) as path:
res = win32traits.GetCurrentDirectory(MAX_PATH + 1, path.raw)
if not res:
raise rwin32.lastSavedWindowsError()
res = rffi.cast(lltype.Signed, res)
assert res > 0
if res <= MAX_PATH + 1:
new_path = path.str(res)
else:
with traits.scoped_alloc_buffer(res) as path:
res = win32traits.GetCurrentDirectory(res, path.raw)
if not res:
raise rwin32.lastSavedWindowsError()
res = rffi.cast(lltype.Signed, res)
assert res > 0
new_path = path.str(res)
if isUNC(new_path):
return
if not win32traits.SetEnvironmentVariable(magic_envvar(new_path), new_path):
raise rwin32.lastSavedWindowsError()
return chdir_llimpl
#_______________________________________________________________
# chmod
def make_chmod_impl(traits):
from rpython.rlib import rwin32
win32traits = make_win32_traits(traits)
@func_renamer('chmod_llimpl_%s' % traits.str.__name__)
def chmod_llimpl(path, mode):
attr = win32traits.GetFileAttributes(path)
if attr == win32traits.INVALID_FILE_ATTRIBUTES:
raise rwin32.lastSavedWindowsError()
if mode & 0200: # _S_IWRITE
attr &= ~win32traits.FILE_ATTRIBUTE_READONLY
else:
attr |= win32traits.FILE_ATTRIBUTE_READONLY
if not win32traits.SetFileAttributes(path, attr):
raise rwin32.lastSavedWindowsError()
return chmod_llimpl
#_______________________________________________________________
# getfullpathname
def make_getfullpathname_impl(traits):
from rpython.rlib import rwin32
win32traits = make_win32_traits(traits)
@func_renamer('getfullpathname_llimpl_%s' % traits.str.__name__)
def getfullpathname_llimpl(path):
nBufferLength = rwin32.MAX_PATH + 1
lpBuffer = lltype.malloc(traits.CCHARP.TO, nBufferLength, flavor='raw')
try:
res = win32traits.GetFullPathName(
path, rffi.cast(rwin32.DWORD, nBufferLength),
lpBuffer, lltype.nullptr(win32traits.LPSTRP.TO))
if res == 0:
raise rwin32.lastSavedWindowsError("_getfullpathname failed")
result = traits.charp2str(lpBuffer)
return result
finally:
lltype.free(lpBuffer, flavor='raw')
return getfullpathname_llimpl
def make_utime_impl(traits):
from rpython.rlib import rwin32
win32traits = make_win32_traits(traits)
from rpython.rtyper.module.ll_os_stat import time_t_to_FILE_TIME
GetSystemTime = rffi.llexternal(
'GetSystemTime',
[lltype.Ptr(rwin32.SYSTEMTIME)],
lltype.Void,
calling_conv='win',
save_err=rffi.RFFI_SAVE_LASTERROR)
SystemTimeToFileTime = rffi.llexternal(
'SystemTimeToFileTime',
[lltype.Ptr(rwin32.SYSTEMTIME),
lltype.Ptr(rwin32.FILETIME)],
rwin32.BOOL,
calling_conv='win')
SetFileTime = rffi.llexternal(
'SetFileTime',
[rwin32.HANDLE,
lltype.Ptr(rwin32.FILETIME),
lltype.Ptr(rwin32.FILETIME),
lltype.Ptr(rwin32.FILETIME)],
rwin32.BOOL,
calling_conv = 'win',
save_err=rffi.RFFI_SAVE_LASTERROR)
@specialize.argtype(1)
def os_utime_llimpl(path, tp):
hFile = win32traits.CreateFile(path,
win32traits.FILE_WRITE_ATTRIBUTES, 0,
None, win32traits.OPEN_EXISTING,
win32traits.FILE_FLAG_BACKUP_SEMANTICS,
rwin32.NULL_HANDLE)
if hFile == rwin32.INVALID_HANDLE_VALUE:
raise rwin32.lastSavedWindowsError()
ctime = lltype.nullptr(rwin32.FILETIME)
atime = lltype.malloc(rwin32.FILETIME, flavor='raw')
mtime = lltype.malloc(rwin32.FILETIME, flavor='raw')
try:
if tp is None:
now = lltype.malloc(rwin32.SYSTEMTIME, flavor='raw')
try:
GetSystemTime(now)
if (not SystemTimeToFileTime(now, atime) or
not SystemTimeToFileTime(now, mtime)):
raise rwin32.lastSavedWindowsError()
finally:
lltype.free(now, flavor='raw')
else:
actime, modtime = tp
time_t_to_FILE_TIME(actime, atime)
time_t_to_FILE_TIME(modtime, mtime)
if not SetFileTime(hFile, ctime, atime, mtime):
raise rwin32.lastSavedWindowsError()
finally:
rwin32.CloseHandle(hFile)
lltype.free(atime, flavor='raw')
lltype.free(mtime, flavor='raw')
return os_utime_llimpl
```
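The "magic" per-drive variables mentioned in chdir_llimpl's docstring are named =<drive>: and hold that drive's current directory; building the name is the trivial part (illustration only, since actually setting such variables has an effect only on Windows):

```python
def magic_envvar(path):
    return '=' + path[0] + ':'

assert magic_envvar('C:\\Users\\dev') == '=C:'
# After chdir('C:\\Users\\dev'), SetEnvironmentVariable is called with the
# name '=C:' and the value 'C:\\Users\\dev'.
```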
#### File: rtyper/module/r_os_stat.py
```python
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import lltype_to_annotation
from rpython.flowspace.model import Constant
from rpython.flowspace.operation import op
from rpython.tool.pairtype import pairtype
from rpython.rtyper.rmodel import Repr
from rpython.rtyper.rint import IntegerRepr
from rpython.rtyper.error import TyperError
from rpython.rtyper.module import ll_os_stat
class StatResultRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.stat_fields = ll_os_stat.STAT_FIELDS
self.stat_field_indexes = {}
for i, (name, TYPE) in enumerate(self.stat_fields):
self.stat_field_indexes[name] = i
self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE)
for name, TYPE in self.stat_fields])
self.r_tuple = rtyper.getrepr(self.s_tuple)
self.lowleveltype = self.r_tuple.lowleveltype
def redispatch_getfield(self, hop, index):
rtyper = self.rtyper
s_index = rtyper.annotator.bookkeeper.immutablevalue(index)
hop2 = hop.copy()
spaceop = op.getitem(hop.args_v[0], Constant(index))
spaceop.result = hop.spaceop.result
hop2.spaceop = spaceop
hop2.args_v = spaceop.args
hop2.args_s = [self.s_tuple, s_index]
hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)]
return hop2.dispatch()
def rtype_getattr(self, hop):
s_attr = hop.args_s[1]
attr = s_attr.const
try:
index = self.stat_field_indexes[attr]
except KeyError:
raise TyperError("os.stat().%s: field not available" % (attr,))
return self.redispatch_getfield(hop, index)
class __extend__(pairtype(StatResultRepr, IntegerRepr)):
def rtype_getitem((r_sta, r_int), hop):
s_int = hop.args_s[1]
index = s_int.const
return r_sta.redispatch_getfield(hop, index)
def specialize_make_stat_result(hop):
r_StatResult = hop.rtyper.getrepr(ll_os_stat.s_StatResult)
[v_result] = hop.inputargs(r_StatResult.r_tuple)
# no-op conversion from r_StatResult.r_tuple to r_StatResult
hop.exception_cannot_occur()
return v_result
class StatvfsResultRepr(Repr):
def __init__(self, rtyper):
self.rtyper = rtyper
self.statvfs_fields = ll_os_stat.STATVFS_FIELDS
self.statvfs_field_indexes = {}
for i, (name, TYPE) in enumerate(self.statvfs_fields):
self.statvfs_field_indexes[name] = i
self.s_tuple = annmodel.SomeTuple([lltype_to_annotation(TYPE)
for name, TYPE in self.statvfs_fields])
self.r_tuple = rtyper.getrepr(self.s_tuple)
self.lowleveltype = self.r_tuple.lowleveltype
def redispatch_getfield(self, hop, index):
rtyper = self.rtyper
s_index = rtyper.annotator.bookkeeper.immutablevalue(index)
hop2 = hop.copy()
spaceop = op.getitem(hop.args_v[0], Constant(index))
spaceop.result = hop.spaceop.result
hop2.spaceop = spaceop
hop2.args_v = spaceop.args
hop2.args_s = [self.s_tuple, s_index]
hop2.args_r = [self.r_tuple, rtyper.getrepr(s_index)]
return hop2.dispatch()
def rtype_getattr(self, hop):
s_attr = hop.args_s[1]
attr = s_attr.const
try:
index = self.statvfs_field_indexes[attr]
except KeyError:
raise TyperError("os.statvfs().%s: field not available" % (attr,))
return self.redispatch_getfield(hop, index)
class __extend__(pairtype(StatvfsResultRepr, IntegerRepr)):
def rtype_getitem((r_sta, r_int), hop):
s_int = hop.args_s[1]
index = s_int.const
return r_sta.redispatch_getfield(hop, index)
def specialize_make_statvfs_result(hop):
r_StatvfsResult = hop.rtyper.getrepr(ll_os_stat.s_StatvfsResult)
[v_result] = hop.inputargs(r_StatvfsResult.r_tuple)
hop.exception_cannot_occur()
return v_result
```
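Both reprs reduce attribute access to a tuple getitem through a name-to-index table built in __init__; stripped to its essence (the field names below are a hypothetical subset):

```python
# Essence of StatResultRepr.rtype_getattr / redispatch_getfield.
STAT_FIELDS = ['st_mode', 'st_ino', 'st_dev']
field_index = dict((name, i) for i, name in enumerate(STAT_FIELDS))

stat_tuple = (33188, 42, 7)                       # the underlying r_tuple
assert stat_tuple[field_index['st_ino']] == 42    # st.st_ino -> st[1]
```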
#### File: module/test/test_ll_strtod.py
```python
import py
from rpython.rtyper.test.tool import BaseRtypingTest
from rpython.rlib import rfloat
class TestStrtod(BaseRtypingTest):
def test_formatd(self):
for flags in [0,
rfloat.DTSF_ADD_DOT_0]:
def f(y):
return rfloat.formatd(y, 'g', 2, flags)
assert self.ll_to_string(self.interpret(f, [3.0])) == f(3.0)
```
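The observable effect of DTSF_ADD_DOT_0 can be modelled with plain %g formatting: without the flag, formatting 3.0 at precision 2 yields '3'; with it, a '.0' is appended so the result still reads as a float literal. A rough model, not rfloat's implementation:

```python
def model_formatd(x, prec, add_dot_0):
    s = '%.*g' % (prec, x)
    if add_dot_0 and '.' not in s and 'e' not in s and 'n' not in s:
        s += '.0'     # the 'n' guard skips nan/inf
    return s

assert model_formatd(3.0, 2, False) == '3'
assert model_formatd(3.0, 2, True) == '3.0'
```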
#### File: rpython/rtyper/normalizecalls.py
```python
from rpython.annotator import model as annmodel, description
from rpython.flowspace.argument import Signature
from rpython.flowspace.model import (Variable, Constant, Block, Link,
checkgraph, FunctionGraph, SpaceOperation)
from rpython.rlib.objectmodel import ComputedIntSymbolic
from rpython.rtyper.error import TyperError
from rpython.rtyper.rmodel import getgcflavor
from rpython.tool.sourcetools import valid_identifier
def normalize_call_familes(annotator):
for callfamily in annotator.bookkeeper.pbc_maximal_call_families.infos():
if not callfamily.modified:
assert callfamily.normalized
continue
normalize_calltable(annotator, callfamily)
callfamily.normalized = True
callfamily.modified = False
def normalize_calltable(annotator, callfamily):
"""Try to normalize all rows of a table."""
nshapes = len(callfamily.calltables)
for shape, table in callfamily.calltables.items():
for row in table:
did_something = normalize_calltable_row_signature(annotator, shape,
row)
if did_something:
assert not callfamily.normalized, "change in call family normalisation"
if nshapes != 1:
raise_call_table_too_complex_error(callfamily, annotator)
while True:
progress = False
for shape, table in callfamily.calltables.items():
for row in table:
progress |= normalize_calltable_row_annotation(annotator,
row.values())
if not progress:
return # done
assert not callfamily.normalized, "change in call family normalisation"
def raise_call_table_too_complex_error(callfamily, annotator):
msg = []
items = callfamily.calltables.items()
for i, (shape1, table1) in enumerate(items):
for shape2, table2 in items[i + 1:]:
if shape1 == shape2:
continue
row1 = table1[0]
row2 = table2[0]
problematic_function_graphs = set(row1.values()).union(set(row2.values()))
pfg = [str(graph) for graph in problematic_function_graphs]
pfg.sort()
msg.append("the following functions:")
msg.append(" %s" % ("\n ".join(pfg), ))
msg.append("are called with inconsistent numbers of arguments")
msg.append("(and/or the argument names are different, which is"
" not supported in this case)")
if shape1[0] != shape2[0]:
msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
else:
pass # XXX better message in this case
callers = []
msg.append("the callers of these functions are:")
for tag, (caller, callee) in annotator.translator.callgraph.iteritems():
if callee not in problematic_function_graphs:
continue
if str(caller) in callers:
continue
callers.append(str(caller))
callers.sort()
for caller in callers:
msg.append(" %s" % (caller, ))
raise TyperError("\n".join(msg))
def normalize_calltable_row_signature(annotator, shape, row):
graphs = row.values()
assert graphs, "no graph??"
sig0 = graphs[0].signature
defaults0 = graphs[0].defaults
for graph in graphs[1:]:
if graph.signature != sig0:
break
if graph.defaults != defaults0:
break
else:
return False # nothing to do, all signatures already match
shape_cnt, shape_keys, shape_star = shape
assert not shape_star, "should have been removed at this stage"
# for the first 'shape_cnt' arguments we need to generalize to
# a common type
call_nbargs = shape_cnt + len(shape_keys)
did_something = False
for graph in graphs:
argnames, varargname, kwargname = graph.signature
assert not varargname, "XXX not implemented"
assert not kwargname, "XXX not implemented" # ?
inputargs_s = [annotator.binding(v) for v in graph.getargs()]
argorder = range(shape_cnt)
for key in shape_keys:
i = list(argnames).index(key)
assert i not in argorder
argorder.append(i)
need_reordering = (argorder != range(call_nbargs))
if need_reordering or len(graph.getargs()) != call_nbargs:
oldblock = graph.startblock
inlist = []
defaults = graph.defaults or ()
num_nondefaults = len(inputargs_s) - len(defaults)
defaults = [description.NODEFAULT] * num_nondefaults + list(defaults)
newdefaults = []
for j in argorder:
v = Variable(graph.getargs()[j])
annotator.setbinding(v, inputargs_s[j])
inlist.append(v)
newdefaults.append(defaults[j])
newblock = Block(inlist)
# prepare the output args of newblock:
# 1. collect the positional arguments
outlist = inlist[:shape_cnt]
# 2. add defaults and keywords
for j in range(shape_cnt, len(inputargs_s)):
try:
i = argorder.index(j)
v = inlist[i]
except ValueError:
default = defaults[j]
if default is description.NODEFAULT:
raise TyperError(
"call pattern has %d positional arguments, "
"but %r takes at least %d arguments" % (
shape_cnt, graph.name, num_nondefaults))
v = Constant(default)
outlist.append(v)
newblock.closeblock(Link(outlist, oldblock))
graph.startblock = newblock
for i in range(len(newdefaults)-1,-1,-1):
if newdefaults[i] is description.NODEFAULT:
newdefaults = newdefaults[i:]
break
graph.defaults = tuple(newdefaults)
graph.signature = Signature([argnames[j] for j in argorder],
None, None)
# finished
checkgraph(graph)
annotator.annotated[newblock] = annotator.annotated[oldblock]
did_something = True
return did_something
def normalize_calltable_row_annotation(annotator, graphs):
if len(graphs) <= 1:
return False # nothing to do
graph_bindings = {}
for graph in graphs:
graph_bindings[graph] = [annotator.binding(v)
for v in graph.getargs()]
iterbindings = graph_bindings.itervalues()
nbargs = len(iterbindings.next())
for binding in iterbindings:
assert len(binding) == nbargs
generalizedargs = []
for i in range(nbargs):
args_s = []
for graph, bindings in graph_bindings.items():
args_s.append(bindings[i])
s_value = annmodel.unionof(*args_s)
generalizedargs.append(s_value)
result_s = [annotator.binding(graph.getreturnvar())
for graph in graph_bindings]
generalizedresult = annmodel.unionof(*result_s)
conversion = False
for graph in graphs:
bindings = graph_bindings[graph]
need_conversion = (generalizedargs != bindings)
if need_conversion:
conversion = True
oldblock = graph.startblock
inlist = []
for j, s_value in enumerate(generalizedargs):
v = Variable(graph.getargs()[j])
annotator.setbinding(v, s_value)
inlist.append(v)
newblock = Block(inlist)
# prepare the output args of newblock and link
outlist = inlist[:]
newblock.closeblock(Link(outlist, oldblock))
graph.startblock = newblock
# finished
checkgraph(graph)
annotator.annotated[newblock] = annotator.annotated[oldblock]
# convert the return value too
if annotator.binding(graph.getreturnvar()) != generalizedresult:
conversion = True
annotator.setbinding(graph.getreturnvar(), generalizedresult)
return conversion
# ____________________________________________________________
def merge_classpbc_getattr_into_classdef(annotator):
# code like 'some_class.attr' will record an attribute access in the
# PBC access set of the family of classes of 'some_class'. If the classes
# have corresponding ClassDefs, they are not updated by the annotator.
# We have to do it now.
all_families = annotator.bookkeeper.classpbc_attr_families
for attrname, access_sets in all_families.items():
for access_set in access_sets.infos():
descs = access_set.descs
if len(descs) <= 1:
continue
if not isinstance(descs.iterkeys().next(), description.ClassDesc):
continue
classdefs = [desc.getuniqueclassdef() for desc in descs]
commonbase = classdefs[0]
for cdef in classdefs[1:]:
commonbase = commonbase.commonbase(cdef)
if commonbase is None:
raise TyperError("reading attribute %r: no common base "
"class for %r" % (attrname, descs.keys()))
extra_access_sets = commonbase.extra_access_sets
if commonbase.repr is not None:
assert access_set in extra_access_sets # minimal sanity check
continue
access_set.commonbase = commonbase
if access_set not in extra_access_sets:
counter = len(extra_access_sets)
extra_access_sets[access_set] = attrname, counter
# ____________________________________________________________
def create_class_constructors(annotator):
bk = annotator.bookkeeper
call_families = bk.pbc_maximal_call_families
for family in call_families.infos():
if len(family.descs) <= 1:
continue
descs = family.descs.keys()
if not isinstance(descs[0], description.ClassDesc):
continue
# Note that if classes are in the same callfamily, their __init__
# attribute must be in the same attrfamily as well.
change = descs[0].mergeattrfamilies(descs[1:], '__init__')
if hasattr(descs[0].getuniqueclassdef(), 'my_instantiate_graph'):
assert not change, "after the fact change to a family of classes" # minimal sanity check
continue
# Put __init__ into the attr family, for ClassesPBCRepr.call()
attrfamily = descs[0].getattrfamily('__init__')
inits_s = [desc.s_read_attribute('__init__') for desc in descs]
s_value = annmodel.unionof(attrfamily.s_value, *inits_s)
attrfamily.s_value = s_value
# ClassesPBCRepr.call() will also need instantiate() support
for desc in descs:
bk.needs_generic_instantiate[desc.getuniqueclassdef()] = True
# ____________________________________________________________
def create_instantiate_functions(annotator):
# build the 'instantiate() -> instance of C' functions for the vtables
needs_generic_instantiate = annotator.bookkeeper.needs_generic_instantiate
for classdef in needs_generic_instantiate:
assert getgcflavor(classdef) == 'gc' # only gc-case
create_instantiate_function(annotator, classdef)
def create_instantiate_function(annotator, classdef):
# build the graph of a function that looks like
#
# def my_instantiate():
# return instantiate(cls)
#
if hasattr(classdef, 'my_instantiate_graph'):
return
v = Variable()
block = Block([])
block.operations.append(SpaceOperation('instantiate1', [], v))
name = valid_identifier('instantiate_' + classdef.name)
graph = FunctionGraph(name, block)
block.closeblock(Link([v], graph.returnblock))
annotator.setbinding(v, annmodel.SomeInstance(classdef))
annotator.annotated[block] = graph
# force the result to be converted to a generic OBJECTPTR
generalizedresult = annmodel.SomeInstance(classdef=None)
annotator.setbinding(graph.getreturnvar(), generalizedresult)
classdef.my_instantiate_graph = graph
annotator.translator.graphs.append(graph)
# ____________________________________________________________
class TooLateForNewSubclass(Exception):
pass
class TotalOrderSymbolic(ComputedIntSymbolic):
def __init__(self, orderwitness, peers):
self.orderwitness = orderwitness
self.peers = peers
self.value = None
self._with_subclasses = None # unknown
peers.append(self)
def __cmp__(self, other):
if not isinstance(other, TotalOrderSymbolic):
return cmp(self.compute_fn(), other)
else:
return cmp(self.orderwitness, other.orderwitness)
# support for implementing int_between: (a<=b<c) with (b-a<c-a)
# see rpython.jit.metainterp.pyjitpl.opimpl_int_between
def __sub__(self, other):
return self.compute_fn() - other
def __rsub__(self, other):
return other - self.compute_fn()
def check_any_subclass_in_peer_list(self, i):
        # check whether the next peer, in order, is the end
        # marker for this start marker
assert self.peers[i] is self
return self.peers[i + 1].orderwitness != self.orderwitness + [MAX]
def number_with_subclasses(self):
# Return True or False depending on whether this is the
# subclassrange_min corresponding to a class which has subclasses
# or not. If this is called and returns False, then adding later
# new subclasses will crash in compute_fn().
if self._with_subclasses is None: # unknown so far
self.peers.sort()
i = self.peers.index(self)
self._with_subclasses = self.check_any_subclass_in_peer_list(i)
return self._with_subclasses
def compute_fn(self):
if self.value is None:
self.peers.sort()
for i, peer in enumerate(self.peers):
assert peer.value is None or peer.value == i
peer.value = i
#
if peer._with_subclasses is False:
if peer.check_any_subclass_in_peer_list(i):
raise TooLateForNewSubclass
#
assert self.value is not None
return self.value
def dump(self, annotator): # for debugging
self.peers.sort()
mapping = {}
for classdef in annotator.bookkeeper.classdefs:
if hasattr(classdef, '_unique_cdef_id'):
mapping[classdef._unique_cdef_id] = classdef
for peer in self.peers:
if peer is self:
print '==>',
else:
print ' ',
print 'value %4s --' % (peer.value,), peer.orderwitness,
if peer.orderwitness[-1] in mapping:
print mapping[peer.orderwitness[-1]]
else:
print
def assign_inheritance_ids(annotator):
# we sort the classes by lexicographic order of reversed(mro),
# which gives a nice depth-first order. The classes are turned
# into numbers in order to (1) help determinism, (2) ensure that
# new hierarchies of classes with no common base classes can be
# added later and get higher numbers.
bk = annotator.bookkeeper
try:
lst = bk._inheritance_id_symbolics
except AttributeError:
lst = bk._inheritance_id_symbolics = []
for classdef in annotator.bookkeeper.classdefs:
if not hasattr(classdef, 'minid'):
witness = [get_unique_cdef_id(cdef) for cdef in classdef.getmro()]
witness.reverse()
classdef.minid = TotalOrderSymbolic(witness, lst)
classdef.maxid = TotalOrderSymbolic(witness + [MAX], lst)
MAX = 1E100
_cdef_id_counter = 0
def get_unique_cdef_id(cdef):
global _cdef_id_counter
try:
return cdef._unique_cdef_id
except AttributeError:
cdef._unique_cdef_id = _cdef_id_counter
_cdef_id_counter += 1
return cdef._unique_cdef_id
# ____________________________________________________________
def perform_normalizations(annotator):
create_class_constructors(annotator)
annotator.frozen += 1
try:
normalize_call_familes(annotator)
merge_classpbc_getattr_into_classdef(annotator)
assign_inheritance_ids(annotator)
finally:
annotator.frozen -= 1
create_instantiate_functions(annotator)
```
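The `TotalOrderSymbolic` machinery above turns subclass checks into integer range tests: sorting the reversed-MRO witnesses lexicographically gives every class a `[minid, maxid)` interval that contains exactly the ids of its subclasses. Below is a standalone sketch of that idea in plain Python; it does not use RPython's symbolics, and all helper names are ours:
```python
# Toy model of assign_inheritance_ids(); plain Python, not RPython code.
MAX = 1E100
_ids = {}

def uid(cls):
    # stand-in for get_unique_cdef_id(): ids in first-seen order
    return _ids.setdefault(cls, len(_ids))

def assign_ids(classes):
    peers = []
    for cls in classes:
        witness = [uid(c) for c in reversed(cls.__mro__) if c is not object]
        peers.append((witness, cls, 'min'))
        peers.append((witness + [MAX], cls, 'max'))
    peers.sort(key=lambda peer: peer[0])  # lexicographic, like peers.sort() above
    table = {}
    for value, (witness, cls, kind) in enumerate(peers):
        table.setdefault(cls, {})[kind] = value
    return table

class A(object): pass
class B(A): pass
class C(A): pass

table = assign_ids([A, B, C])

def is_subclass(sub, sup):
    # the two integer comparisons the translated code can use
    return table[sup]['min'] <= table[sub]['min'] < table[sup]['max']

assert is_subclass(B, A) and is_subclass(C, A) and not is_subclass(B, C)
```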
#### File: rtyper/test/test_rordereddict.py
```python
import py
try:
from collections import OrderedDict
except ImportError: # Python 2.6
py.test.skip("requires collections.OrderedDict")
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rtyper.lltypesystem import rordereddict, rstr
from rpython.rlib.rarithmetic import intmask
from rpython.rtyper.annlowlevel import llstr, hlstr
from rpython.rtyper.test.test_rdict import BaseTestRDict
from rpython.rlib import objectmodel
def get_indexes(ll_d):
return ll_d.indexes._obj.container._as_ptr()
def foreach_index(ll_d):
indexes = get_indexes(ll_d)
for i in range(len(indexes)):
yield rffi.cast(lltype.Signed, indexes[i])
def count_items(ll_d, ITEM):
c = 0
for item in foreach_index(ll_d):
if item == ITEM:
c += 1
return c
class TestRDictDirect(object):
dummykeyobj = None
dummyvalueobj = None
def _get_str_dict(self):
# STR -> lltype.Signed
DICT = rordereddict.get_ll_dict(lltype.Ptr(rstr.STR), lltype.Signed,
ll_fasthash_function=rstr.LLHelpers.ll_strhash,
ll_hash_function=rstr.LLHelpers.ll_strhash,
ll_eq_function=rstr.LLHelpers.ll_streq,
dummykeyobj=self.dummykeyobj,
dummyvalueobj=self.dummyvalueobj)
return DICT
def test_dict_creation(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
lls = llstr("abc")
rordereddict.ll_dict_setitem(ll_d, lls, 13)
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
assert rordereddict.ll_dict_getitem(ll_d, llstr("abc")) == 13
assert rordereddict.ll_dict_getitem(ll_d, lls) == 13
rordereddict.ll_dict_setitem(ll_d, lls, 42)
assert rordereddict.ll_dict_getitem(ll_d, lls) == 42
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 43)
assert rordereddict.ll_dict_getitem(ll_d, lls) == 43
def test_dict_creation_2(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
llab = llstr("ab")
llb = llstr("b")
rordereddict.ll_dict_setitem(ll_d, llab, 1)
rordereddict.ll_dict_setitem(ll_d, llb, 2)
assert rordereddict.ll_dict_getitem(ll_d, llb) == 2
def test_dict_store_get(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(20):
for j in range(i):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
assert ll_d.num_live_items == 20
for i in range(20):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_store_get_del(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(20):
for j in range(0, i, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(j))) == j
rordereddict.ll_dict_setitem(ll_d, llstr(str(i)), i)
if i % 2 != 0:
rordereddict.ll_dict_delitem(ll_d, llstr(str(i)))
assert ll_d.num_live_items == 10
for i in range(0, 20, 2):
assert rordereddict.ll_dict_getitem(ll_d, llstr(str(i))) == i
def test_dict_del_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("abc"))
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
py.test.raises(KeyError, rordereddict.ll_dict_delitem, ll_d, llstr("def"))
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 1
assert count_items(ll_d, rordereddict.DELETED) == 1
py.test.raises(KeyError, rordereddict.ll_dict_getitem, ll_d, llstr("abc"))
def test_dict_del_not_lastitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("abc"), 13)
rordereddict.ll_dict_setitem(ll_d, llstr("def"), 15)
rordereddict.ll_dict_delitem(ll_d, llstr("abc"))
assert count_items(ll_d, rordereddict.FREE) == rordereddict.DICT_INITSIZE - 2
assert count_items(ll_d, rordereddict.DELETED) == 1
def test_dict_resize(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("a"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("b"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("c"), 3)
rordereddict.ll_dict_setitem(ll_d, llstr("d"), 4)
rordereddict.ll_dict_setitem(ll_d, llstr("e"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("f"), 6)
rordereddict.ll_dict_setitem(ll_d, llstr("g"), 7)
rordereddict.ll_dict_setitem(ll_d, llstr("h"), 8)
rordereddict.ll_dict_setitem(ll_d, llstr("i"), 9)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 10)
assert len(get_indexes(ll_d)) == 16
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 11)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 12)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 13)
assert len(get_indexes(ll_d)) == 64
for item in 'abcdefghijklm':
assert rordereddict.ll_dict_getitem(ll_d, llstr(item)) == ord(item) - ord('a') + 1
def test_dict_grow_cleanup(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
lls = llstr("a")
for i in range(40):
rordereddict.ll_dict_setitem(ll_d, lls, i)
rordereddict.ll_dict_delitem(ll_d, lls)
assert ll_d.num_ever_used_items <= 10
def test_dict_iteration(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
ll_dictnext = rordereddict._ll_dictnext
num = ll_dictnext(ll_iter)
assert hlstr(ll_d.entries[num].key) == "k"
num = ll_dictnext(ll_iter)
assert hlstr(ll_d.entries[num].key) == "j"
py.test.raises(StopIteration, ll_dictnext, ll_iter)
def test_popitem(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
TUP = lltype.Ptr(lltype.GcStruct('x', ('item0', lltype.Ptr(rstr.STR)),
('item1', lltype.Signed)))
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "j"
assert ll_elem.item1 == 2
ll_elem = rordereddict.ll_dict_popitem(TUP, ll_d)
assert hlstr(ll_elem.item0) == "k"
assert ll_elem.item1 == 1
py.test.raises(KeyError, rordereddict.ll_dict_popitem, TUP, ll_d)
def test_popitem_first(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
rordereddict.ll_dict_setitem(ll_d, llstr("m"), 3)
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
for expected in ["k", "j", "m"]:
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == expected
rordereddict.ll_dict_delitem(ll_d, ll_key)
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
py.test.raises(StopIteration, rordereddict._ll_dictnext, ll_iter)
def test_popitem_first_bug(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_delitem(ll_d, llstr("k"))
ITER = rordereddict.get_ll_dictiter(lltype.Ptr(DICT))
ll_iter = rordereddict.ll_dictiter(ITER, ll_d)
num = rordereddict._ll_dictnext(ll_iter)
ll_key = ll_d.entries[num].key
assert hlstr(ll_key) == "j"
assert ll_d.lookup_function_no == 4 # 1 free item found at the start
rordereddict.ll_dict_delitem(ll_d, llstr("j"))
assert ll_d.num_ever_used_items == 0
assert ll_d.lookup_function_no == 0 # reset
def test_direct_enter_and_del(self):
def eq(a, b):
return a == b
DICT = rordereddict.get_ll_dict(lltype.Signed, lltype.Signed,
ll_fasthash_function=intmask,
ll_hash_function=intmask,
ll_eq_function=eq)
ll_d = rordereddict.ll_newdict(DICT)
numbers = [i * rordereddict.DICT_INITSIZE + 1 for i in range(8)]
for num in numbers:
rordereddict.ll_dict_setitem(ll_d, num, 1)
rordereddict.ll_dict_delitem(ll_d, num)
for k in foreach_index(ll_d):
assert k < rordereddict.VALID_OFFSET
def test_contains(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_contains(ll_d, llstr("k"))
assert not rordereddict.ll_dict_contains(ll_d, llstr("j"))
def test_clear(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("l"), 1)
rordereddict.ll_dict_clear(ll_d)
assert ll_d.num_live_items == 0
def test_get(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_get(ll_d, llstr("k"), 32) == 1
assert rordereddict.ll_dict_get(ll_d, llstr("j"), 32) == 32
def test_setdefault(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
assert rordereddict.ll_dict_setdefault(ll_d, llstr("j"), 42) == 42
assert rordereddict.ll_dict_getitem(ll_d, llstr("j")) == 42
assert rordereddict.ll_dict_setdefault(ll_d, llstr("k"), 42) == 1
assert rordereddict.ll_dict_getitem(ll_d, llstr("k")) == 1
def test_copy(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 1)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 2)
ll_d2 = rordereddict.ll_dict_copy(ll_d)
for ll_d3 in [ll_d, ll_d2]:
assert rordereddict.ll_dict_getitem(ll_d3, llstr("k")) == 1
assert rordereddict.ll_dict_get(ll_d3, llstr("j"), 42) == 2
assert rordereddict.ll_dict_get(ll_d3, llstr("i"), 42) == 42
def test_update(self):
DICT = self._get_str_dict()
ll_d1 = rordereddict.ll_newdict(DICT)
ll_d2 = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d1, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d1, llstr("j"), 6)
rordereddict.ll_dict_setitem(ll_d2, llstr("i"), 7)
rordereddict.ll_dict_setitem(ll_d2, llstr("k"), 8)
rordereddict.ll_dict_update(ll_d1, ll_d2)
for key, value in [("k", 8), ("i", 7), ("j", 6)]:
assert rordereddict.ll_dict_getitem(ll_d1, llstr(key)) == value
def test_pop(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop(ll_d, llstr("k")) == 5
assert rordereddict.ll_dict_pop(ll_d, llstr("j")) == 6
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("k"))
py.test.raises(KeyError, rordereddict.ll_dict_pop, ll_d, llstr("j"))
def test_pop_default(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
rordereddict.ll_dict_setitem(ll_d, llstr("k"), 5)
rordereddict.ll_dict_setitem(ll_d, llstr("j"), 6)
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 42) == 5
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 41) == 6
assert rordereddict.ll_dict_pop_default(ll_d, llstr("k"), 40) == 40
assert rordereddict.ll_dict_pop_default(ll_d, llstr("j"), 39) == 39
def test_bug_remove_deleted_items(self):
DICT = self._get_str_dict()
ll_d = rordereddict.ll_newdict(DICT)
for i in range(15):
rordereddict.ll_dict_setitem(ll_d, llstr(chr(i)), 5)
for i in range(15):
rordereddict.ll_dict_delitem(ll_d, llstr(chr(i)))
rordereddict.ll_prepare_dict_update(ll_d, 7)
# used to get UninitializedMemoryAccess
class TestRDictDirectDummyKey(TestRDictDirect):
class dummykeyobj:
ll_dummy_value = llstr("dupa")
class TestRDictDirectDummyValue(TestRDictDirect):
class dummyvalueobj:
ll_dummy_value = -42
class TestOrderedRDict(BaseTestRDict):
@staticmethod
def newdict():
return OrderedDict()
@staticmethod
def newdict2():
return OrderedDict()
@staticmethod
def new_r_dict(myeq, myhash):
return objectmodel.r_ordereddict(myeq, myhash)
def test_two_dicts_with_different_value_types(self):
def func(i):
d1 = OrderedDict()
d1['hello'] = i + 1
d2 = OrderedDict()
d2['world'] = d1
return d2['world']['hello']
res = self.interpret(func, [5])
assert res == 6
class TestStress:
def test_stress(self):
from rpython.annotator.dictdef import DictKey, DictValue
from rpython.annotator import model as annmodel
from rpython.rtyper import rint
from rpython.rtyper.test.test_rdict import not_really_random
rodct = rordereddict
dictrepr = rodct.OrderedDictRepr(
None, rint.signed_repr, rint.signed_repr,
DictKey(None, annmodel.SomeInteger()),
DictValue(None, annmodel.SomeInteger()))
dictrepr.setup()
l_dict = rodct.ll_newdict(dictrepr.DICT)
referencetable = [None] * 400
referencelength = 0
value = 0
def complete_check():
for n, refvalue in zip(range(len(referencetable)), referencetable):
try:
gotvalue = rodct.ll_dict_getitem(l_dict, n)
except KeyError:
assert refvalue is None
else:
assert gotvalue == refvalue
for x in not_really_random():
            n = int(x*100.0)    # 0 <= n < 400
op = repr(x)[-1]
if op <= '2' and referencetable[n] is not None:
rodct.ll_dict_delitem(l_dict, n)
referencetable[n] = None
referencelength -= 1
elif op <= '6':
rodct.ll_dict_setitem(l_dict, n, value)
if referencetable[n] is None:
referencelength += 1
referencetable[n] = value
value += 1
else:
try:
gotvalue = rodct.ll_dict_getitem(l_dict, n)
except KeyError:
assert referencetable[n] is None
else:
assert gotvalue == referencetable[n]
if 1.38 <= x <= 1.39:
complete_check()
print 'current dict length:', referencelength
assert l_dict.num_live_items == referencelength
complete_check()
def test_stress_2(self):
yield self.stress_combination, True, False
yield self.stress_combination, False, True
yield self.stress_combination, False, False
yield self.stress_combination, True, True
def stress_combination(self, key_can_be_none, value_can_be_none):
from rpython.rtyper.lltypesystem.rstr import string_repr
from rpython.annotator.dictdef import DictKey, DictValue
from rpython.annotator import model as annmodel
from rpython.rtyper.test.test_rdict import not_really_random
rodct = rordereddict
print
print "Testing combination with can_be_None: keys %s, values %s" % (
key_can_be_none, value_can_be_none)
class PseudoRTyper:
cache_dummy_values = {}
dictrepr = rodct.OrderedDictRepr(
PseudoRTyper(), string_repr, string_repr,
DictKey(None, annmodel.SomeString(key_can_be_none)),
DictValue(None, annmodel.SomeString(value_can_be_none)))
dictrepr.setup()
print dictrepr.lowleveltype
#for key, value in dictrepr.DICTENTRY._adtmeths.items():
# print ' %s = %s' % (key, value)
l_dict = rodct.ll_newdict(dictrepr.DICT)
referencetable = [None] * 400
referencelength = 0
values = not_really_random()
keytable = [string_repr.convert_const("foo%d" % n)
for n in range(len(referencetable))]
def complete_check():
for n, refvalue in zip(range(len(referencetable)), referencetable):
try:
gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n])
except KeyError:
assert refvalue is None
else:
assert gotvalue == refvalue
for x in not_really_random():
            n = int(x*100.0)    # 0 <= n < 400
op = repr(x)[-1]
if op <= '2' and referencetable[n] is not None:
rodct.ll_dict_delitem(l_dict, keytable[n])
referencetable[n] = None
referencelength -= 1
elif op <= '6':
ll_value = string_repr.convert_const(str(values.next()))
rodct.ll_dict_setitem(l_dict, keytable[n], ll_value)
if referencetable[n] is None:
referencelength += 1
referencetable[n] = ll_value
else:
try:
gotvalue = rodct.ll_dict_getitem(l_dict, keytable[n])
except KeyError:
assert referencetable[n] is None
else:
assert gotvalue == referencetable[n]
if 1.38 <= x <= 1.39:
complete_check()
print 'current dict length:', referencelength
assert l_dict.num_live_items == referencelength
complete_check()
```
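The direct tests above exercise the split layout of RPython's ordered dict: a dense `entries` array kept in insertion order, plus an `indexes` probe table that stores offsets into it (with FREE/DELETED markers). A much-simplified toy model of that layout, ours only (fixed size, linear probing, no deletion):
```python
# Toy model of the entries/indexes split; not the real rordereddict code.
FREE = -1

class ToyOrderedDict(object):
    def __init__(self, nslots=8):
        self.indexes = [FREE] * nslots   # probe table holding entry offsets
        self.entries = []                # (key, value) kept in insertion order
    def _slot(self, key):
        i = hash(key) % len(self.indexes)
        while self.indexes[i] != FREE and self.entries[self.indexes[i]][0] != key:
            i = (i + 1) % len(self.indexes)   # linear probing
        return i
    def __setitem__(self, key, value):
        i = self._slot(key)
        if self.indexes[i] == FREE:
            self.indexes[i] = len(self.entries)
            self.entries.append((key, value))
        else:
            self.entries[self.indexes[i]] = (key, value)
    def __getitem__(self, key):
        i = self._slot(key)
        if self.indexes[i] == FREE:
            raise KeyError(key)
        return self.entries[self.indexes[i]][1]

d = ToyOrderedDict()
d['abc'] = 13
assert d.indexes.count(FREE) == 8 - 1   # cf. test_dict_creation above
d['abc'] = 43
assert d['abc'] == 43 and len(d.entries) == 1
```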
#### File: tool/algo/regalloc.py
```python
import sys
from rpython.flowspace.model import Variable
from rpython.tool.algo.color import DependencyGraph
from rpython.tool.algo.unionfind import UnionFind
def perform_register_allocation(graph, consider_var, ListOfKind=()):
"""Perform register allocation for the Variables of the given 'kind'
in the 'graph'."""
regalloc = RegAllocator(graph, consider_var, ListOfKind)
regalloc.make_dependencies()
regalloc.coalesce_variables()
regalloc.find_node_coloring()
return regalloc
class RegAllocator(object):
DEBUG_REGALLOC = False
def __init__(self, graph, consider_var, ListOfKind):
self.graph = graph
self.consider_var = consider_var
self.ListOfKind = ListOfKind
def make_dependencies(self):
dg = DependencyGraph()
for block in self.graph.iterblocks():
# Compute die_at = {Variable: index_of_operation_with_last_usage}
die_at = dict.fromkeys(block.inputargs, 0)
for i, op in enumerate(block.operations):
for v in op.args:
if isinstance(v, Variable):
die_at[v] = i
elif isinstance(v, self.ListOfKind):
for v1 in v:
if isinstance(v1, Variable):
die_at[v1] = i
if op.result is not None:
die_at[op.result] = i + 1
if isinstance(block.exitswitch, tuple):
for x in block.exitswitch:
die_at.pop(x, None)
else:
die_at.pop(block.exitswitch, None)
for link in block.exits:
for v in link.args:
die_at.pop(v, None)
die_at = [(value, key) for (key, value) in die_at.items()]
die_at.sort()
die_at.append((sys.maxint,))
# Done. XXX the code above this line runs 3 times
# (for kind in KINDS) to produce the same result...
livevars = [v for v in block.inputargs
if self.consider_var(v)]
# Add the variables of this block to the dependency graph
for i, v in enumerate(livevars):
dg.add_node(v)
for j in range(i):
dg.add_edge(livevars[j], v)
livevars = set(livevars)
die_index = 0
for i, op in enumerate(block.operations):
while die_at[die_index][0] == i:
try:
livevars.remove(die_at[die_index][1])
except KeyError:
pass
die_index += 1
if (op.result is not None and
self.consider_var(op.result)):
dg.add_node(op.result)
for v in livevars:
if self.consider_var(v):
dg.add_edge(v, op.result)
livevars.add(op.result)
self._depgraph = dg
def coalesce_variables(self):
self._unionfind = UnionFind()
pendingblocks = list(self.graph.iterblocks())
while pendingblocks:
block = pendingblocks.pop()
# Aggressively try to coalesce each source variable with its
# target. We start from the end of the graph instead of
# from the beginning. This is a bit arbitrary, but the idea
# is that the end of the graph runs typically more often
# than the start, given that we resume execution from the
# middle during blackholing.
for link in block.exits:
if link.last_exception is not None:
self._depgraph.add_node(link.last_exception)
if link.last_exc_value is not None:
self._depgraph.add_node(link.last_exc_value)
for i, v in enumerate(link.args):
self._try_coalesce(v, link.target.inputargs[i])
def _try_coalesce(self, v, w):
if isinstance(v, Variable) and self.consider_var(v):
assert self.consider_var(w)
dg = self._depgraph
uf = self._unionfind
v0 = uf.find_rep(v)
w0 = uf.find_rep(w)
if v0 is not w0 and v0 not in dg.neighbours[w0]:
_, rep, _ = uf.union(v0, w0)
assert uf.find_rep(v0) is uf.find_rep(w0) is rep
if rep is v0:
dg.coalesce(w0, v0)
else:
assert rep is w0
dg.coalesce(v0, w0)
def find_node_coloring(self):
self._coloring = self._depgraph.find_node_coloring()
if self.DEBUG_REGALLOC:
for block in self.graph.iterblocks():
print block
for v in block.getvariables():
print '\t', v, '\t', self.getcolor(v)
def getcolor(self, v):
return self._coloring[self._unionfind.find_rep(v)]
def swapcolors(self, col1, col2):
for key, value in self._coloring.items():
if value == col1:
self._coloring[key] = col2
elif value == col2:
self._coloring[key] = col1
```
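`RegAllocator` builds an interference graph from per-block live ranges and colors it, with `coalesce_variables` trying to give link sources and targets the same color. A toy version of the interference-and-coloring step, ours rather than RPython's `DependencyGraph`:
```python
# Toy interference-graph coloring: variables whose live ranges overlap
# must receive different colors (registers).
def color(intervals):
    # intervals: {var: (def_index, last_use_index_exclusive)}
    edges = {v: set() for v in intervals}
    for v, (s1, e1) in intervals.items():
        for w, (s2, e2) in intervals.items():
            if v != w and s1 < e2 and s2 < e1:
                edges[v].add(w)
    coloring = {}
    for v in sorted(intervals):  # deterministic greedy order
        used = set(coloring[w] for w in edges[v] if w in coloring)
        c = 0
        while c in used:
            c += 1
        coloring[v] = c
    return coloring

# 'a' dies before 'c' is defined, so they may share a register:
print(color({'a': (0, 2), 'b': (1, 4), 'c': (2, 5)}))
# -> {'a': 0, 'b': 1, 'c': 0}
```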
#### File: rpython/tool/ansicolor.py
```python
class colors:
black = '30'
darkred = '31'
darkgreen = '32'
brown = '33'
darkblue = '34'
purple = '35'
teal = '36'
lightgray = '37'
darkgray = '30;01'
red = '31;01'
green = '32;01'
yellow = '33;01'
blue = '34;01'
fuchsia = '35;01'
turquoise = '36;01'
white = '37;01'
def setcolor(s, color):
return '\x1b[%sm%s\x1b[00m' % (color, s)
for name in colors.__dict__:
if name.startswith('_'):
continue
exec """
def %s(s):
return setcolor(s, colors.%s)
""" % (name, name)
```
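Since the `exec` loop above stamps out one helper function per color name, typical usage (assuming the `rpython` package is on the path) looks like:
```python
from rpython.tool.ansicolor import red, green, setcolor, colors

print(red('FAIL'))                      # '\x1b[31;01mFAIL\x1b[00m'
print(green('PASS'))
print(setcolor('custom', colors.teal))  # any colors.* attribute works
```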
#### File: tool/jitlogparser/storage.py
```python
import py
import os
from rpython.tool.disassembler import dis
from rpython.tool.jitlogparser.module_finder import gather_all_code_objs
class LoopStorage(object):
def __init__(self, extrapath=None):
self.loops = None
self.functions = {}
self.codes = {}
self.disassembled_codes = {}
self.extrapath = extrapath
def load_code(self, fname):
try:
return self.codes[fname]
except KeyError:
if os.path.isabs(fname):
res = gather_all_code_objs(fname)
else:
if self.extrapath is None:
raise IOError("Cannot find %s" % fname)
res = gather_all_code_objs(os.path.join(self.extrapath, fname))
self.codes[fname] = res
return res
def disassemble_code(self, fname, startlineno, name):
try:
if py.path.local(fname).check(file=False):
return None # cannot find source file
except py.error.EACCES:
return None # cannot open the file
key = (fname, startlineno, name)
try:
return self.disassembled_codes[key]
except KeyError:
codeobjs = self.load_code(fname)
if (startlineno, name) not in codeobjs:
# cannot find the code obj at this line: this can happen for
# various reasons, e.g. because the .py files changed since
# the log was produced, or because the co_firstlineno
# attribute of the code object is wrong (e.g., code objects
# produced by gateway.applevel(), such as the ones found in
# nanos.py)
return None
code = codeobjs[(startlineno, name)]
res = dis(code)
self.disassembled_codes[key] = res
return res
def reconnect_loops(self, loops):
""" Re-connect loops in a way that entry bridges are filtered out
        and normal bridges are associated with guards. Returns the list of
        normal loops.
"""
res = []
guard_dict = {}
for loop_no, loop in enumerate(loops):
for op in loop.operations:
if op.name.startswith('guard_'):
guard_dict[int(op.descr[len('<Guard0x'):-1], 16)] = (op, loop)
for loop in loops:
if loop.comment:
comment = loop.comment.strip()
if 'entry bridge' in comment:
pass
elif comment.startswith('# bridge out of'):
no = int(comment[len('# bridge out of Guard 0x'):].split(' ', 1)[0], 16)
op, parent = guard_dict[no]
op.bridge = loop
op.percentage = ((getattr(loop, 'count', 1) * 100) /
max(getattr(parent, 'count', 1), 1))
loop.no = no
continue
res.append(loop)
self.loops = res
return res
```
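`reconnect_loops` keys bridges by the hex guard address embedded in the descr and comment strings; here is the slicing arithmetic shown standalone, with a made-up address:
```python
descr = '<Guard0x7f3a2c001d40>'
key = int(descr[len('<Guard0x'):-1], 16)

comment = '# bridge out of Guard 0x7f3a2c001d40 with 12 ops'
no = int(comment[len('# bridge out of Guard 0x'):].split(' ', 1)[0], 16)

assert key == no == 0x7f3a2c001d40
```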
#### File: rpython/tool/terminal.py
```python
import sys
# The current module
MODULE = sys.modules[__name__]
COLORS = "BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK".split()
# List of terminal controls, you can add more to the list.
CONTROLS = {
'BOL':'cr', 'UP':'cuu1', 'DOWN':'cud1', 'LEFT':'cub1', 'RIGHT':'cuf1',
'CLEAR_SCREEN':'clear', 'CLEAR_EOL':'el', 'CLEAR_BOL':'el1',
'CLEAR_EOS':'ed', 'BOLD':'bold', 'BLINK':'blink', 'DIM':'dim',
'REVERSE':'rev', 'UNDERLINE':'smul', 'NORMAL':'sgr0',
    'HIDE_CURSOR':'civis', 'SHOW_CURSOR':'cnorm'  # terminfo capability is 'civis'
}
# List of numeric capabilities
VALUES = {
'COLUMNS':'cols', # Width of the terminal (80 for unknown)
'LINES':'lines', # Height of the terminal (25 for unknown)
'MAX_COLORS': 'colors',
}
def default():
"""Set the default attribute values"""
for color in COLORS:
setattr(MODULE, color, '')
setattr(MODULE, 'BG_%s' % color, '')
for control in CONTROLS:
setattr(MODULE, control, '')
MODULE.COLUMNS = 80
MODULE.LINES = 25
MODULE.MAX_COLORS = 1
def setup():
"""Set the terminal control strings"""
# Initializing the terminal
curses.setupterm()
# Get the color escape sequence template or '' if not supported
# setab and setaf are for ANSI escape sequences
bgColorSeq = curses.tigetstr('setab') or curses.tigetstr('setb') or ''
fgColorSeq = curses.tigetstr('setaf') or curses.tigetstr('setf') or ''
for color in COLORS:
# Get the color index from curses
colorIndex = getattr(curses, 'COLOR_%s' % color)
# Set the color escape sequence after filling the template with index
setattr(MODULE, color, curses.tparm(fgColorSeq, colorIndex))
# Set background escape sequence
setattr(
MODULE, 'BG_%s' % color, curses.tparm(bgColorSeq, colorIndex)
)
for control in CONTROLS:
# Set the control escape sequence
setattr(MODULE, control, curses.tigetstr(CONTROLS[control]) or '')
if hasattr(curses, 'tigetnum'):
for value in VALUES:
# Set terminal related values
setattr(MODULE, value, curses.tigetnum(VALUES[value]))
def render(text):
"""Helper function to apply controls easily
Example:
apply("%(GREEN)s%(BOLD)stext%(NORMAL)s") -> a bold green text
"""
return text % MODULE.__dict__
try:
import curses
setup()
except Exception, e:
# There is a failure; set all attributes to default
print 'Warning: %s' % e
default()
```
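`render()` is plain `%`-formatting against the module namespace filled in by `setup()` or `default()`; a hypothetical session:
```python
from rpython.tool import terminal

print(terminal.render('%(GREEN)s%(BOLD)sok%(NORMAL)s'))
print(terminal.render('terminal is %(COLUMNS)s columns wide'))
```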
#### File: translator/c/external.py
```python
from rpython.rtyper.lltypesystem.lltype import typeOf, Void
from rpython.translator.c.support import USESLOTS # set to False if necessary while refactoring
from rpython.translator.c.support import cdecl, somelettersfrom
class CExternalFunctionCodeGenerator(object):
if USESLOTS:
__slots__ = """db fnptr FUNCTYPE argtypenames resulttypename""".split()
def __init__(self, fnptr, db):
self.fnptr = fnptr
self.db = db
self.FUNCTYPE = typeOf(fnptr)
assert Void not in self.FUNCTYPE.ARGS
self.argtypenames = [db.gettype(T) for T in self.FUNCTYPE.ARGS]
self.resulttypename = db.gettype(self.FUNCTYPE.RESULT)
def graphs_to_patch(self):
return []
def name(self, cname): #virtual
return cname
def argnames(self):
return ['%s%d' % (somelettersfrom(self.argtypenames[i]), i)
for i in range(len(self.argtypenames))]
def allconstantvalues(self):
return []
def implementation_begin(self):
pass
def cfunction_declarations(self):
if self.FUNCTYPE.RESULT is not Void:
yield '%s;' % cdecl(self.resulttypename, 'result')
def cfunction_body(self):
try:
convert_params = self.fnptr.convert_params
except AttributeError:
convert_params = lambda backend, args: [arg for _,arg in args]
call = '%s(%s)' % (self.fnptr._name, ', '.join(convert_params("c", zip(self.FUNCTYPE.ARGS, self.argnames()))))
if self.FUNCTYPE.RESULT is not Void:
yield 'result = %s;' % call
yield 'if (PyErr_Occurred()) RPyConvertExceptionFromCPython();'
yield 'return result;'
else:
yield '%s;' % call
yield 'if (PyErr_Occurred()) RPyConvertExceptionFromCPython();'
def implementation_end(self):
pass
assert not USESLOTS or '__dict__' not in dir(CExternalFunctionCodeGenerator)
```
#### File: test/elf/src8.py
```python
class A:
pass
def foo(rec, a1, a2, a3, a4, a5, a6):
if rec > 0:
b = A()
foo(rec-1, b, b, b, b, b, b)
foo(rec-1, b, b, b, b, b, b)
foo(rec-1, a6, a5, a4, a3, a2, a1)
# __________ Entry point __________
def entry_point(argv):
foo(5, A(), A(), A(), A(), A(), A())
return 0
# _____ Define and setup target ___
def target(*args):
return entry_point, None
```
#### File: gcc/test/test_thread.py
```python
import py
import sys, os
from rpython.translator.c.test import test_standalone
def setup_module(module):
if sys.platform == 'win32':
if not ('mingw' in os.popen('gcc --version').read() and
'GNU' in os.popen('make --version').read()):
py.test.skip("mingw32 and MSYS are required for asmgcc on Windows")
class TestThreadedAsmGcc(test_standalone.TestThread):
gcrootfinder = 'asmgcc'
def setup_class(cls):
if sys.platform == 'win32':
from rpython.config.translationoption import get_combined_translation_config
cls.config = get_combined_translation_config(translating=True)
cls.config.translation.cc = 'mingw32'
```
#### File: translator/platform/openbsd.py
```python
import os
from rpython.translator.platform.bsd import BSD
class OpenBSD(BSD):
DEFAULT_CC = "cc"
name = "openbsd"
link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread']
cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-D_BSD_SOURCE'
] + os.environ.get("CFLAGS", "").split()
def _libs(self, libraries):
libraries=set(libraries + ("intl", "iconv"))
return ['-l%s' % lib for lib in libraries if lib not in ["crypt", "dl", "rt"]]
class OpenBSD_64(OpenBSD):
shared_only = ('-fPIC',)
``` |
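The `_libs` filtering above, shown standalone (`set` order is unspecified, hence the `sorted`):
```python
libraries = set(('z', 'crypt') + ('intl', 'iconv'))
print(sorted('-l%s' % lib for lib in libraries
             if lib not in ('crypt', 'dl', 'rt')))
# -> ['-liconv', '-lintl', '-lz']
```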
{
"source": "J-PTRson/SingleCellMultiOmics",
"score": 2
} |
#### File: singlecellmultiomics/molecule/iterator.py
```python
from singlecellmultiomics.molecule import Molecule
from singlecellmultiomics.fragment import Fragment
from singlecellmultiomics.utils.prefetch import initialise_dict, initialise
from singlecellmultiomics.universalBamTagger import QueryNameFlagger
import pysamiterators.iterators
import collections
import pysam
class ReadIterator(pysamiterators.iterators.MatePairIterator):
    """Iterate over single reads, yielding each as a (read, None) tuple
    so that unpaired data fits the mate-pair interface."""
    def __next__(self):
        return (next(self.iterator), None)
class MoleculeIterator():
"""Iterate over molecules in pysam.AlignmentFile or reads from a generator or list
Example:
>>> !wget https://github.com/BuysDB/SingleCellMultiOmics/blob/master/data/mini_nla_test.bam?raw=true -O mini_nla_test.bam
>>> !wget https://github.com/BuysDB/SingleCellMultiOmics/blob/master/data/mini_nla_test.bam.bai?raw=true -O mini_nla_test.bam.bai
>>> import pysam
>>> from singlecellmultiomics.molecule import NlaIIIMolecule, MoleculeIterator
>>> from singlecellmultiomics.fragment import NlaIIIFragment
>>> import pysamiterators
>>> alignments = pysam.AlignmentFile('mini_nla_test.bam')
>>> for molecule in MoleculeIterator(
>>> alignments=alignments,
>>> molecule_class=singlecellmultiomics.molecule.NlaIIIMolecule,
>>> fragment_class=singlecellmultiomics.fragment.NlaIIIFragment,
>>> ):
>>> break
>>> molecule
NlaIIIMolecule
with 1 assinged fragments
Allele :No allele assigned
Fragment:
sample:APKS1P25-NLAP2L2_57
umi:CCG
span:chr1 164834728-164834868
strand:+
has R1: yes
has R2: no
randomer trimmed: no
DS:164834865
RS:0
RZ:CAT
Restriction site:('chr1', 164834865)
    It is also possible to supply an iterator instead of a SAM/BAM file handle
Example:
>>> from singlecellmultiomics.molecule import MoleculeIterator
>>> from singlecellmultiomics.fragment import Fragment
>>> import pysam
>>> # Create SAM file to write some example reads to:
>>> test_sam = pysam.AlignmentFile('test.sam','w',reference_names=['chr1','chr2'],reference_lengths=[1000,1000])
>>> read_A = pysam.AlignedSegment(test_sam.header)
>>> read_A.set_tag('SM','CELL_1')
>>> read_A.set_tag('RX','CAT')
>>> read_A.reference_name = 'chr1'
>>> read_A.reference_start = 100
>>> read_A.query_sequence = 'ATCGGG'
>>> read_A.cigarstring = '6M'
>>> read_A.mapping_quality = 60
>>> # Create a second read which is a duplicate of the previous
>>> read_B = pysam.AlignedSegment(test_sam.header)
>>> read_B.set_tag('SM','CELL_1')
>>> read_B.set_tag('RX','CAT')
>>> read_B.reference_name = 'chr1'
>>> read_B.reference_start = 100
>>> read_B.query_sequence = 'ATCGG'
>>> read_B.cigarstring = '5M'
>>> read_B.mapping_quality = 60
    >>> # Create a third read belonging to another cell
>>> read_C = pysam.AlignedSegment(test_sam.header)
>>> read_C.set_tag('SM','CELL_2')
>>> read_C.set_tag('RX','CAT')
>>> read_C.reference_name = 'chr1'
>>> read_C.reference_start = 100
>>> read_C.query_sequence = 'ATCGG'
>>> read_C.cigarstring = '5M'
>>> read_C.mapping_quality = 60
>>> # Set up an iterable containing the reads:
>>> reads = [ read_A,read_B,read_C ]
>>> molecules = []
>>> for molecule in MoleculeIterator( reads ):
>>> print(molecule)
Molecule
with 2 assinged fragments
Allele :No allele assigned
Fragment:
sample:CELL_1
umi:CAT
span:chr1 100-106
strand:+
has R1: yes
has R2: no
randomer trimmed: no
Fragment:
sample:CELL_1
umi:CAT
span:chr1 100-105
strand:+
has R1: yes
has R2: no
randomer trimmed: no
Molecule
with 1 assinged fragments
Allele :No allele assigned
Fragment:
sample:CELL_2
umi:CAT
span:chr1 100-105
strand:+
has R1: yes
has R2: no
randomer trimmed: no
In the next example the molecules overlapping with a single location on chromosome `'1'` position `420000` are extracted
Don't forget to supply `check_eject_every = None`, this allows non-sorted data to be passed to the MoleculeIterator.
Example:
>>> from singlecellmultiomics.bamProcessing import mate_pileup
>>> from singlecellmultiomics.molecule import MoleculeIterator
>>> with pysam.AlignmentFile('example.bam') as alignments:
>>> for molecule in MoleculeIterator(
>>> mate_pileup(alignments, contig='1', position=420000, check_eject_every=None)
>>> ):
>>> pass
Warning:
Make sure the reads being supplied to the MoleculeIterator sorted by genomic coordinate! If the reads are not sorted set `check_eject_every=None`
"""
def __init__(self, alignments, molecule_class=Molecule,
fragment_class=Fragment,
                 check_eject_every=10_000, # bigger sizes are very beneficial for speed,
                 molecule_class_args={}, # because the relative amount of molecules
                 # which can be ejected will be much higher
fragment_class_args={},
perform_qflag=True,
pooling_method=1,
yield_invalid=False,
yield_overflow=True,
query_name_flagger=None,
every_fragment_as_molecule=False,
yield_secondary = False,
yield_supplementary= False,
max_buffer_size=None, #Limit the amount of stored reads, when this value is exceeded, a MemoryError is thrown
iterator_class = pysamiterators.iterators.MatePairIterator,
skip_contigs=None,
progress_callback_function=None,
min_mapping_qual = None,
perform_allele_clustering = False,
**pysamArgs):
"""Iterate over molecules in pysam.AlignmentFile
Args:
alignments (pysam.AlignmentFile) or iterable yielding tuples: Alignments to extract molecules from
molecule_class (pysam.FastaFile): Class to use for molecules.
fragment_class (pysam.FastaFile): Class to use for fragments.
check_eject_every (int): Check for yielding every N reads. When None is supplied, all reads are kept into memory making coordinate sorted data not required.
molecule_class_args (dict): arguments to pass to molecule_class.
fragment_class_args (dict): arguments to pass to fragment_class.
perform_qflag (bool): Make sure the sample/umi etc tags are copied
from the read name into bam tags
pooling_method(int) : 0: no pooling, 1: only compare molecules with the same sample id and hash
yield_invalid (bool) : When true all fragments which are invalid will be yielded as a molecule
yield_overflow(bool) : When true overflow fragments are yielded as separate molecules
query_name_flagger(class) : class which contains the method digest(self, reads) which accepts pysam.AlignedSegments and adds at least the SM and RX tags
every_fragment_as_molecule(bool): When set to true all valid fragments are emitted as molecule with one associated fragment, this is a way to disable deduplication.
yield_secondary (bool): When true all secondary alignments will be yielded as a molecule
iterator_class : Class name of class which generates mate-pairs out of a pysam.AlignmentFile either (pysamIterators.MatePairIterator or pysamIterators.MatePairIteratorIncludingNonProper)
skip_contigs (set) : Contigs to skip
            min_mapping_qual(int) : Don't process reads with a mapping quality lower than this value. These reads are not yielded as molecules!
**kwargs: arguments to pass to the pysam.AlignmentFile.fetch function
Yields:
molecule (Molecule): Molecule
"""
if query_name_flagger is None:
query_name_flagger = QueryNameFlagger()
self.query_name_flagger = query_name_flagger
self.skip_contigs = skip_contigs if skip_contigs is not None else set()
self.alignments = alignments
self.molecule_class = molecule_class
self.fragment_class = fragment_class
self.check_eject_every = check_eject_every
self.molecule_class_args = initialise_dict(molecule_class_args)
self.fragment_class_args = initialise_dict(fragment_class_args)
self.perform_qflag = perform_qflag
self.pysamArgs = pysamArgs
self.matePairIterator = None
self.pooling_method = pooling_method
self.yield_invalid = yield_invalid
self.yield_overflow = yield_overflow
self.every_fragment_as_molecule = every_fragment_as_molecule
self.progress_callback_function = progress_callback_function
self.iterator_class = iterator_class
self.max_buffer_size=max_buffer_size
self.min_mapping_qual = min_mapping_qual
self.perform_allele_clustering = perform_allele_clustering
self._clear_cache()
def _clear_cache(self):
"""Clear cache containing non yielded molecules"""
self.waiting_fragments = 0
self.yielded_fragments = 0
self.deleted_fragments = 0
self.check_ejection_iter = 0
if self.pooling_method == 0:
self.molecules = []
elif self.pooling_method == 1:
self.molecules_per_cell = collections.defaultdict(
list) # {hash:[], :}
else:
raise NotImplementedError()
def __repr__(self):
return f"""Molecule Iterator, generates fragments from {self.fragment_class} into molecules based on {self.molecule_class}.
Yielded {self.yielded_fragments} fragments, {self.waiting_fragments} fragments are waiting to be ejected. {self.deleted_fragments} fragments rejected.
{self.get_molecule_cache_size()} molecules cached.
Mate pair iterator: {str(self.matePairIterator)}"""
def get_molecule_cache_size(self):
if self.pooling_method == 0:
return len(self.molecules)
elif self.pooling_method == 1:
return sum(len(cell_molecules) for cell,
cell_molecules in self.molecules_per_cell.items())
else:
raise NotImplementedError()
    def yield_func(self, molecule_to_be_emitted):
        # Optionally split the molecule into per-allele molecules before emitting
        if self.perform_allele_clustering and molecule_to_be_emitted.can_be_split_into_allele_molecules:
            new_molecules = molecule_to_be_emitted.split_into_allele_molecules()
            if len(new_molecules) > 1:
                yield from new_molecules
                return
        yield molecule_to_be_emitted
def __iter__(self):
if self.perform_qflag:
qf = self.query_name_flagger
self._clear_cache()
self.waiting_fragments = 0
# prepare the source iterator which generates the read pairs:
if isinstance(self.alignments, pysam.libcalignmentfile.AlignmentFile):
self.matePairIterator = self.iterator_class(
self.alignments,
performProperPairCheck=False,
**self.pysamArgs)
else:
# If an iterable is provided use this as read source:
self.matePairIterator = self.alignments
for iteration,reads in enumerate(self.matePairIterator):
if self.progress_callback_function is not None and iteration%500==0:
self.progress_callback_function(iteration, self, reads)
if isinstance(reads, pysam.AlignedSegment):
R1 = reads
R2 = None
elif len(reads) == 2:
R1, R2 = reads
            elif isinstance(reads, (list, tuple)) and len(reads) == 1:
R1 = reads[0]
R2 = None
else:
raise ValueError(
'Iterable not understood, supply either pysam.AlignedSegment or lists of pysam.AlignedSegment')
# skip_contigs
if len(self.skip_contigs)>0:
keep = False
for read in reads:
if read is not None and read.reference_name not in self.skip_contigs:
keep = True
if not keep:
continue
if self.min_mapping_qual is not None:
keep = True
for read in reads:
if read is not None and read.mapping_quality<self.min_mapping_qual:
self.deleted_fragments+=1
keep=False
if not keep:
continue
# Make sure the sample/umi etc tags are placed:
if self.perform_qflag:
qf.digest([R1, R2])
fragment = self.fragment_class([R1, R2], **self.fragment_class_args)
if not fragment.is_valid():
if self.yield_invalid:
m = self.molecule_class(
fragment, **self.molecule_class_args)
m.__finalise__()
yield m
else:
self.deleted_fragments+=1
continue
if self.every_fragment_as_molecule:
m = self.molecule_class(fragment, **self.molecule_class_args)
m.__finalise__()
yield m
continue
added = False
try:
if self.pooling_method == 0:
for molecule in self.molecules:
if molecule.add_fragment(fragment, use_hash=False):
added = True
break
elif self.pooling_method == 1:
for molecule in self.molecules_per_cell[fragment.match_hash]:
if molecule.add_fragment(fragment, use_hash=True):
added = True
break
except OverflowError:
# This means the fragment does belong to a molecule, but the molecule does not accept any more fragments.
if self.yield_overflow:
m = self.molecule_class(fragment, **self.molecule_class_args)
m.set_rejection_reason('overflow')
m.__finalise__()
yield from self.yield_func(m)
else:
self.deleted_fragments+=1
continue
if not added:
if self.pooling_method == 0:
self.molecules.append(self.molecule_class(
fragment, **self.molecule_class_args))
else:
self.molecules_per_cell[fragment.match_hash].append(
self.molecule_class(fragment, **self.molecule_class_args)
)
self.waiting_fragments += 1
self.check_ejection_iter += 1
if self.max_buffer_size is not None and self.waiting_fragments>self.max_buffer_size:
raise MemoryError(f'max_buffer_size exceeded with {self.waiting_fragments} waiting fragments')
if self.check_eject_every is not None and self.check_ejection_iter > self.check_eject_every:
current_chrom, _, current_position = fragment.get_span()
if current_chrom is None:
continue
self.check_ejection_iter = 0
if self.pooling_method == 0:
to_pop = []
for i, m in enumerate(self.molecules):
if m.can_be_yielded(current_chrom, current_position):
to_pop.append(i)
self.waiting_fragments -= len(m)
self.yielded_fragments += len(m)
                    # compensate the stored index j for the i molecules already popped
                    for i, j in enumerate(to_pop):
                        m = self.molecules.pop(j - i)
m.__finalise__()
yield from self.yield_func(m)
else:
for hash_group, molecules in self.molecules_per_cell.items():
to_pop = []
for i, m in enumerate(molecules):
if m.can_be_yielded(
current_chrom, current_position):
to_pop.append(i)
self.waiting_fragments -= len(m)
self.yielded_fragments += len(m)
                        # compensate the stored index j for the i molecules already popped
                        for i, j in enumerate(to_pop):
                            m = self.molecules_per_cell[hash_group].pop(j - i)
m.__finalise__()
yield from self.yield_func(m)
# Yield remains
if self.pooling_method == 0:
for m in self.molecules:
m.__finalise__()
yield from self.yield_func(m)
#yield from iter(self.molecules)
else:
for hash_group, molecules in self.molecules_per_cell.items():
for i, m in enumerate(molecules):
m.__finalise__()
yield from self.yield_func(m)
self._clear_cache()
```
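A short usage sketch for two of the less-documented knobs, `progress_callback_function` and `min_mapping_qual`; it assumes `alignments` is an open `pysam.AlignmentFile` and the imports from the docstring examples above:
```python
from singlecellmultiomics.molecule import MoleculeIterator

# The callback is invoked every 500 iterations with the iteration number,
# the MoleculeIterator itself and the current reads (see __iter__ above).
def progress(iteration, molecule_iterator, reads):
    print(f'{iteration} read pairs processed, '
          f'{molecule_iterator.get_molecule_cache_size()} molecules cached')

for molecule in MoleculeIterator(alignments,
                                 progress_callback_function=progress,
                                 min_mapping_qual=20):
    pass  # reads below MAPQ 20 are counted as rejected, never yielded
```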
#### File: singlecellmultiomics/utils/pandas.py
```python
import pandas as pd
import numpy as np
import seaborn as sns
import warnings
def createRowColorDataFrame( discreteStatesDataFrame, nanColor =(0,0,0), predeterminedColorMapping={} ):
""" Create color dataframe for use with seaborn clustermap
Args:
discreteStatesDataFrame (pd.DataFrame) : Dataframe containing the data to convert to colors, like: pd.DataFrame( [['A','x'],['A','y']],index=['A','B'], columns=['First', 'Second'] )
nanColor(tuple) : Color for records having an NAN
predeterminedColorMapping(dict) : Supply class colors here (optional)
Returns:
discreteColorMatrix (pd.DataFrame) : Dataframe to pass to seaborn clustermap row_colors, or col_colors
luts (dict) : class->color mapping
"""
# Should look like:
# discreteStatesDataFrame = pd.DataFrame( [['A','x'],['A','y']],index=['A','B'], columns=['First', 'Second'] )
colorMatrix = []
luts = {}
for column in discreteStatesDataFrame:
states = [x for x in discreteStatesDataFrame[column].unique() if not pd.isnull(x)]
        undeterminedColorStates = [x for x in discreteStatesDataFrame[column].unique() if not pd.isnull(x) and x not in predeterminedColorMapping]
cols = sns.color_palette('hls',len(undeterminedColorStates))
#lut = { i:sns.color_palette('bright').jet(x) for i,x in zip(states, np.linspace(0,1,len(states)) )}
lut = { state:cols[i] for i,state in enumerate(undeterminedColorStates) }
lut.update({key:value for key,value in predeterminedColorMapping.items() if key in states})
lut[np.nan] = nanColor
colorMatrix.append( [ nanColor if pd.isnull(x) else lut[x] for x in discreteStatesDataFrame[column] ] )
luts[column] = lut
discreteColorMatrix = pd.DataFrame(colorMatrix, index=discreteStatesDataFrame.columns, columns=discreteStatesDataFrame.index ).transpose()
return discreteColorMatrix, luts
def tordist(x1: float, x2: float, wrap_dist: float ) -> float:
"""Calculate the toroidial distance between two scalars
Args:
x1(float) : first datapoint
x2(float) : second datapoint
wrap_dist(float) : wrapping distance (highest value), values higher than this will wrap around to zero
Returns:
distance(float) : toroidial distance between x1 and x2, wrapping around wrap_dist
"""
dx = abs(x2 - x1)
if dx>wrap_dist*0.5:
return wrap_dist-dx
else:
return dx
def tor_resample(x: np.array, y: np.array, window_radius: float, max_tp: float, n: int = 100) -> pd.Series:
    """Toroidally resample the coordinates (x, y), where x contains time points,
    onto a regular grid from zero to max_tp with n steps, using a sliding
    window mean that wraps around at max_tp."""
    interp = {}
    s = pd.Series(y, index=x)
    # np.nanmean warns on windows without any finite observation; ignore that
    warnings.simplefilter("ignore")
    for tp in np.linspace(0, max_tp, n):
        selected_points = np.array([tordist(xi, tp, max_tp) <= window_radius
                                    for xi in s.index])
        q = s[selected_points]
        mean = np.nanmean(q)
        # store each value one period down and up as well, so that the
        # resampled series wraps around the torus
        interp[tp] = mean
        interp[tp - max_tp] = mean
        interp[tp + max_tp] = mean
    resampled = pd.Series(interp).sort_index()
    return resampled.loc[0:max_tp]
```
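A short usage sketch for `createRowColorDataFrame` together with `seaborn.clustermap`, using made-up cell metadata (the import path follows this file's location):
```python
import numpy as np
import pandas as pd
import seaborn as sns
from singlecellmultiomics.utils.pandas import createRowColorDataFrame

meta = pd.DataFrame({'plate': ['P1', 'P1', 'P2'],
                     'condition': ['ctrl', 'treated', 'treated']},
                    index=['cell_A', 'cell_B', 'cell_C'])
row_colors, luts = createRowColorDataFrame(meta)

# Annotate a clustermap of (random) data with the per-cell class colors:
data = pd.DataFrame(np.random.rand(3, 4), index=meta.index)
sns.clustermap(data, row_colors=row_colors)
```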
#### File: singlecellmultiomics/utils/sequtils.py
```python
import math
from pysam import FastaFile, AlignmentFile
from singlecellmultiomics.utils.prefetch import Prefetcher
from collections import Counter
import numpy as np
from pysamiterators import CachedFasta
from array import array
class Reference(Prefetcher):
""" This is a picklable wrapper to pass reference handles """
def __init__(self):
self.args = locals().copy()
del self.args['self']
def instance(self, arg_update):
if 'self' in self.args:
del self.args['self']
clone = Reference(**self.args)
return clone
# Todo: exit statements
def prefetch(self, contig, start, end):
return FastaFile(**self.args)
def get_chromosome_number(chrom: str) -> int:
"""
Get chromosome number (index) of the supplied chromosome:
'1' -> 1, chr1 -> 1, returns -1 when not available, chrM -> -1
"""
try:
return int(chrom.replace('chr',''))
except Exception as e:
return -1
def is_autosome(chrom: str) -> bool:
""" Returns True when the chromsome is an autosomal chromsome,
not an alternative allele, mitochrondrial or sex chromosome
Args:
chrom(str) : chromosome name
Returns:
is_main(bool) : True when the chromsome is an autosome
"""
return is_main_chromosome(chrom) and get_chromosome_number(chrom)!=-1
def is_main_chromosome(chrom: str) -> bool:
""" Returns True when the chromsome is a main chromsome,
not an alternative or other
Args:
chrom(str) : chromosome name
Returns:
is_main(bool) : True when the chromsome is a main chromsome
"""
if chrom.startswith('KN') or chrom.startswith('KZ') or chrom.startswith('JH') or chrom.startswith('GL') or chrom.startswith(
'KI') or chrom.startswith('chrUn') or chrom.endswith('_random') or 'ERCC' in chrom or chrom.endswith('_alt') or "HLA-" in chrom:
return False
return True
def get_contig_list_from_fasta(fasta_path: str, with_length: bool=False) -> list:
"""Obtain list of contigs froma fasta file,
all alternative contigs are pooled into the string MISC_ALT_CONTIGS_SCMO
Args:
fasta_path (str or pysam.FastaFile) : Path or handle to fasta file
with_length(bool): return list of lengths
Returns:
contig_list (list ) : List of contigs + ['MISC_ALT_CONTIGS_SCMO'] if any alt contig is present in the fasta file
"""
contig_list = []
has_alt = False
if with_length:
lens = []
if type(fasta_path) is str:
fa = FastaFile(fasta_path)
elif type(fasta_path) is FastaFile:
fa = fasta_path
else:
raise TypeError('Supply pysam.FastaFile or str')
for reference, length in zip(fa.references, fa.lengths):
if is_main_chromosome(reference):
contig_list.append(reference)
if with_length:
lens.append(length)
else:
has_alt = True
# Close handle if we just opened one
if type(fasta_path) is str:
fa.close()
if has_alt:
contig_list.append('MISC_ALT_CONTIGS_SCMO')
if with_length:
lens.append(None)
if with_length:
return contig_list, lens
return contig_list
def phred_to_prob(phred):
"""Convert a phred score (ASCII) or integer to a numeric probability
Args:
phred (str/int) : score to convert
returns:
probability(float)
"""
try:
if isinstance(phred, int):
return math.pow(10, -(phred) / 10)
return math.pow(10, -(ord(phred) - 33) / 10)
except ValueError:
return 1
def hamming_distance(a, b):
return sum((i != j and i != 'N' and j != 'N' for i, j in zip(a, b)))
complement_translate = str.maketrans('ATCGNatcgn', 'TAGCNtagcn')
def reverse_complement(seq):
"""Obtain reverse complement of seq
returns:
reverse complement (str)
"""
return seq.translate(complement_translate)[::-1]
def complement(seq):
"""Obtain complement of seq
returns:
complement (str)
"""
return seq.translate(complement_translate)
def split_nth(seq, separator, n):
"""
    Split sequence at the n-th occurrence of separator
    Args:
        seq(str) : sequence to split
        separator(str): separator to split on
        n(int) : split at the n-th occurrence
"""
pos = 0
for i in range(n):
pos = seq.index(separator, pos + 1)
return seq[:pos], seq[pos + 1:]
def create_MD_tag(reference_seq, query_seq):
"""Create MD tag
Args:
reference_seq (str) : reference sequence of alignment
query_seq (str) : query bases of alignment
Returns:
md_tag(str) : md description of the alignment
"""
    no_change = 0
    md = []
    for ref_base, query_base in zip(reference_seq, query_seq):
        if ref_base.upper() == query_base:
            no_change += 1
        else:
            # the SAM spec requires a (possibly zero) match count before
            # every mismatched base, e.g. '0A0C5' rather than 'AC5'
            md.append(str(no_change))
            md.append(ref_base)
            no_change = 0
    # ... and a trailing match count, even when it is zero
    md.append(str(no_change))
    return ''.join(md)
def prob_to_phred(prob: float):
"""
Convert probability of base call being correct into phred score
Values are clipped to stay within 0 to 60 phred range
Args:
prob (float): probability of base call being correct
Returns:
phred_score (byte)
"""
return np.rint(-10 * np.log10(np.clip(1-prob, 1-0.999999, 0.999999))).astype('B')
def get_context(contig: str, position: int, reference: FastaFile, ibase: str = None, k_rad: int = 1):
"""
Args:
contig: contig of the location to extract context
position: zero based position
reference: pysam.FastaFile handle or similar object which supports .fetch()
ibase: single base to inject into the middle of the context
k_rad: radius to extract
Returns:
context(str) : extracted context with length k_rad*2 + 1
"""
if ibase is not None:
ctx = reference.fetch(contig, position-k_rad, position+k_rad+1).upper()
return ctx[:k_rad]+ibase+ctx[1+k_rad:]
else:
return reference.fetch(contig, position-k_rad, position+k_rad+1).upper()
def base_probabilities_to_likelihood(probs: dict):
    # Pool the error probabilities of every non-N observation as evidence
    # for 'N', then convert each base's probability list into a likelihood,
    # dividing out a uniform 0.25 prior per additional observation
    probs['N'] = [1 - p for base, ps in probs.items() for p in ps if base != 'N']
    return {base: np.prod(v) / np.power(0.25, len(v) - 1) for base, v in probs.items()}
def likelihood_to_prob(likelihoods):
total_likelihood = sum(likelihoods.values())
return {key: value / total_likelihood
for key, value in likelihoods.items()}
def phredscores_to_base_call(probs: dict):
"""
Perform base calling on a observation dictionary.
Returns N when there are multiple options with the same likelihood
Args:
probs: dictionary with confidence scores probs = {
'A':[0.95,0.99,0.9],
'T':[0.1],
}
Returns:
        base(str) : called base, 'N' when there is a tie or no observations
        p(float) : probability of the call being correct
    """
    # likelihoods (the 'N' option is added inside base_probabilities_to_likelihood):
likelihood_per_base = base_probabilities_to_likelihood(probs)
total_likelihood = sum(likelihood_per_base.values())
base_probs = Counter({base:p/total_likelihood for base, p in likelihood_per_base.items() }).most_common()
# We cannot make a base call when there are no observations or when the most likely bases have the same prob
if len(base_probs) == 0 or (len(base_probs) >= 2 and base_probs[0][1] == base_probs[1][1]):
return 'N', 0
return (base_probs[0][0], base_probs[0][1])
def pick_best_base_call( *calls ) -> tuple:
""" Pick the best base-call from a list of base calls
Example:
        >>> pick_best_base_call( ('A',32), ('C',22) )
        ('A', 32)
        >>> pick_best_base_call( ('A',32), ('C',32) )
        ('N', 0)
    Args:
        calls (generator) : generator/list containing (base, quality) tuples
Returns:
tuple (best_base, best_q) or ('N',0) when there is a tie
"""
# (q_base, quality, ...)
best_base, best_q = None, -1
tie = False
for call in calls:
if call is None:
continue
if call[1]>best_q:
best_base= call[0]
best_q=call[1]
tie=False
elif call[1]==best_q and call[0]!=best_base:
tie=True
if tie or best_base is None:
return ('N',0)
return best_base, best_q
def read_to_consensus_dict(read, start: int = None, end: int = None, only_include_refbase: str = None, skip_first_n_cycles:int = None, skip_last_n_cycles: int = None, min_phred_score: int = None):
"""
Obtain consensus calls for read, between start and end
"""
if read is None:
return dict()
return { (read.reference_name, refpos):
(read.query_sequence[qpos],
read.query_qualities[qpos],
refbase
)
for qpos, refpos, refbase in read.get_aligned_pairs(
matches_only=True,
with_seq=True)
if (start is None or refpos>=start) and \
(end is None or refpos<=end) and \
(min_phred_score is None or read.query_qualities[qpos]>=min_phred_score) and \
(skip_last_n_cycles is None or ( read.is_reverse and qpos>skip_last_n_cycles) or (not read.is_reverse and qpos<read.infer_query_length()-skip_last_n_cycles)) and \
(skip_first_n_cycles is None or ( not read.is_reverse and qpos>skip_first_n_cycles) or ( read.is_reverse and qpos<read.infer_query_length()-skip_first_n_cycles)) and \
(only_include_refbase is None or refbase.upper()==only_include_refbase)
}
def get_consensus_dictionaries(R1, R2, only_include_refbase=None, dove_safe=False, min_phred_score=None, skip_first_n_cycles_R1=None, skip_last_n_cycles_R1=None,skip_first_n_cycles_R2=None, skip_last_n_cycles_R2=None, dove_R2_distance=0, dove_R1_distance=0 ):
assert (R1 is None or R1.is_read1) and (R2 is None or R2.is_read2)
if dove_safe:
if R1 is None or R2 is None:
            raise ValueError(
                "It's not possible to determine a safe region when the alignment of R1 or R2 is not specified")
if R1.is_reverse and not R2.is_reverse:
start, end = R2.reference_start + dove_R2_distance, R1.reference_end - dove_R1_distance -1
elif not R1.is_reverse and R2.is_reverse:
start, end = R1.reference_start + dove_R1_distance, R2.reference_end - dove_R2_distance -1
else:
raise ValueError('This method only works for inwards facing reads')
else:
start, end = None, None
    return read_to_consensus_dict(R1, start, end, only_include_refbase=only_include_refbase, skip_last_n_cycles=skip_last_n_cycles_R1, skip_first_n_cycles=skip_first_n_cycles_R1, min_phred_score=min_phred_score), \
        read_to_consensus_dict(R2, start, end, only_include_refbase=only_include_refbase, skip_last_n_cycles=skip_last_n_cycles_R2, skip_first_n_cycles=skip_first_n_cycles_R2, min_phred_score=min_phred_score)
```
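The helpers above form a small pipeline: per-base confidence scores become likelihoods, the likelihoods become probabilities, and the best candidate becomes the call. A minimal sketch of driving it by hand (not from the original repository; it assumes the functions above are in scope):
```python
# Observations for one reference position, shaped like the grouped
# per-base confidence scores the functions above expect.
probs = {
    'A': [0.95, 0.99, 0.90],  # three strong observations supporting A
    'T': [0.60],              # one weaker observation supporting T
}
base, p = phredscores_to_base_call(probs)
print(base, p)  # expected: 'A' with probability close to 1

# Tie-breaking between independent calls, e.g. from R1 and R2:
print(pick_best_base_call(('A', 32), ('C', 22)))  # ('A', 32)
print(pick_best_base_call(('A', 32), ('C', 32)))  # ('N', 0) on a tie
```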
#### File: singlecellmultiomics/utils/submission.py
```python
import sys
import os
import re
import itertools as it
import glob
import time
import datetime
import subprocess
import distutils.spawn
import uuid
def create_job_file_paths(target_directory,job_alias=None, prefix=None, job_file_name=None):
if not os.path.exists(target_directory):
os.makedirs(target_directory)
if prefix is None:
prefix = time.strftime("%d_%m_%Y_%H_%M_%S_") + str(uuid.uuid4())
if job_file_name is None:
job_file_name = '%s-%s' % (prefix, job_alias)
jobfile = target_directory + '/%s.sh' % job_file_name
stderr = target_directory + '/%s.stderr' % job_file_name
stdout = target_directory + '/%s.stdout' % job_file_name
if prefix is None:
while os.path.exists(jobfile):
job_file_name = '%s-%s' % (time.strftime("%d_%m_%Y_%H_%M_%S"), job_alias)
jobfile = target_directory + '/%s.sh' % job_file_name
stderr = target_directory + '/%s.stderr' % job_file_name
stdout = target_directory + '/%s.stdout' % job_file_name
time.sleep(1)
else:
if os.path.exists(jobfile):
            print(
                "Job %s already exists. Files might become corrupted if the previous job is still running" %
                jobfile)
return jobfile,stderr, stdout, job_file_name
def generate_job_script(scheduler, jobfile,stderr, stdout, job_name, memory_gb, working_directory, time_h, threads_n, email, mail_when_finished=False, copy_env=True ):
if scheduler=='local':
return [f'cd {working_directory}']
if scheduler=='slurm':
jobData = [
'#!/bin/sh',
'#SBATCH -J %s' % job_name, # Sets job name
'#SBATCH -n %s' % threads_n,
'#SBATCH -N 1', # Run on a single node
'#SBATCH --time %s:00:00' % str(time_h).zfill(2),
'#SBATCH --mem %sG' % memory_gb,
'#SBATCH --chdir %s' % (working_directory),
'#SBATCH -o %s' % stdout,
'#SBATCH -e %s' % stderr
]
if email is not None:
if mail_when_finished:
raise NotImplementedError('email when finished is not implemented for slurm')
jobData.append('#SBATCH --mail-type=FAIL')
jobData.append('#SBATCH --mail-user=%s' % email)
elif scheduler=='sge':
jobData = [
'#!/bin/sh',
'#$ -S /bin/bash',
'#$ -N %s' % job_name,
'#$ -l h_rt=%s:00:00' % time_h,
'#$ -l h_vmem=%sG' % memory_gb,
# '#$ -l hostname=\'!n00[18]*\'',
'#$ -wd %s' % (working_directory),
'#$ -o %s' % stdout,
'#$ -e %s' % stderr,
'#$ -q all.q'
]
if email is not None:
jobData.append('#$ -M %s' % email)
jobData.append('#$ -m %sas' % ('e' if mail_when_finished else ''))
if copy_env:
jobData.append('#$ -V')
if threads_n > 1:
jobData.append('#$ -pe threaded %s' % threads_n)
# Make sure we land in the right directory
if working_directory is not None:
jobData.append(f'cd {working_directory}')
return jobData
def write_cmd_to_submission_file(cmd, job_data, jobfile, scheduler='sge' ):
if scheduler in ('slurm','sge','local'):
job_data.append('%s' % cmd)
else:
raise NotImplementedError()
with open(jobfile, 'w') as f:
f.write('\n'.join(job_data) + '\n')
def generate_submission_command(jobfile, hold, scheduler='sge'):
if scheduler=='slurm':
if hold is not None and len(hold)>0 and hold[0]!='none':
js = 'afterany:' + ':'.join( [f'{h.strip()}' for h in hold] )
qs = f'sbatch --dependency={js} {jobfile}'
else:
qs = 'sbatch %s' % jobfile
else:
qs = 'qsub %s %s' % ((('-hold_jid %s' % ','.join(hold))
if (hold is not None and hold[0] != 'none') else ''), jobfile)
return qs
def submit_job(command, target_directory, working_directory,
threads_n=1, memory_gb=8, time_h=8, scheduler='sge', copy_env=True,
email=None,job_alias=None, mail_when_finished=False,
hold=None,submit=True, prefix=None, job_file_name=None, job_name=None, silent=False):
"""
Submit a job
Args:
threads(int) : amount of requested threads
memory_gb(int) : amount of requested memory
scheduler(str): sge/slurm/local
hold(list): list of job depedencies
submit(bool) : perform the actual submission, when set to False only the submission script is written
Returns:
job_id(str) : id of sumbitted job
"""
qsub_available = (distutils.spawn.find_executable("qsub") is not None)
sbatch_available = (distutils.spawn.find_executable("sbatch") is not None)
if scheduler == 'auto':
if qsub_available:
scheduler = 'sge'
elif sbatch_available:
scheduler = 'slurm'
else:
scheduler = 'local'
if job_alias is None and job_name is None:
job_name = 'J%s' % str(uuid.uuid4())
# If no file was specified, we generate a file using the supplied job name
if job_file_name is None:
job_alias = job_name
if working_directory is None:
working_directory = os.getcwd()
if submit:
if scheduler=='sge' and not qsub_available:
raise ValueError('qsub is not available on the system')
if scheduler=='slurm' and not sbatch_available:
if qsub_available:
                print('sbatch is not available but qsub is; falling back to qsub')
scheduler='sge'
else:
raise ValueError('sbatch is not available on the system')
jobfile,stderr, stdout, _job_file_name = create_job_file_paths(target_directory,job_alias=job_alias,prefix=prefix,job_file_name=job_file_name)
if job_file_name is None:
job_file_name=_job_file_name
else:
if job_file_name!=_job_file_name and not silent:
print(f'Job file name changed from {job_file_name} to {_job_file_name}')
job_data = generate_job_script(scheduler=scheduler, jobfile=jobfile,
stderr=stderr, stdout=stdout,
job_name=job_name,
memory_gb=memory_gb, working_directory=working_directory,
time_h=time_h, threads_n=threads_n, email=email, mail_when_finished=mail_when_finished, copy_env= copy_env)
qs = generate_submission_command( jobfile, hold, scheduler)
write_cmd_to_submission_file(command, job_data, jobfile, scheduler)
if submit:
if scheduler=='slurm':
job_id = os.popen(qs).read().replace('Submitted batch job ','').strip()
return job_id
elif scheduler=='sge':
rd = os.popen(qs).read()
job_id = rd.split(' ')[2]
return job_id.strip()
elif scheduler=='local':
# Run the job now:
os.system(f'bash {jobfile} 2>{stderr} >{stdout}')
else:
print('# use the command below to submit your job:')
print(qs)
## ##### Dependency handling ##### ##
if __name__ == '__main__':
import argparse
username = os.getenv('USER')
defaultEmail = os.getenv('EMAIL')
qsub_available = (distutils.spawn.find_executable("qsub") is not None)
PY36ENV = os.getenv('PY36ENV')
if PY36ENV is None:
PY36ENV = 'source /hpc/hub_oudenaarden/bdebarbanson/virtualEnvironments/py36/bin/activate'
basepath = '/hpc/hub_oudenaarden/%s/cluster' % username if os.path.exists(
'/hpc/hub_oudenaarden/%s' % username) else os.path.dirname(
os.path.abspath(__file__)) + '/cluster/'
argparser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Send job to cluster')
argparser.add_argument(
'-w', help="Working directory, current dir when not supplied")
argparser.add_argument(
'-N',
type=str,
help="Job alias, slashes will be removed",
default="%s-dobby" %
(username if username is not None else 'anon'))
argparser.add_argument(
'-jp',
type=str,
help="Job file prefix, this will be the name of the file where the standard error, standard out and job script will be saved. By default it is the current time to prevent collisions. When supplying this be careful that simultanious jobs cannot share the same prefix!",
default=None)
argparser.add_argument(
'-t',
type=int,
default=1,
help="Threads, amount of CPUs requested (PE). Cluster Worker count")
argparser.add_argument(
'-time',
type=int,
default=24,
help="Runtime in hours")
argparser.add_argument('-m', type=int, default=4, help="Memory in gigs")
argparser.add_argument('-y', action="store_true", help="Submit jobs")
argparser.add_argument(
'-sched',
default='slurm',
help="scheduler: sge, slurm, local")
argparser.add_argument(
'-e',
type=str,
help="How to execute the job; submit, local",
default="submit")
argparser.add_argument(
'-hold',
type=str,
help="Wait for job(s) with this name to be finished",
default=None)
argparser.add_argument(
'-email',
type=str,
help="Send emails to this adress (by default Only kill messages)",
default=os.getenv('EMAIL'))
argparser.add_argument(
'--mf',
help="Mail when finished",
action='store_true')
argparser.add_argument(
'--nenv',
help="Do not copy current environment",
action='store_true')
argparser.add_argument(
'--py36',
help="Source python 3.6 (set PY36ENV variable to change the path)",
action='store_true')
argparser.add_argument('c', metavar='command', type=str, nargs='*')
argparser.add_argument(
'--silent',
help="Try to print less",
action='store_true')
argparser.add_argument(
'-s',
type=str,
help="Submission data storage path (stdout/stderr)",
default=os.path.abspath('./cluster'))
args = argparser.parse_args()
if args.email == 'none':
args.email = None
working_directory = args.w if args.w is not None else os.getcwd()
jid = submit_job(' '.join(args.c), job_name=args.N, target_directory=args.s,
job_file_name = args.jp,
working_directory=working_directory,
threads_n=args.t, memory_gb=args.m, time_h=args.time, scheduler=args.sched, copy_env=not args.nenv,
email=args.email, mail_when_finished=args.mf, hold=(args.hold.split(',') if args.hold is not None else None) ,submit=args.y, prefix=None)
if jid is not None:
print(jid)
``` |
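A quick way to exercise `submit_job` without a cluster is the `local` scheduler, which writes the job script and immediately runs it with bash instead of queueing it. A hypothetical usage sketch (paths and the command are illustrative):
```python
job_id = submit_job(
    'echo hello world',
    target_directory='./cluster',  # where the .sh/.stdout/.stderr files land
    working_directory='.',
    threads_n=1, memory_gb=1, time_h=1,
    scheduler='local',
    submit=True,
)
# For 'local' runs no scheduler id is returned (job_id is None); for
# 'sge'/'slurm' the parsed job id string comes back for use in `hold`.
```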
{
"source": "jpty/python-eufy-security",
"score": 3
} |
#### File: python-eufy-security/tests/test_device.py
```python
import json
import aiohttp
import pytest
from eufy_security import async_login
from eufy_security.device import Device, DeviceDict
from eufy_security.types import DeviceType, ParamType
from .common import TEST_EMAIL, TEST_PASSWORD, load_fixture, load_json_fixture
def test_properties():
"""Test device properties."""
device_info = load_json_fixture("devices_list_response.json")["data"][0]
device = Device(None, device_info)
assert device.type == DeviceType.CAMERA
assert device.hardware_version == "HAIYI-IMX323"
assert device.last_camera_image_url == "https://path/to/image.jpg"
assert device.mac == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
assert device.model == "T8111"
assert device.name == "Driveway"
assert device.serial == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"
assert device.software_version == "1.9.3"
assert device.station_serial == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
def test_update():
"""Test updating the device data."""
device_infos = load_json_fixture("devices_list_response.json")["data"]
device = Device(None, device_infos[0])
assert device.serial == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"
device.update(device_infos[1])
assert device.serial == "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx2"
def test_is_camera():
"""Test the device is a camera."""
camera = Device(None, {"device_type": DeviceType.CAMERA.value})
assert camera.is_camera
station = Device(None, {"device_type": DeviceType.STATION.value})
assert not station.is_camera
def test_is_station():
"""Test the device is a station."""
station = Device(None, {"device_type": DeviceType.STATION.value})
assert station.is_station
camera = Device(None, {"device_type": DeviceType.CAMERA.value})
assert not camera.is_station
def test_is_sensor():
"""Test the device is a sensor."""
sensor = Device(None, {"device_type": DeviceType.SENSOR.value})
assert sensor.is_sensor
camera = Device(None, {"device_type": DeviceType.CAMERA.value})
assert not camera.is_sensor
def test_is_doorbell():
"""Test the device is a doorbell."""
doorbell = Device(None, {"device_type": DeviceType.DOORBELL.value})
assert doorbell.is_doorbell
camera = Device(None, {"device_type": DeviceType.CAMERA.value})
assert not camera.is_doorbell
@pytest.mark.asyncio
async def test_start_stream(aresponses, login_success_response):
"""Test starting the RTSP stream."""
aresponses.add(
"mysecurity.eufylife.com",
"/api/v1/passport/login",
"post",
aresponses.Response(text=json.dumps(login_success_response), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_devs_list",
"post",
aresponses.Response(
text=load_fixture("devices_list_response.json"), status=200
),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_hub_list",
"post",
aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/web/equipment/start_stream",
"post",
aresponses.Response(
text=load_fixture("start_stream_response.json"), status=200
),
)
async with aiohttp.ClientSession() as websession:
api = await async_login(TEST_EMAIL, TEST_PASSWORD, websession)
device = next(iter(api.devices.values()))
stream_url = await device.async_start_stream()
assert stream_url == "rtmp://p2p-vir-6.eufylife.com/hls/123"
@pytest.mark.asyncio
async def test_stop_stream(aresponses, login_success_response):
"""Test stopping the RTSP stream."""
aresponses.add(
"mysecurity.eufylife.com",
"/api/v1/passport/login",
"post",
aresponses.Response(text=json.dumps(login_success_response), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_devs_list",
"post",
aresponses.Response(
text=load_fixture("devices_list_response.json"), status=200
),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_hub_list",
"post",
aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/web/equipment/stop_stream",
"post",
aresponses.Response(text=load_fixture("stop_stream_response.json"), status=200),
)
async with aiohttp.ClientSession() as websession:
api = await async_login(TEST_EMAIL, TEST_PASSWORD, websession)
device = next(iter(api.devices.values()))
await device.async_stop_stream()
@pytest.mark.asyncio
async def test_async_update(aresponses, login_success_response):
"""Test stopping the RTSP stream."""
aresponses.add(
"mysecurity.eufylife.com",
"/api/v1/passport/login",
"post",
aresponses.Response(text=json.dumps(login_success_response), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_devs_list",
"post",
aresponses.Response(
text=load_fixture("devices_list_response.json"), status=200
),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_hub_list",
"post",
aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_devs_list",
"post",
aresponses.Response(
text=load_fixture("devices_list_response.json"), status=200
),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_hub_list",
"post",
aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
)
async with aiohttp.ClientSession() as websession:
api = await async_login(TEST_EMAIL, TEST_PASSWORD, websession)
device = next(iter(api.devices.values()))
await device.async_update()
@pytest.mark.asyncio
async def test_set_params(aresponses, login_success_response):
"""Test setting params."""
aresponses.add(
"mysecurity.eufylife.com",
"/api/v1/passport/login",
"post",
aresponses.Response(text=json.dumps(login_success_response), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_devs_list",
"post",
aresponses.Response(
text=load_fixture("devices_list_response.json"), status=200
),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/get_hub_list",
"post",
aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
)
aresponses.add(
"security-app.eufylife.com",
"/v1/app/upload_devs_params",
"post",
aresponses.Response(
text=load_fixture("upload_devs_params_response.json"), status=200
),
)
async with aiohttp.ClientSession() as websession:
api = await async_login(TEST_EMAIL, TEST_PASSWORD, websession)
device = next(iter(api.devices.values()))
await device.async_set_params({ParamType.SNOOZE_MODE: True})
def test_device_dict_with_dict():
"""Test updating DeviceDict with a dict."""
device_infos = load_json_fixture("devices_list_response.json")["data"]
dd = DeviceDict(None)
dd.update(device_infos)
assert dd["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"].name == "Driveway"
dd.update(
{
device_info["device_sn"]: {**device_info, "device_name": "Updated"}
for device_info in device_infos
}
)
assert dd["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"].name == "Updated"
def test_device_dict_with_list():
"""Test updating DeviceDict with a list."""
device_infos = load_json_fixture("devices_list_response.json")["data"]
dd = DeviceDict(None)
dd.update(device_infos)
assert dd["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"].name == "Driveway"
dd.update(
[{**device_info, "device_name": "Updated"} for device_info in device_infos]
)
assert dd["xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx1"].name == "Updated"
def test_device_dict_with_none():
"""Test updating DeviceDict with None."""
dd = DeviceDict(None)
with pytest.raises(TypeError):
dd.update(None)
```
#### File: python-eufy-security/tests/test_param.py
```python
from datetime import datetime, timezone
import pytest
from eufy_security.param import Param, Params
from eufy_security.types import ParamType
def test_param_init_with_supported_type():
"""Test param init with a supported param type."""
param = Param({"param_type": ParamType.CHIME_STATE.value, "param_value": "1",})
assert param.type == ParamType.CHIME_STATE
def test_param_init_with_unsupported_type():
"""Test param init with unsupported param type."""
with pytest.raises(ValueError):
Param(
{"param_type": -1, "param_value": "0",}
)
def test_param_init_with_param_type():
"""Test param init with a param type."""
param = Param(ParamType.CHIME_STATE)
assert param.type == ParamType.CHIME_STATE
assert param.param_info == {}
def test_param_hash():
"""Test param hash is the param type and id."""
param = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 1,})
assert hash(param) == hash((ParamType.CHIME_STATE.value, 1))
def test_param_equals():
"""Test param is equal to other param with the same param type and id."""
param1 = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 1,})
param2 = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 1,})
assert param1 == param2
def test_param_not_equal():
"""Test param is not equal to other param with alternate param type or id."""
param1 = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 1,})
param2 = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 2,})
param3 = Param({"param_type": ParamType.DETECT_EXPOSURE.value, "param_id": 1,})
assert param1 != param2
assert param1 != param3
def test_param_id():
"""Returns the param id."""
param = Param({"param_type": ParamType.CHIME_STATE.value, "param_id": 123,})
assert param.id == 123
def test_param_status():
"""Returns the param status."""
param1 = Param({"param_type": ParamType.CHIME_STATE.value, "status": 1,})
param2 = Param({"param_type": ParamType.CHIME_STATE.value, "status": 0,})
assert param1.status == True
assert param2.status == False
def test_param_value():
"""Test the parses param value."""
param = Param({"param_type": ParamType.CHIME_STATE.value, "param_value": "1234",})
assert param.value == 1234
def test_param_set_value():
"""Test setting the param value."""
param = Param({"param_type": ParamType.CHIME_STATE.value, "param_value": "1234",})
param.set_value(4567)
assert param.value == 4567
assert param.param_info["param_value"] == "4567"
def test_param_created():
"""Test the param created date."""
param = Param(
{"param_type": ParamType.CHIME_STATE.value, "create_time": 1565008299,}
)
assert param.created == datetime(2019, 8, 5, 12, 31, 39, tzinfo=timezone.utc)
def test_param_updated():
"""Test the param updated date."""
param = Param(
{"param_type": ParamType.CHIME_STATE.value, "update_time": 1565008299,}
)
assert param.updated == datetime(2019, 8, 5, 12, 31, 39, tzinfo=timezone.utc)
def test_params_init():
"""Test params init with a list."""
params = Params(
[
{"param_type": ParamType.CHIME_STATE.value, "param_value": "1"},
{"param_type": 0, "param_value": "0"},
{"param_type": ParamType.DETECT_EXPOSURE.value, "param_value": "1"},
]
)
assert len(params) == 2
def test_params_contains():
"""Test params contains by param or param type."""
params = Params([{"param_type": ParamType.CHIME_STATE, "param_id": 1},])
assert ParamType.CHIME_STATE in params
assert params[ParamType.CHIME_STATE] in params
assert ParamType.DETECT_EXPOSURE not in params
assert Param({"param_type": ParamType.DETECT_EXPOSURE.value}) not in params
def test_params_get_existing_item():
"""Test params get an existing item by param type."""
params = Params(
[
{"param_type": ParamType.CHIME_STATE, "param_id": 1},
{"param_type": ParamType.DETECT_EXPOSURE, "param_id": 2},
]
)
assert params[ParamType.CHIME_STATE].id == 1
assert params[ParamType.CHIME_STATE.value].id == 1
assert params[ParamType.CHIME_STATE.name].id == 1
def test_params_get_non_existing_item():
"""Test params get a non-existing item by param type."""
params = Params([{"param_type": ParamType.DETECT_EXPOSURE, "param_id": 2},])
with pytest.raises(KeyError):
params[ParamType.CHIME_STATE]
with pytest.raises(KeyError):
params[ParamType.CHIME_STATE.value]
with pytest.raises(KeyError):
params[ParamType.CHIME_STATE.name]
with pytest.raises(KeyError):
params[0]
def test_params_set_existing_item():
"""Test params updating the value of an existing param."""
params = Params(
[{"param_type": ParamType.DETECT_EXPOSURE, "param_id": 2, "param_value": "1"},]
)
params[ParamType.DETECT_EXPOSURE] = 2
assert params[ParamType.DETECT_EXPOSURE].param_info["param_value"] == "2"
params[ParamType.DETECT_EXPOSURE.name] = 3
assert params[ParamType.DETECT_EXPOSURE].param_info["param_value"] == "3"
params[ParamType.DETECT_EXPOSURE.value] = 4
assert params[ParamType.DETECT_EXPOSURE].param_info["param_value"] == "4"
def test_params_set_new_item():
"""Test params updating the value of an existing param."""
params = Params(
[{"param_type": ParamType.DETECT_EXPOSURE, "param_id": 2, "param_value": "1"},]
)
params[ParamType.CHIME_STATE] = 2
assert params[ParamType.CHIME_STATE].param_info["param_value"] == "2"
def test_params_items():
"""Test params items."""
params = Params(
[
{"param_type": ParamType.CHIME_STATE, "param_id": 1},
{"param_type": ParamType.DETECT_EXPOSURE, "param_id": 2},
]
)
items = params.items()
assert type(items) == dict
assert len(items) == 2
def test_params_update():
"""Test params updating with a dictionary."""
params = Params()
params.update({ParamType.CHIME_STATE: 1})
assert params[ParamType.CHIME_STATE].value == 1
```
#### File: python-eufy-security/tests/test_types.py
```python
import pytest
from eufy_security.types import ParamType
def test_param_type_loads():
"""Test ParamType loads."""
assert ParamType.CHIME_STATE.loads("123") == 123
assert ParamType.DETECT_SWITCH.loads('{"a": 1}') == {"a": 1}
assert ParamType.SNOOZE_MODE.loads("eyJhIjogMX0=") == {"a": 1}
def test_param_type_dumps():
"""Test ParamType dumps."""
assert ParamType.CHIME_STATE.dumps(123) == "123"
assert ParamType.DETECT_SWITCH.dumps({"a": 1}) == '{"a": 1}'
assert ParamType.SNOOZE_MODE.dumps({"a": 1}) == "eyJhIjogMX0="
def test_param_type_lookup():
"""Test ParamType lookup."""
assert ParamType.lookup(ParamType.CHIME_STATE.value) == ParamType.CHIME_STATE
assert ParamType.lookup(ParamType.CHIME_STATE.name) == ParamType.CHIME_STATE
assert ParamType.lookup(ParamType.CHIME_STATE) == ParamType.CHIME_STATE
with pytest.raises(ValueError):
ParamType.lookup(0)
``` |
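The login, device-list and hub-list mocks repeat verbatim in every async test above. A possible refactor (not part of the original suite; the fixture name is made up) would move them into a shared pytest fixture:
```python
@pytest.fixture
def mocked_login(aresponses, login_success_response):
    """Register the auth and listing responses every API test needs."""
    aresponses.add(
        "mysecurity.eufylife.com", "/api/v1/passport/login", "post",
        aresponses.Response(text=json.dumps(login_success_response), status=200),
    )
    aresponses.add(
        "security-app.eufylife.com", "/v1/app/get_devs_list", "post",
        aresponses.Response(text=load_fixture("devices_list_response.json"), status=200),
    )
    aresponses.add(
        "security-app.eufylife.com", "/v1/app/get_hub_list", "post",
        aresponses.Response(text=load_fixture("hub_list_response.json"), status=200),
    )
    return aresponses
```
Each test would then take `mocked_login` as a parameter and only register its endpoint-specific response.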
{
"source": "Jpub/scTechPython",
"score": 3
} |
#### File: scTechPython/chapter4/decorator_1.py
```python
debug_trace = True
# If the flag above is set, open a log file
if debug_trace:
    log_file = open("debug.log", "w", encoding='utf-8')
# Definition of the decorator function
def debug_log(func):
    if debug_trace:
        def func_and_log(*args, **kwargs):
            # Write to the log file before running func
            log_file.write("start %s: %s, %s\n" %
                           (func.__name__, args, kwargs))
            # Run func as-is
            r = func(*args, **kwargs)
            # When func finishes, record that in the log file as well
            log_file.write("end %s: return value %s\n" % (func.__name__, r))
            return r
        return func_and_log
    else:
        return func  # if debug_trace = False, nothing is changed
# Change myfunc's behaviour with the decorator
@debug_log
def myfunc(x):
    return x + x
# Run the myfunc modified by the decorator
myfunc(3)
myfunc(5)
if debug_trace:
    log_file.close()  # close the log file (it only exists when tracing)
```
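One caveat with the decorator above: the wrapper replaces `myfunc`, so introspection sees `func_and_log` rather than the original name and docstring. A common refinement, sketched here as an assumption rather than part of the original text, is to apply `functools.wraps`:
```python
import functools

def debug_log_wrapped(func):  # illustrative variant, not in the original
    if debug_trace:
        @functools.wraps(func)  # preserves func.__name__ and __doc__
        def func_and_log(*args, **kwargs):
            log_file.write("start %s: %s, %s\n" % (func.__name__, args, kwargs))
            r = func(*args, **kwargs)
            log_file.write("end %s: return value %s\n" % (func.__name__, r))
            return r
        return func_and_log
    return func
```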
#### File: scTechPython/chapter5/class1.py
```python
class MyClass(object):  # (1) no base class other than object
    """ (2) docstring of the class """
    # (3) definition of variables x and y
    x = 0
    y = 0
    def my_print(self):
        self.x += 1  # treat x as a variable that exists separately per instance
        MyClass.y += 1  # treat y as a variable that exists on the class
        print('(x, y) = ({}, {})'.format(self.x, self.y))
# Create instances of the class
f = MyClass  # (5) without (), this only gives the class another name
a = MyClass()  # (6) create an instance of MyClass and name it a
b = f()  # (7) f() means the same as MyClass() (aliased in (5))
# (8) run the method
a.my_print()
b.my_print()
b.my_print()
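A worked trace of the three calls above (assuming a fresh run of the script) makes the instance/class distinction concrete: `x` lands on each instance after its first increment, while `y` stays shared through `MyClass`.
```python
# a.my_print()  -> (x, y) = (1, 1)   a gets its own x; shared y becomes 1
# b.my_print()  -> (x, y) = (1, 2)   b gets its own x; shared y becomes 2
# b.my_print()  -> (x, y) = (2, 3)   b's own x grows; shared y becomes 3
# Afterwards: a.x == 1, b.x == 2, MyClass.y == 3
```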
{
"source": "jpudell/Beam-Stabilization",
"score": 3
} |
#### File: src/modules/utilities.py
```python
import os
from datetime import datetime
class saveClass:
"""
Class for writing and reading the reference values from the position logging file
(ConfigFileName)
Methods:
createOrOpenConfigFile
readLastConfig
writeNewCenter
"""
def __init__(self, config):
self.fn = "BeamStabilization\\"+str(config.ConfigFileName)
def createOrOpenConfigFile(self):
"""
Get the file object with previous reference positions.
Create it, if it does not exist (size == 0), write header in new file
:return: file object
"""
header = False
file = open(self.fn, 'a+')
if os.stat(self.fn).st_size == 0:
header = True
if header:
file.write('#Cam0 X\t Cam0 Y\t Cam1 X\t Cam1 Y\n')
return file
    def readLastConfig(self, dataDict, line=-1):
        """
        Open the file and read the last entry (default) or the specified line
        :param line: index of the line holding the reference position
        :return: bool, whether reading was successful
        """
try:
if os.stat(self.fn).st_size != 0:
with open(self.fn, 'r') as file:
lines = file.read().splitlines()
last_line = lines[line]
dataDict[0]["GoalPixel_X"] = float(last_line.split('\t')[0])
dataDict[0]["GoalPixel_Y"] = float(last_line.split('\t')[1])
dataDict[1]["GoalPixel_X"] = float(last_line.split('\t')[2])
dataDict[1]["GoalPixel_Y"] = float(last_line.split('\t')[3])
return True
except FileNotFoundError:
return False
def writeNewCenter(self, dataDict):
"""
write the new reference position to the position reference file.
"""
file = self.createOrOpenConfigFile()
file.write(str(dataDict[0]["Center_GaussFitX"])+'\t')
file.write(str(dataDict[0]["Center_GaussFitY"])+'\t')
file.write(str(dataDict[1]["Center_GaussFitX"])+'\t')
file.write(str(dataDict[1]["Center_GaussFitY"])+'\t')
file.write('\n')
file.close()
class Logging:
"""
Class handeling the continuous logging of the beam position
Methods:
init
createFolderAndFile
saveValues
closeFile
"""
def __init__(self, config):
self.SavingDestination = config.SavingDestination
self.createFolderAndFile()
def createFolderAndFile(self):
"""
Get the file object with previous reference positions.
Create it, if it does not exist (size == 0), write header in new file
:return: file object
"""
if not os.path.exists(self.SavingDestination+"\\Logging"):
os.makedirs(self.SavingDestination+"\\Logging")
os.chdir(self.SavingDestination+"\\Logging")
self.timeStamp = str(datetime.now().strftime("%Y%m%d_%H%M%S"))
self.file = open(str(self.timeStamp), 'a+')
self.file.write('# timeStamp\t FWHMX1\t FWHMY1\t FWHMX2\t '
'FWHMY2\t CoM_X1\t CoM_X2\t '
'CoM_Y1\tCoM_Y2\tGausscenterX1\t '
'GausscenterX2\t '
'GausscenterY1\t GausscenterY2\n')
def saveValues(self, dataDict):
"""
write line to logging file from the global dictionaries for both cameras:
time
FWHM_X, FWHM_Y
CoM_X, CoM_Y
Center_GaussFitX, Center_GaussFitY
"""
self.file.write(str(datetime.now().strftime("%Y%m%d_%H%M%S")) + '\t')
self.file.write(str(dataDict[0]["FWHM_X"])+'\t')
self.file.write(str(dataDict[0]["FWHM_Y"]) + '\t')
self.file.write(str(dataDict[1]["FWHM_X"]) + '\t')
self.file.write(str(dataDict[1]["FWHM_Y"]) + '\t')
self.file.write(str(dataDict[0]["CoM_X"]) + '\t')
self.file.write(str(dataDict[1]["CoM_X"]) + '\t')
self.file.write(str(dataDict[0]["CoM_Y"]) + '\t')
self.file.write(str(dataDict[1]["CoM_Y"]) + '\t')
self.file.write(str(dataDict[0]["Center_GaussFitX"]) + '\t')
self.file.write(str(dataDict[1]["Center_GaussFitX"]) + '\t')
self.file.write(str(dataDict[0]["Center_GaussFitY"]) + '\t')
self.file.write(str(dataDict[1]["Center_GaussFitY"]) + '\n')
def closeFile(self):
"""
close file object at the end of logging
"""
self.file.close()
``` |
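A hypothetical end-to-end use of the two classes, with a stand-in config object and a `dataDict` shaped like the one the module expects (neither is part of the original project; note that `saveClass` writes under a `BeamStabilization` directory that must exist):
```python
class DummyConfig:
    ConfigFileName = "reference_positions.txt"
    SavingDestination = "."

dataDict = {
    cam: {"GoalPixel_X": 0.0, "GoalPixel_Y": 0.0,
          "Center_GaussFitX": 10.5 + cam, "Center_GaussFitY": 20.5 + cam,
          "FWHM_X": 3.0, "FWHM_Y": 3.1, "CoM_X": 10.4, "CoM_Y": 20.6}
    for cam in (0, 1)
}

saver = saveClass(DummyConfig)
saver.writeNewCenter(dataDict)   # append the current centers as the reference
saver.readLastConfig(dataDict)   # load them back into GoalPixel_X/Y

log = Logging(DummyConfig)       # opens a time-stamped file under ./Logging
log.saveValues(dataDict)
log.closeFile()
```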
{
"source": "jpuderer/GarageDoor",
"score": 2
} |
#### File: usr/sbin/garageDoorService.py
```python
import atexit
import json
import logging
import logging.handlers
import random
import RPi.GPIO as GPIO
import string
import sys
import time
import xmpp
from time import sleep
from yaml import load, dump
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
SERVER = 'gcm.googleapis.com'
PORT = 5235
USERNAME = "REDACTED_GCM_USER_ID"
PASSWORD = "<PASSWORD>"
AUTH_FILE = "/etc/garageDoorService/auth.yaml"
# Setup logging
DEFAULT_LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.handlers.SysLogHandler(address = '/dev/log'))
unacked_messages_quota = 100
send_queue = []
class StreamToLogger(object):
"""
Fake file-like stream object that redirects writes to a logger instance.
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, buf):
for line in buf.rstrip().splitlines():
self.logger.log(self.log_level, line.rstrip())
def flush(self):
pass
def configure_logging():
LOGGER.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter(DEFAULT_LOG_FORMAT)
ch.setFormatter(formatter)
LOGGER.addHandler(ch)
# Redirect stdout and stderr to logger
#sys.stdout = StreamToLogger(LOGGER, logging.DEBUG)
#sys.stderr = StreamToLogger(LOGGER, logging.DEBUG)
def setup_gpio():
GPIO.setmode(GPIO.BCM)
GPIO.setup(20, GPIO.OUT)
GPIO.output(20, False)
GPIO.setup(21, GPIO.OUT)
GPIO.output(21, False)
def cleanup():
GPIO.cleanup()
def isAuthorized(user, password):
    # Open the auth file and load it. We do this every time so we don't have
    # to reload the service; the file shouldn't be too large in most cases.
authFile = file(AUTH_FILE, 'r')
authDict = load(authFile)
if not authDict.has_key(user):
return False
if authDict[user] == password:
return True
else:
return False
def openGarageDoor():
GPIO.output(20, True)
sleep(1)
GPIO.output(20, False)
sleep(1)
# Return a random alphanumerical id
def random_id():
rid = ''
for x in range(8): rid += random.choice(string.ascii_letters + string.digits)
return rid
def message_callback(session, message):
global unacked_messages_quota
gcm = message.getTags('gcm')
if not gcm:
LOGGER.warning("Received message callback, but not a GCM message")
return
gcm_json = gcm[0].getData()
msg = json.loads(gcm_json)
if (msg.has_key('message_type') and
(msg['message_type'] == 'ack' or msg['message_type'] == 'nack')):
unacked_messages_quota += 1
return
# Ignore control messages. The only type of control message
# at the moment is CONNECTION_DRAINING, which we can't really
# do anything about, since we're not actively sending messages
# anyway
if (msg.has_key('message_type') and msg['message_type'] == 'control'):
LOGGER.info("Control message received")
return
# Ignore any messages that do not have a 'from' or 'message_id'
if not msg.has_key('from'):
LOGGER.warning("Message does not have 'from' field.")
return
if not msg.has_key('message_id'):
LOGGER.warning("Message does not have 'message_id' field.")
return
# Acknowledge the incoming message immediately.
send({'to': msg['from'],
'message_type': 'ack',
'message_id': msg['message_id']})
if not msg.has_key('data'):
LOGGER.warning("Empty request. No data.")
return
if not type(msg['data']) is dict:
LOGGER.warning("Invalid data in request.")
return
data = msg['data']
if not data.has_key('timestamp'):
LOGGER.warning("No timestamp in request.")
return
try:
timestamp = float(data['timestamp'])
except ValueError:
LOGGER.warning("Invalid timestamp in request.")
return
if ((time.time() - timestamp) > 5):
LOGGER.warning("Timestamp in request is too old. Discarding request.")
return
if not data.has_key('user') or not data.has_key('password'):
LOGGER.warning("No auth data in request.")
return
if not isAuthorized(data['user'], data['password']):
LOGGER.warning("Invalid auth (user, password) = (" +
data['user'] + ", " + data['password'] + ")")
return
# Open the garage door
LOGGER.info("Opening garage door for: " + data['user'])
openGarageDoor()
    # Send an empty response to the app that sent the upstream message to
    # acknowledge that the command was successfully received and processed.
send_queue.append({'to': msg['from'],
'message_id': random_id(),
'data': {}})
flush_queued_messages()
# Sleep for ten seconds to avoid button mashing. Any other
# requests that get queued behind this one will expire before
# they do anything
sleep(10)
def disconnect_callback():
LOGGER.warning("XMPP session disconnected. Reconnecting.")
connect()
def send(json_dict):
template = ("<message><gcm xmlns='google:mobile:data'>{1}</gcm></message>")
client.send(xmpp.protocol.Message(
node=template.format(client.Bind.bound[0], json.dumps(json_dict))))
def connect():
global client
while True:
        # There seems to be a bug in the XMPP library where the client
        # object doesn't get properly recreated after a connection failure,
        # so we recreate it from scratch each time
client = xmpp.Client(SERVER, debug=['always'])
# Add a bit of delay here to prevent crazy fast retries
sleep(10)
LOGGER.info('Attempting to connect to GCM service.')
client.connect(server=(SERVER, PORT), secure=1, use_srv=False)
if not client.isConnected():
continue
auth = client.auth(USERNAME, PASSWORD)
if not auth:
LOGGER.error('GCM Server Authentication failed!')
else:
break
client.RegisterHandler('message', message_callback)
client.RegisterDisconnectHandler(disconnect_callback)
LOGGER.info('Connected.')
def flush_queued_messages():
global unacked_messages_quota
while len(send_queue) and unacked_messages_quota > 0:
send(send_queue.pop(0))
unacked_messages_quota -= 1
def main():
connect()
count = 0
while (True):
count += 1
# Send a space character once every 60 seconds to see if the
# connection is still alive
if count >= 60:
LOGGER.info("Sending keep-alive")
try:
client.send(' ')
except IOError, e:
                LOGGER.info('Unable to send: ' + str(e))
count = 0
try:
client.Process(1)
flush_queued_messages()
        except AttributeError, e:
            # An AttributeError can surface here if the client dies
            # unexpectedly
LOGGER.error('Client error: '+ str(e))
time.sleep(5)
connect()
if __name__ == '__main__':
atexit.register(cleanup)
configure_logging()
setup_gpio()
main()
``` |
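The service authorizes requests against a YAML file that, judging from `isAuthorized`, is a flat mapping of usernames to passwords. A sketch of creating such a file (the schema is inferred, not documented in the source; credentials are placeholders):
```python
# Run once as root to provision credentials.
from yaml import dump

with open("/etc/garageDoorService/auth.yaml", "w") as f:
    f.write(dump({"alice": "s3cret", "bob": "hunter2"},
                 default_flow_style=False))
# Produces:
#   alice: s3cret
#   bob: hunter2
```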
{
"source": "jpuerta/cqlengine",
"score": 2
} |
#### File: cqlengine/cqlengine/models.py
```python
try:
from collections import OrderedDict
except:
from cassandra.util import OrderedDict
import re
import warnings
import six
from cqlengine import columns
from cqlengine.exceptions import ModelException, CQLEngineException, ValidationError
from cqlengine.query import ModelQuerySet, DMLQuery, AbstractQueryableColumn, NOT_SET
from cqlengine.query import DoesNotExist as _DoesNotExist
from cqlengine.query import MultipleObjectsReturned as _MultipleObjectsReturned
class ModelDefinitionException(ModelException): pass
class PolyMorphicModelException(ModelException): pass
DEFAULT_KEYSPACE = None
class UndefinedKeyspaceWarning(Warning):
pass
class hybrid_classmethod(object):
"""
Allows a method to behave as both a class method and
normal instance method depending on how it's called
"""
def __init__(self, clsmethod, instmethod):
self.clsmethod = clsmethod
self.instmethod = instmethod
def __get__(self, instance, owner):
if instance is None:
return self.clsmethod.__get__(owner, owner)
else:
return self.instmethod.__get__(instance, owner)
def __call__(self, *args, **kwargs):
"""
Just a hint to IDEs that it's ok to call this
"""
raise NotImplementedError
class QuerySetDescriptor(object):
"""
returns a fresh queryset for the given model
it's declared on everytime it's accessed
"""
def __get__(self, obj, model):
""" :rtype: ModelQuerySet """
if model.__abstract__:
raise CQLEngineException('cannot execute queries against abstract models')
queryset = model.__queryset__(model)
# if this is a concrete polymorphic model, and the polymorphic
# key is an indexed column, add a filter clause to only return
# logical rows of the proper type
if model._is_polymorphic and not model._is_polymorphic_base:
name, column = model._polymorphic_column_name, model._polymorphic_column
if column.partition_key or column.index:
# look for existing poly types
return queryset.filter(**{name: model.__polymorphic_key__})
return queryset
def __call__(self, *args, **kwargs):
"""
Just a hint to IDEs that it's ok to call this
:rtype: ModelQuerySet
"""
raise NotImplementedError
class TransactionDescriptor(object):
"""
returns a query set descriptor
"""
def __get__(self, instance, model):
if instance:
def transaction_setter(*prepared_transaction, **unprepared_transactions):
if len(prepared_transaction) > 0:
transactions = prepared_transaction[0]
else:
transactions = instance.objects.iff(**unprepared_transactions)._transaction
instance._transaction = transactions
return instance
return transaction_setter
qs = model.__queryset__(model)
def transaction_setter(**unprepared_transactions):
transactions = model.objects.iff(**unprepared_transactions)._transaction
qs._transaction = transactions
return qs
return transaction_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TTLDescriptor(object):
"""
returns a query set descriptor
"""
def __get__(self, instance, model):
if instance:
#instance = copy.deepcopy(instance)
# instance method
def ttl_setter(ts):
instance._ttl = ts
return instance
return ttl_setter
qs = model.__queryset__(model)
def ttl_setter(ts):
qs._ttl = ts
return qs
return ttl_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class TimestampDescriptor(object):
"""
returns a query set descriptor with a timestamp specified
"""
def __get__(self, instance, model):
if instance:
# instance method
def timestamp_setter(ts):
instance._timestamp = ts
return instance
return timestamp_setter
return model.objects.timestamp
def __call__(self, *args, **kwargs):
raise NotImplementedError
class IfNotExistsDescriptor(object):
"""
return a query set descriptor with a if_not_exists flag specified
"""
def __get__(self, instance, model):
if instance:
# instance method
def ifnotexists_setter(ife):
instance._if_not_exists = ife
return instance
return ifnotexists_setter
return model.objects.if_not_exists
def __call__(self, *args, **kwargs):
raise NotImplementedError
class ConsistencyDescriptor(object):
"""
    returns a consistency setter: bound to the instance for instance access,
    or to a fresh queryset for class access
"""
def __get__(self, instance, model):
if instance:
#instance = copy.deepcopy(instance)
def consistency_setter(consistency):
instance.__consistency__ = consistency
return instance
return consistency_setter
qs = model.__queryset__(model)
def consistency_setter(consistency):
qs._consistency = consistency
return qs
return consistency_setter
def __call__(self, *args, **kwargs):
raise NotImplementedError
class ColumnQueryEvaluator(AbstractQueryableColumn):
"""
Wraps a column and allows it to be used in comparator
expressions, returning query operators
ie:
Model.column == 5
"""
def __init__(self, column):
self.column = column
def __unicode__(self):
return self.column.db_field_name
def _get_column(self):
""" :rtype: ColumnQueryEvaluator """
return self.column
class ColumnDescriptor(object):
"""
Handles the reading and writing of column values to and from
a model instance's value manager, as well as creating
comparator queries
"""
def __init__(self, column):
"""
:param column:
:type column: columns.Column
:return:
"""
self.column = column
self.query_evaluator = ColumnQueryEvaluator(self.column)
def __get__(self, instance, owner):
"""
Returns either the value or column, depending
on if an instance is provided or not
:param instance: the model instance
:type instance: Model
"""
try:
return instance._values[self.column.column_name].getval()
except AttributeError as e:
return self.query_evaluator
def __set__(self, instance, value):
"""
Sets the value on an instance, raises an exception with classes
TODO: use None instance to create update statements
"""
if instance:
return instance._values[self.column.column_name].setval(value)
else:
raise AttributeError('cannot reassign column values')
def __delete__(self, instance):
"""
Sets the column value to None, if possible
"""
if instance:
if self.column.can_delete:
instance._values[self.column.column_name].delval()
else:
raise AttributeError('cannot delete {0} columns'.format(self.column.column_name))
class BaseModel(object):
"""
The base model class, don't inherit from this, inherit from Model, defined below
"""
class DoesNotExist(_DoesNotExist): pass
class MultipleObjectsReturned(_MultipleObjectsReturned): pass
objects = QuerySetDescriptor()
ttl = TTLDescriptor()
consistency = ConsistencyDescriptor()
iff = TransactionDescriptor()
# custom timestamps, see USING TIMESTAMP X
timestamp = TimestampDescriptor()
if_not_exists = IfNotExistsDescriptor()
# _len is lazily created by __len__
    # table names will be generated automatically from the model name;
    # however, you can also define them manually here
__table_name__ = None
# the keyspace for this model
__keyspace__ = None
# polymorphism options
__polymorphic_key__ = None
# compaction options
__compaction__ = None
__compaction_tombstone_compaction_interval__ = None
__compaction_tombstone_threshold__ = None
# compaction - size tiered options
__compaction_bucket_high__ = None
__compaction_bucket_low__ = None
__compaction_max_threshold__ = None
__compaction_min_threshold__ = None
__compaction_min_sstable_size__ = None
# compaction - leveled options
__compaction_sstable_size_in_mb__ = None
# end compaction
# the queryset class used for this class
__queryset__ = ModelQuerySet
__dmlquery__ = DMLQuery
__default_ttl__ = None # default ttl value to use
__consistency__ = None # can be set per query
# Additional table properties
__bloom_filter_fp_chance__ = None
__caching__ = None
__comment__ = None
__dclocal_read_repair_chance__ = None
__default_time_to_live__ = None
__gc_grace_seconds__ = None
__index_interval__ = None
__memtable_flush_period_in_ms__ = None
__populate_io_cache_on_flush__ = None
__read_repair_chance__ = None
__replicate_on_write__ = None
_timestamp = None # optional timestamp to include with the operation (USING TIMESTAMP)
_if_not_exists = False # optional if_not_exists flag to check existence before insertion
def __init__(self, **values):
self._values = dict()
self._ttl = self.__default_ttl__
self._timestamp = None
self._transaction = None
for name, column in self._columns.items():
value = values.get(name, None)
if value is not None or isinstance(column, columns.BaseContainerColumn):
value = column.to_python(value)
value_mngr = column.value_manager(self, column, value)
if name in values:
value_mngr.explicit = True
self._values[name] = value_mngr
# a flag set by the deserializer to indicate
# that update should be used when persisting changes
self._is_persisted = False
self._batch = None
self._timeout = NOT_SET
def __repr__(self):
"""
Pretty printing of models by their primary key
"""
return '{0} <{1}>'.format(self.__class__.__name__,
', '.join(('{0}={1}'.format(k, getattr(self, k)) for k,v in six.iteritems(self._primary_keys)))
)
@classmethod
def _discover_polymorphic_submodels(cls):
if not cls._is_polymorphic_base:
raise ModelException('_discover_polymorphic_submodels can only be called on polymorphic base classes')
def _discover(klass):
if not klass._is_polymorphic_base and klass.__polymorphic_key__ is not None:
cls._polymorphic_map[klass.__polymorphic_key__] = klass
for subklass in klass.__subclasses__():
_discover(subklass)
_discover(cls)
@classmethod
def _get_model_by_polymorphic_key(cls, key):
if not cls._is_polymorphic_base:
raise ModelException('_get_model_by_polymorphic_key can only be called on polymorphic base classes')
return cls._polymorphic_map.get(key)
@classmethod
def _construct_instance(cls, values):
"""
method used to construct instances from query results
this is where polymorphic deserialization occurs
"""
# we're going to take the values, which is from the DB as a dict
# and translate that into our local fields
# the db_map is a db_field -> model field map
items = values.items()
field_dict = dict([(cls._db_map.get(k, k),v) for k,v in items])
if cls._is_polymorphic:
poly_key = field_dict.get(cls._polymorphic_column_name)
if poly_key is None:
raise PolyMorphicModelException('polymorphic key was not found in values')
poly_base = cls if cls._is_polymorphic_base else cls._polymorphic_base
klass = poly_base._get_model_by_polymorphic_key(poly_key)
if klass is None:
poly_base._discover_polymorphic_submodels()
klass = poly_base._get_model_by_polymorphic_key(poly_key)
if klass is None:
raise PolyMorphicModelException(
'unrecognized polymorphic key {0} for class {1}'.format(poly_key, poly_base.__name__)
)
if not issubclass(klass, cls):
raise PolyMorphicModelException(
'{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__)
)
field_dict = dict((k, v) for (k, v) in field_dict.items() if k in \
klass._columns.keys())
else:
klass = cls
instance = klass(**field_dict)
instance._is_persisted = True
return instance
def _can_update(self):
"""
Called by the save function to check if this should be
persisted with update or insert
:return:
"""
        if not self._is_persisted:
            return False
        return all(not self._values[k].changed for k in self._primary_keys)
@classmethod
def _get_keyspace(cls):
""" Returns the manual keyspace, if set, otherwise the default keyspace """
return cls.__keyspace__ or DEFAULT_KEYSPACE
@classmethod
def _get_column(cls, name):
"""
Returns the column matching the given name, raising a key error if
it doesn't exist
:param name: the name of the column to return
:rtype: Column
"""
return cls._columns[name]
def __eq__(self, other):
if self.__class__ != other.__class__:
return False
# check attribute keys
keys = set(self._columns.keys())
other_keys = set(other._columns.keys())
if keys != other_keys:
return False
# check that all of the attributes match
for key in other_keys:
if getattr(self, key, None) != getattr(other, key, None):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def column_family_name(cls, include_keyspace=True):
"""
Returns the column family name if it's been defined
otherwise, it creates it from the module and class name
"""
cf_name = ''
if cls.__table_name__:
cf_name = cls.__table_name__.lower()
else:
# get polymorphic base table names if model is polymorphic
if cls._is_polymorphic and not cls._is_polymorphic_base:
return cls._polymorphic_base.column_family_name(include_keyspace=include_keyspace)
camelcase = re.compile(r'([a-z])([A-Z])')
ccase = lambda s: camelcase.sub(lambda v: '{0}_{1}'.format(v.group(1), v.group(2).lower()), s)
cf_name += ccase(cls.__name__)
#trim to less than 48 characters or cassandra will complain
cf_name = cf_name[-48:]
cf_name = cf_name.lower()
cf_name = re.sub(r'^_+', '', cf_name)
if not include_keyspace: return cf_name
return '{0}.{1}'.format(cls._get_keyspace(), cf_name)
def validate(self):
""" Cleans and validates the field values """
for name, col in self._columns.items():
v = getattr(self, name)
if v is None and not self._values[name].explicit and col.has_default:
v = col.get_default()
val = col.validate(v)
setattr(self, name, val)
### Let an instance be used like a dict of its columns keys/values
def __iter__(self):
""" Iterate over column ids. """
for column_id in self._columns.keys():
yield column_id
def __getitem__(self, key):
""" Returns column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return getattr(self, key)
def __setitem__(self, key, val):
""" Sets a column's value. """
if not isinstance(key, six.string_types):
raise TypeError
if key not in self._columns.keys():
raise KeyError
return setattr(self, key, val)
def __len__(self):
""" Returns the number of columns defined on that model. """
try:
return self._len
except:
self._len = len(self._columns.keys())
return self._len
def keys(self):
""" Returns list of column's IDs. """
return [k for k in self]
def values(self):
""" Returns list of column's values. """
return [self[k] for k in self]
def items(self):
""" Returns a list of columns's IDs/values. """
return [(k, self[k]) for k in self]
def _as_dict(self):
""" Returns a map of column names to cleaned values """
values = self._dynamic_columns or dict()
for name, col in self._columns.items():
values[name] = col.to_database(getattr(self, name, None))
return values
@classmethod
def create(cls, **kwargs):
extra_columns = set(kwargs.keys()) - set(cls._columns.keys())
if extra_columns:
raise ValidationError("Incorrect columns passed: {0}".format(extra_columns))
return cls.objects.create(**kwargs)
@classmethod
def all(cls):
return cls.objects.all()
@classmethod
def filter(cls, *args, **kwargs):
# if kwargs.values().count(None):
# raise CQLEngineException("Cannot pass None as a filter")
return cls.objects.filter(*args, **kwargs)
@classmethod
def get(cls, *args, **kwargs):
return cls.objects.get(*args, **kwargs)
def timeout(self, timeout):
assert self._batch is None, 'Setting both timeout and batch is not supported'
self._timeout = timeout
return self
def save(self):
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolyMorphicModelException('cannot save polymorphic base model')
else:
setattr(self, self._polymorphic_column_name, self.__polymorphic_key__)
is_new = self.pk is None
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
if_not_exists=self._if_not_exists,
transaction=self._transaction,
timeout=self._timeout).save()
#reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def update(self, **values):
for k, v in values.items():
col = self._columns.get(k)
# check for nonexistant columns
if col is None:
raise ValidationError("{0}.{1} has no column named: {2}".format(self.__module__, self.__class__.__name__, k))
# check for primary key update attempts
if col.is_primary_key:
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(k, self.__module__, self.__class__.__name__))
setattr(self, k, v)
# handle polymorphic models
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolyMorphicModelException('cannot update polymorphic base model')
else:
setattr(self, self._polymorphic_column_name, self.__polymorphic_key__)
self.validate()
self.__dmlquery__(self.__class__, self,
batch=self._batch,
ttl=self._ttl,
timestamp=self._timestamp,
consistency=self.__consistency__,
transaction=self._transaction,
timeout=self._timeout).update()
#reset the value managers
for v in self._values.values():
v.reset_previous_value()
self._is_persisted = True
self._ttl = self.__default_ttl__
self._timestamp = None
return self
def delete(self):
""" Deletes this instance """
self.__dmlquery__(self.__class__, self,
batch=self._batch,
timestamp=self._timestamp,
consistency=self.__consistency__,
timeout=self._timeout).delete()
def get_changed_columns(self):
""" returns a list of the columns that have been updated since instantiation or save """
return [k for k,v in self._values.items() if v.changed]
@classmethod
def _class_batch(cls, batch):
return cls.objects.batch(batch)
def _inst_batch(self, batch):
assert self._timeout is NOT_SET, 'Setting both timeout and batch is not supported'
self._batch = batch
return self
batch = hybrid_classmethod(_class_batch, _inst_batch)
class ModelMetaClass(type):
def __new__(cls, name, bases, attrs):
"""
"""
#move column definitions into columns dict
#and set default column names
column_dict = OrderedDict()
primary_keys = OrderedDict()
pk_name = None
#get inherited properties
inherited_columns = OrderedDict()
for base in bases:
for k,v in getattr(base, '_defined_columns', dict()).items():
inherited_columns.setdefault(k,v)
#short circuit __abstract__ inheritance
is_abstract = attrs['__abstract__'] = attrs.get('__abstract__', False)
#short circuit __polymorphic_key__ inheritance
attrs['__polymorphic_key__'] = attrs.get('__polymorphic_key__', None)
def _transform_column(col_name, col_obj):
column_dict[col_name] = col_obj
if col_obj.primary_key:
primary_keys[col_name] = col_obj
col_obj.set_column_name(col_name)
#set properties
attrs[col_name] = ColumnDescriptor(col_obj)
column_definitions = [(k,v) for k,v in attrs.items() if isinstance(v, columns.Column)]
#column_definitions = sorted(column_definitions, lambda x,y: cmp(x[1].position, y[1].position))
column_definitions = sorted(column_definitions, key=lambda x: x[1].position)
is_polymorphic_base = any([c[1].polymorphic_key for c in column_definitions])
column_definitions = [x for x in inherited_columns.items()] + column_definitions
polymorphic_columns = [c for c in column_definitions if c[1].polymorphic_key]
is_polymorphic = len(polymorphic_columns) > 0
if len(polymorphic_columns) > 1:
raise ModelDefinitionException('only one polymorphic_key can be defined in a model, {0} found'.format(len(polymorphic_columns)))
polymorphic_column_name, polymorphic_column = polymorphic_columns[0] if polymorphic_columns else (None, None)
if isinstance(polymorphic_column, (columns.BaseContainerColumn, columns.Counter)):
raise ModelDefinitionException('counter and container columns cannot be used for polymorphic keys')
# find polymorphic base class
polymorphic_base = None
if is_polymorphic and not is_polymorphic_base:
def _get_polymorphic_base(bases):
for base in bases:
if getattr(base, '_is_polymorphic_base', False):
return base
klass = _get_polymorphic_base(base.__bases__)
if klass:
return klass
polymorphic_base = _get_polymorphic_base(bases)
defined_columns = OrderedDict(column_definitions)
# check for primary key
if not is_abstract and not any([v.primary_key for k,v in column_definitions]):
raise ModelDefinitionException("At least 1 primary key is required.")
counter_columns = [c for c in defined_columns.values() if isinstance(c, columns.Counter)]
data_columns = [c for c in defined_columns.values() if not c.primary_key and not isinstance(c, columns.Counter)]
if counter_columns and data_columns:
raise ModelDefinitionException('counter models may not have data columns')
has_partition_keys = any(v.partition_key for (k, v) in column_definitions)
#transform column definitions
for k, v in column_definitions:
# don't allow a column with the same name as a built-in attribute or method
if k in BaseModel.__dict__:
raise ModelDefinitionException("column '{0}' conflicts with built-in attribute/method".format(k))
# counter column primary keys are not allowed
if (v.primary_key or v.partition_key) and isinstance(v, (columns.Counter, columns.BaseContainerColumn)):
raise ModelDefinitionException('counter columns and container columns cannot be used as primary keys')
# this will mark the first primary key column as a partition
# key, if one hasn't been set already
if not has_partition_keys and v.primary_key:
v.partition_key = True
has_partition_keys = True
_transform_column(k, v)
partition_keys = OrderedDict(k for k in primary_keys.items() if k[1].partition_key)
clustering_keys = OrderedDict(k for k in primary_keys.items() if not k[1].partition_key)
#setup partition key shortcut
if len(partition_keys) == 0:
if not is_abstract:
raise ModelException("at least one partition key must be defined")
if len(partition_keys) == 1:
pk_name = [x for x in partition_keys.keys()][0]
attrs['pk'] = attrs[pk_name]
else:
# composite partition key case, get/set a tuple of values
_get = lambda self: tuple(self._values[c].getval() for c in partition_keys.keys())
_set = lambda self, val: tuple(self._values[c].setval(v) for (c, v) in zip(partition_keys.keys(), val))
attrs['pk'] = property(_get, _set)
# some validation
col_names = set()
for v in column_dict.values():
# check for duplicate column names
if v.db_field_name in col_names:
raise ModelException("{0} defines the column {1} more than once".format(name, v.db_field_name))
if v.clustering_order and not (v.primary_key and not v.partition_key):
raise ModelException("clustering_order may be specified only for clustering primary keys")
if v.clustering_order and v.clustering_order.lower() not in ('asc', 'desc'):
raise ModelException("invalid clustering order {0} for column {1}".format(repr(v.clustering_order), v.db_field_name))
col_names.add(v.db_field_name)
#create db_name -> model name map for loading
db_map = dict()
for field_name, col in column_dict.items():
db_map[col.db_field_name] = field_name
#add management members to the class
attrs['_columns'] = column_dict
attrs['_primary_keys'] = primary_keys
attrs['_defined_columns'] = defined_columns
# maps the database field to the models key
attrs['_db_map'] = db_map
attrs['_pk_name'] = pk_name
attrs['_dynamic_columns'] = dict()
attrs['_partition_keys'] = partition_keys
attrs['_clustering_keys'] = clustering_keys
attrs['_has_counter'] = len(counter_columns) > 0
# add polymorphic management attributes
attrs['_is_polymorphic_base'] = is_polymorphic_base
attrs['_is_polymorphic'] = is_polymorphic
attrs['_polymorphic_base'] = polymorphic_base
attrs['_polymorphic_column'] = polymorphic_column
attrs['_polymorphic_column_name'] = polymorphic_column_name
attrs['_polymorphic_map'] = dict() if is_polymorphic_base else None
#setup class exceptions
DoesNotExistBase = None
for base in bases:
DoesNotExistBase = getattr(base, 'DoesNotExist', None)
if DoesNotExistBase is not None: break
DoesNotExistBase = DoesNotExistBase or attrs.pop('DoesNotExist', BaseModel.DoesNotExist)
attrs['DoesNotExist'] = type('DoesNotExist', (DoesNotExistBase,), dict())
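        # each model gets its own DoesNotExist subclass, so callers can catch
        # Model.DoesNotExist without also catching misses from unrelated models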
MultipleObjectsReturnedBase = None
for base in bases:
MultipleObjectsReturnedBase = getattr(base, 'MultipleObjectsReturned', None)
if MultipleObjectsReturnedBase is not None: break
        MultipleObjectsReturnedBase = MultipleObjectsReturnedBase or attrs.pop('MultipleObjectsReturned', BaseModel.MultipleObjectsReturned)
attrs['MultipleObjectsReturned'] = type('MultipleObjectsReturned',
(MultipleObjectsReturnedBase,), dict())
#create the class and add a QuerySet to it
klass = super(ModelMetaClass, cls).__new__(cls, name, bases, attrs)
return klass
import six
@six.add_metaclass(ModelMetaClass)
class Model(BaseModel):
"""
the db name for the column family can be set as the attribute db_name, or
it will be generated from the class name
"""
__abstract__ = True
# __metaclass__ = ModelMetaClass
```
#### File: tests/management/test_management.py
```python
import mock
from cqlengine import ALL, CACHING_ALL, CACHING_NONE
from cqlengine.connection import get_session
from cqlengine.exceptions import CQLEngineException
from cqlengine.management import get_fields, sync_table, drop_table
from cqlengine.tests.base import BaseCassEngTestCase
from cqlengine.tests.base import CASSANDRA_VERSION, PROTOCOL_VERSION
from cqlengine import management
from cqlengine.tests.query.test_queryset import TestModel
from cqlengine.models import Model
from cqlengine import columns, SizeTieredCompactionStrategy, LeveledCompactionStrategy
from unittest import skipUnless
class CreateKeyspaceTest(BaseCassEngTestCase):
    def test_create_succeeds(self):
management.create_keyspace('test_keyspace', strategy_class="SimpleStrategy", replication_factor=1)
management.delete_keyspace('test_keyspace')
class DeleteTableTest(BaseCassEngTestCase):
def test_multiple_deletes_dont_fail(self):
"""
"""
sync_table(TestModel)
drop_table(TestModel)
drop_table(TestModel)
class LowercaseKeyModel(Model):
first_key = columns.Integer(primary_key=True)
second_key = columns.Integer(primary_key=True)
some_data = columns.Text()
class CapitalizedKeyModel(Model):
firstKey = columns.Integer(primary_key=True)
secondKey = columns.Integer(primary_key=True)
someData = columns.Text()
class PrimaryKeysOnlyModel(Model):
__compaction__ = LeveledCompactionStrategy
    first_key = columns.Integer(primary_key=True)
second_key = columns.Integer(primary_key=True)
class CapitalizedKeyTest(BaseCassEngTestCase):
def test_table_definition(self):
""" Tests that creating a table with capitalized column names succeedso """
sync_table(LowercaseKeyModel)
sync_table(CapitalizedKeyModel)
drop_table(LowercaseKeyModel)
drop_table(CapitalizedKeyModel)
class FirstModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
class SecondModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
fourth_key = columns.Text()
class ThirdModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
# removed fourth key, but it should stay in the DB
blah = columns.Map(columns.Text, columns.Text)
class FourthModel(Model):
__table_name__ = 'first_model'
first_key = columns.UUID(primary_key=True)
second_key = columns.UUID()
third_key = columns.Text()
# removed fourth key, but it should stay in the DB
renamed = columns.Map(columns.Text, columns.Text, db_field='blah')
class AddColumnTest(BaseCassEngTestCase):
def setUp(self):
drop_table(FirstModel)
def test_add_column(self):
sync_table(FirstModel)
fields = get_fields(FirstModel)
# this should contain the second key
self.assertEqual(len(fields), 2)
# get schema
sync_table(SecondModel)
fields = get_fields(FirstModel)
self.assertEqual(len(fields), 3)
sync_table(ThirdModel)
fields = get_fields(FirstModel)
self.assertEqual(len(fields), 4)
sync_table(FourthModel)
fields = get_fields(FirstModel)
self.assertEqual(len(fields), 4)
class ModelWithTableProperties(Model):
# Set random table properties
__bloom_filter_fp_chance__ = 0.76328
__caching__ = CACHING_ALL
__comment__ = 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR'
__gc_grace_seconds__ = 2063
__populate_io_cache_on_flush__ = True
__read_repair_chance__ = 0.17985
__replicate_on_write__ = False
__dclocal_read_repair_chance__ = 0.50811
key = columns.UUID(primary_key=True)
# kind of a hack, but we only test this property on C >= 2.0
if CASSANDRA_VERSION >= 20:
ModelWithTableProperties.__memtable_flush_period_in_ms__ = 43681
ModelWithTableProperties.__index_interval__ = 98706
ModelWithTableProperties.__default_time_to_live__ = 4756
class TablePropertiesTests(BaseCassEngTestCase):
def setUp(self):
drop_table(ModelWithTableProperties)
def test_set_table_properties(self):
sync_table(ModelWithTableProperties)
expected = {'bloom_filter_fp_chance': 0.76328,
'comment': 'TxfguvBdzwROQALmQBOziRMbkqVGFjqcJfVhwGR',
'gc_grace_seconds': 2063,
'read_repair_chance': 0.17985,
# For some reason 'dclocal_read_repair_chance' in CQL is called
# just 'local_read_repair_chance' in the schema table.
# Source: https://issues.apache.org/jira/browse/CASSANDRA-6717
# TODO: due to a bug in the native driver i'm not seeing the local read repair chance show up
# 'local_read_repair_chance': 0.50811,
}
if CASSANDRA_VERSION <= 20:
expected['caching'] = CACHING_ALL
expected['replicate_on_write'] = False
if CASSANDRA_VERSION == 20:
expected['populate_io_cache_on_flush'] = True
expected['index_interval'] = 98706
if CASSANDRA_VERSION >= 20:
expected['default_time_to_live'] = 4756
expected['memtable_flush_period_in_ms'] = 43681
self.assertDictContainsSubset(expected, management.get_table_settings(ModelWithTableProperties).options)
def test_table_property_update(self):
ModelWithTableProperties.__bloom_filter_fp_chance__ = 0.66778
ModelWithTableProperties.__caching__ = CACHING_NONE
ModelWithTableProperties.__comment__ = '<KEY>'
ModelWithTableProperties.__gc_grace_seconds__ = 96362
ModelWithTableProperties.__populate_io_cache_on_flush__ = False
ModelWithTableProperties.__read_repair_chance__ = 0.2989
ModelWithTableProperties.__replicate_on_write__ = True
ModelWithTableProperties.__dclocal_read_repair_chance__ = 0.12732
if CASSANDRA_VERSION >= 20:
ModelWithTableProperties.__default_time_to_live__ = 65178
ModelWithTableProperties.__memtable_flush_period_in_ms__ = 60210
ModelWithTableProperties.__index_interval__ = 94207
sync_table(ModelWithTableProperties)
table_settings = management.get_table_settings(ModelWithTableProperties).options
expected = {'bloom_filter_fp_chance': 0.66778,
'comment': '<KEY>',
'gc_grace_seconds': 96362,
'read_repair_chance': 0.2989,
#'local_read_repair_chance': 0.12732,
}
if CASSANDRA_VERSION >= 20:
expected['memtable_flush_period_in_ms'] = 60210
expected['default_time_to_live'] = 65178
if CASSANDRA_VERSION == 20:
expected['index_interval'] = 94207
        # these features were removed in cassandra 2.1
if CASSANDRA_VERSION <= 20:
expected['caching'] = CACHING_NONE
expected['replicate_on_write'] = True
expected['populate_io_cache_on_flush'] = False
self.assertDictContainsSubset(expected, table_settings)
class SyncTableTests(BaseCassEngTestCase):
def setUp(self):
drop_table(PrimaryKeysOnlyModel)
def test_sync_table_works_with_primary_keys_only_tables(self):
# This is "create table":
sync_table(PrimaryKeysOnlyModel)
# let's make sure settings persisted correctly:
assert PrimaryKeysOnlyModel.__compaction__ == LeveledCompactionStrategy
# blows up with DoesNotExist if table does not exist
table_settings = management.get_table_settings(PrimaryKeysOnlyModel)
        # let's make sure the compaction strategy we care about is set
assert LeveledCompactionStrategy in table_settings.options['compaction_strategy_class']
# Now we are "updating" the table:
# setting up something to change
PrimaryKeysOnlyModel.__compaction__ = SizeTieredCompactionStrategy
# primary-keys-only tables do not create entries in system.schema_columns
# table. Only non-primary keys are added to that table.
# Our code must deal with that eventuality properly (not crash)
# on subsequent runs of sync_table (which runs get_fields internally)
get_fields(PrimaryKeysOnlyModel)
sync_table(PrimaryKeysOnlyModel)
table_settings = management.get_table_settings(PrimaryKeysOnlyModel)
assert SizeTieredCompactionStrategy in table_settings.options['compaction_strategy_class']
class NonModelFailureTest(BaseCassEngTestCase):
class FakeModel(object):
pass
def test_failure(self):
with self.assertRaises(CQLEngineException):
sync_table(self.FakeModel)
@skipUnless(PROTOCOL_VERSION >= 2, "only runs against the cql3 protocol v2.0")
def test_static_columns():
class StaticModel(Model):
id = columns.Integer(primary_key=True)
c = columns.Integer(primary_key=True)
name = columns.Text(static=True)
drop_table(StaticModel)
from mock import patch
from cqlengine.connection import get_session
session = get_session()
with patch.object(session, "execute", wraps=session.execute) as m:
sync_table(StaticModel)
assert m.call_count > 0
statement = m.call_args[0][0].query_string
assert '"name" text static' in statement, statement
# if we sync again, we should not apply an alter w/ a static
sync_table(StaticModel)
with patch.object(session, "execute", wraps=session.execute) as m2:
sync_table(StaticModel)
assert len(m2.call_args_list) == 1
assert "ALTER" not in m2.call_args[0][0].query_string
```
#### File: tests/model/test_model.py
```python
from unittest import TestCase
from cqlengine.models import Model, ModelDefinitionException
from cqlengine import columns
class TestModel(TestCase):
""" Tests the non-io functionality of models """
def test_instance_equality(self):
""" tests the model equality functionality """
class EqualityModel(Model):
pk = columns.Integer(primary_key=True)
m0 = EqualityModel(pk=0)
m1 = EqualityModel(pk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
def test_model_equality(self):
""" tests the model equality functionality """
class EqualityModel0(Model):
pk = columns.Integer(primary_key=True)
class EqualityModel1(Model):
kk = columns.Integer(primary_key=True)
m0 = EqualityModel0(pk=0)
m1 = EqualityModel1(kk=1)
self.assertEqual(m0, m0)
self.assertNotEqual(m0, m1)
class BuiltInAttributeConflictTest(TestCase):
"""tests Model definitions that conflict with built-in attributes/methods"""
def test_model_with_attribute_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in attribute"""
with self.assertRaises(ModelDefinitionException):
class IllegalTimestampColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
timestamp = columns.BigInt()
def test_model_with_method_name_conflict(self):
"""should raise exception when model defines column that conflicts with built-in method"""
with self.assertRaises(ModelDefinitionException):
class IllegalFilterColumnModel(Model):
my_primary_key = columns.Integer(primary_key=True)
filter = columns.Text()
```
#### File: tests/model/test_polymorphism.py
```python
import uuid
import mock
from cqlengine import columns
from cqlengine import models
from cqlengine.connection import get_session
from cqlengine.tests.base import BaseCassEngTestCase
from cqlengine import management
class TestPolymorphicClassConstruction(BaseCassEngTestCase):
def test_multiple_polymorphic_key_failure(self):
""" Tests that defining a model with more than one polymorphic key fails """
with self.assertRaises(models.ModelDefinitionException):
class M(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
type2 = columns.Integer(polymorphic_key=True)
def test_polymorphic_key_inheritance(self):
""" Tests that polymorphic_key attribute is not inherited """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
class M2(M1):
pass
assert M2.__polymorphic_key__ is None
def test_polymorphic_metaclass(self):
""" Tests that the model meta class configures polymorphic models properly """
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base._is_polymorphic
assert M1._is_polymorphic
assert Base._is_polymorphic_base
assert not M1._is_polymorphic_base
assert Base._polymorphic_column is Base._columns['type1']
assert M1._polymorphic_column is M1._columns['type1']
assert Base._polymorphic_column_name == 'type1'
assert M1._polymorphic_column_name == 'type1'
def test_table_names_are_inherited_from_poly_base(self):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Integer(polymorphic_key=True)
class M1(Base):
__polymorphic_key__ = 1
assert Base.column_family_name() == M1.column_family_name()
def test_collection_columns_cant_be_polymorphic_keys(self):
with self.assertRaises(models.ModelDefinitionException):
class Base(models.Model):
partition = columns.Integer(primary_key=True)
type1 = columns.Set(columns.Integer, polymorphic_key=True)
class PolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class Poly1(PolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class Poly2(PolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestPolymorphicModel(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestPolymorphicModel, cls).setUpClass()
management.sync_table(Poly1)
management.sync_table(Poly2)
@classmethod
def tearDownClass(cls):
super(TestPolymorphicModel, cls).tearDownClass()
management.drop_table(Poly1)
management.drop_table(Poly2)
def test_saving_base_model_fails(self):
with self.assertRaises(models.PolyMorphicModelException):
PolyBase.create()
def test_saving_subclass_saves_poly_key(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
assert p1.row_type == Poly1.__polymorphic_key__
assert p2.row_type == Poly2.__polymorphic_key__
def test_query_deserialization(self):
p1 = Poly1.create(data1='pickle')
p2 = Poly2.create(data2='bacon')
p1r = PolyBase.get(partition=p1.partition)
p2r = PolyBase.get(partition=p2.partition)
assert isinstance(p1r, Poly1)
assert isinstance(p2r, Poly2)
def test_delete_on_polymorphic_subclass_does_not_include_polymorphic_key(self):
p1 = Poly1.create()
session = get_session()
with mock.patch.object(session, 'execute') as m:
Poly1.objects(partition=p1.partition).delete()
# make sure our polymorphic key isn't in the CQL
# not sure how we would even get here if it was in there
# since the CQL would fail.
self.assertNotIn("row_type", m.call_args[0][0].query_string)
class UnindexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True)
class UnindexedPoly1(UnindexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class UnindexedPoly2(UnindexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class UnindexedPoly3(UnindexedPoly2):
__polymorphic_key__ = 3
data3 = columns.Text()
class TestUnindexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestUnindexedPolymorphicQuery, cls).setUpClass()
management.sync_table(UnindexedPoly1)
management.sync_table(UnindexedPoly2)
management.sync_table(UnindexedPoly3)
cls.p1 = UnindexedPoly1.create(data1='pickle')
cls.p2 = UnindexedPoly2.create(partition=cls.p1.partition, data2='bacon')
cls.p3 = UnindexedPoly3.create(partition=cls.p1.partition, data3='turkey')
@classmethod
def tearDownClass(cls):
super(TestUnindexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(UnindexedPoly1)
management.drop_table(UnindexedPoly2)
management.drop_table(UnindexedPoly3)
def test_non_conflicting_type_results_work(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly1.objects(partition=p1.partition, cluster=p1.cluster))) == 1
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster=p2.cluster))) == 1
def test_subclassed_model_results_work_properly(self):
p1, p2, p3 = self.p1, self.p2, self.p3
assert len(list(UnindexedPoly2.objects(partition=p1.partition, cluster__in=[p2.cluster, p3.cluster]))) == 2
def test_conflicting_type_results(self):
with self.assertRaises(models.PolyMorphicModelException):
list(UnindexedPoly1.objects(partition=self.p1.partition))
with self.assertRaises(models.PolyMorphicModelException):
list(UnindexedPoly2.objects(partition=self.p1.partition))
class IndexedPolyBase(models.Model):
partition = columns.UUID(primary_key=True, default=uuid.uuid4)
cluster = columns.UUID(primary_key=True, default=uuid.uuid4)
row_type = columns.Integer(polymorphic_key=True, index=True)
class IndexedPoly1(IndexedPolyBase):
__polymorphic_key__ = 1
data1 = columns.Text()
class IndexedPoly2(IndexedPolyBase):
__polymorphic_key__ = 2
data2 = columns.Text()
class TestIndexedPolymorphicQuery(BaseCassEngTestCase):
@classmethod
def setUpClass(cls):
super(TestIndexedPolymorphicQuery, cls).setUpClass()
management.sync_table(IndexedPoly1)
management.sync_table(IndexedPoly2)
cls.p1 = IndexedPoly1.create(data1='pickle')
cls.p2 = IndexedPoly2.create(partition=cls.p1.partition, data2='bacon')
@classmethod
def tearDownClass(cls):
super(TestIndexedPolymorphicQuery, cls).tearDownClass()
management.drop_table(IndexedPoly1)
management.drop_table(IndexedPoly2)
def test_success_case(self):
assert len(list(IndexedPoly1.objects(partition=self.p1.partition))) == 1
assert len(list(IndexedPoly2.objects(partition=self.p1.partition))) == 1
``` |
{
"source": "Jpuf0/PWrapper",
"score": 3
} |
#### File: PWrapper/PWrapper/alliance.py
```python
class Alliance(object):
def __init__(self, id):
# Local
from . import session
from . import API_PATH
from . import API_KEY
path = f'{API_PATH}/alliance/id={id}&key={API_KEY}'
r = session.get(path)
_raw_JSON = r.json()
self.raw_JSON = r.json()
self.leaderids = list(map(int, _raw_JSON['leaderids']))
self.success = bool(_raw_JSON['success'])
self.allianceid = int(_raw_JSON['allianceid'])
self.name = str(_raw_JSON['name'])
self.acronym = str(_raw_JSON['acronym'])
self.score = float(_raw_JSON['score'])
self.color = str(_raw_JSON['color'])
self.colour = str(_raw_JSON['color'])
self.members = int(_raw_JSON['members'])
self.member_id_list = list(map(int, _raw_JSON['member_id_list']))
self.vmodemembers = int(_raw_JSON['vmodemembers'])
self.vmode_members = int(_raw_JSON['vmodemembers'])
        self.accepting_members = bool(_raw_JSON['accepting members'])  # note: the API returns this key with a space in it
self.applicants = int(_raw_JSON['applicants'])
self.flagurl = str(_raw_JSON['flagurl'])
self.forumurl = str(_raw_JSON['forumurl'])
self.irc = str(_raw_JSON['irc'])
        self.discord = str(_raw_JSON['irc'])  # presumably the legacy 'irc' field carries the Discord link
self.gdp = float(_raw_JSON['gdp'])
self.gdp_str = '${:,.2f}'.format(float(_raw_JSON['gdp']))
self.cities = int(_raw_JSON['cities'])
# --- Military ---
self.soldiers = int(_raw_JSON['soldiers'])
self.tanks = int(_raw_JSON['tanks'])
self.aircraft = int(_raw_JSON['aircraft'])
self.ships = int(_raw_JSON['ships'])
self.missiles = int(_raw_JSON['missiles'])
self.nukes = int(_raw_JSON['nukes'])
# --- ---
self.treasures = int(_raw_JSON['treasures'])
def get(self, field):
return getattr(self, field)
def get_members(self):
from .nation import Nation
        return [str(Nation(member)) for member in self.member_id_list]
def __str__(self):
return self.raw_JSON['name']
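# Illustrative usage sketch (hypothetical alliance id; assumes the package's
# session, API_PATH and API_KEY are configured):
#   a = Alliance(1234)
#   print(a.name, a.acronym, a.score)
#   print(a.get_members())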
``` |
{
"source": "jpugliesi/feast",
"score": 2
} |
#### File: launchers/standalone/local.py
```python
import os
import socket
import subprocess
import threading
import uuid
from contextlib import closing
from datetime import datetime
from typing import Dict, List, Optional
import requests
from requests.exceptions import RequestException
from feast.pyspark.abc import (
BQ_SPARK_PACKAGE,
BatchIngestionJob,
BatchIngestionJobParameters,
JobLauncher,
RetrievalJob,
RetrievalJobParameters,
SparkJob,
SparkJobFailure,
SparkJobParameters,
SparkJobStatus,
StreamIngestionJob,
StreamIngestionJobParameters,
)
class JobCache:
"""
A *global* in-memory cache of Spark jobs.
This is necessary since we can't easily keep track of running Spark jobs in local mode, since
there is no external state (unlike EMR and Dataproc which keep track of the running jobs for
us).
"""
# Map of job_id -> spark job
job_by_id: Dict[str, SparkJob]
# Map of job_id -> job_hash. The value can be None, indicating this job was
# manually created and Job Service isn't maintaining the state of this job
hash_by_id: Dict[str, Optional[str]]
# This reentrant lock is necessary for multi-threading access
lock: threading.RLock
def __init__(self):
self.job_by_id = {}
self.hash_by_id = {}
self.lock = threading.RLock()
def add_job(self, job: SparkJob) -> None:
"""Add a Spark job to the cache.
Args:
job (SparkJob): The new Spark job to add.
"""
with self.lock:
self.job_by_id[job.get_id()] = job
if isinstance(job, StreamIngestionJob):
self.hash_by_id[job.get_id()] = job.get_hash()
def list_jobs(self) -> List[SparkJob]:
"""List all Spark jobs in the cache."""
with self.lock:
return list(self.job_by_id.values())
def get_job_by_id(self, job_id: str) -> SparkJob:
"""Get a Spark job with the given ID. Throws an exception if such job doesn't exist.
Args:
job_id (str): External ID of the Spark job to get.
Returns:
SparkJob: The Spark job with the given ID.
"""
with self.lock:
return self.job_by_id[job_id]
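# Illustrative usage sketch of the cache (hypothetical SparkJob instance):
#   cache = JobCache()
#   cache.add_job(job)                            # keyed by job.get_id()
#   same_job = cache.get_job_by_id(job.get_id())  # -> the registered SparkJob
#   running = cache.list_jobs()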
global_job_cache = JobCache()
def reset_job_cache():
global global_job_cache
global_job_cache = JobCache()
def _find_free_port():
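    # bind to port 0 so the OS assigns an unused ephemeral port, then report it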
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(("", 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
class StandaloneClusterJobMixin:
def __init__(
self, job_id: str, job_name: str, process: subprocess.Popen, ui_port: int = None
):
self._job_id = job_id
self._job_name = job_name
self._process = process
self._ui_port = ui_port
self._start_time = datetime.utcnow()
def get_id(self) -> str:
return self._job_id
def check_if_started(self):
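        # poll the Spark UI REST API to check whether this application has
        # registered with the master and scheduled at least one stage yet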
if not self._ui_port:
return True
try:
applications = requests.get(
f"http://localhost:{self._ui_port}/api/v1/applications"
).json()
except RequestException:
return False
app = next(
iter(app for app in applications if app["name"] == self._job_name), None
)
if not app:
return False
stages = requests.get(
f"http://localhost:{self._ui_port}/api/v1/applications/{app['id']}/stages"
).json()
return bool(stages)
def get_start_time(self) -> datetime:
return self._start_time
def get_status(self) -> SparkJobStatus:
code = self._process.poll()
if code is None:
if not self.check_if_started():
return SparkJobStatus.STARTING
return SparkJobStatus.IN_PROGRESS
if code != 0:
return SparkJobStatus.FAILED
return SparkJobStatus.COMPLETED
def cancel(self):
self._process.terminate()
class StandaloneClusterBatchIngestionJob(StandaloneClusterJobMixin, BatchIngestionJob):
"""
Batch Ingestion job result for a standalone spark cluster
"""
def __init__(
self,
job_id: str,
job_name: str,
process: subprocess.Popen,
ui_port: int,
feature_table: str,
) -> None:
super().__init__(job_id, job_name, process, ui_port)
self._feature_table = feature_table
def get_feature_table(self) -> str:
return self._feature_table
class StandaloneClusterStreamingIngestionJob(
StandaloneClusterJobMixin, StreamIngestionJob
):
"""
Streaming Ingestion job result for a standalone spark cluster
"""
def __init__(
self,
job_id: str,
job_name: str,
process: subprocess.Popen,
ui_port: int,
job_hash: str,
feature_table: str,
) -> None:
super().__init__(job_id, job_name, process, ui_port)
self._job_hash = job_hash
self._feature_table = feature_table
def get_hash(self) -> str:
return self._job_hash
def get_feature_table(self) -> str:
return self._feature_table
class StandaloneClusterRetrievalJob(StandaloneClusterJobMixin, RetrievalJob):
"""
Historical feature retrieval job result for a standalone spark cluster
"""
def __init__(
self,
job_id: str,
job_name: str,
process: subprocess.Popen,
output_file_uri: str,
):
"""
This is the returned historical feature retrieval job result for StandaloneClusterLauncher.
Args:
job_id (str): Historical feature retrieval job id.
process (subprocess.Popen): Pyspark driver process, spawned by the launcher.
output_file_uri (str): Uri to the historical feature retrieval job output file.
"""
super().__init__(job_id, job_name, process)
self._output_file_uri = output_file_uri
def get_output_file_uri(self, timeout_sec: int = None, block=True):
if not block:
return self._output_file_uri
        with self._process as p:
            try:
                p.wait(timeout_sec)
            except Exception:
                p.kill()
                raise SparkJobFailure("Timeout waiting for subprocess to return")
        # only inspect the return code once the subprocess has actually finished
if self._process.returncode != 0:
stderr = "" if self._process.stderr is None else self._process.stderr.read()
stdout = "" if self._process.stdout is None else self._process.stdout.read()
raise SparkJobFailure(
f"Non zero return code: {self._process.returncode}. stderr: {stderr} stdout: {stdout}"
)
return self._output_file_uri
class StandaloneClusterLauncher(JobLauncher):
"""
Submits jobs to a standalone Spark cluster in client mode.
"""
def __init__(self, master_url: str, spark_home: str = None):
"""
This launcher executes the spark-submit script in a subprocess. The subprocess
will run until the Pyspark driver exits.
Args:
master_url (str):
Spark cluster url. Must start with spark://.
spark_home (str):
Local file path to Spark installation directory. If not provided,
the environmental variable `SPARK_HOME` will be used instead.
"""
self.master_url = master_url
self.spark_home = spark_home if spark_home else os.getenv("SPARK_HOME")
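    # Illustrative usage sketch (hypothetical master URL and install path):
    #   launcher = StandaloneClusterLauncher(master_url="spark://localhost:7077",
    #                                        spark_home="/opt/spark")
    #   job = launcher.historical_feature_retrieval(retrieval_params)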
@property
def spark_submit_script_path(self):
return os.path.join(self.spark_home, "bin/spark-submit")
def spark_submit(
self, job_params: SparkJobParameters, ui_port: int = None
) -> subprocess.Popen:
submission_cmd = [
self.spark_submit_script_path,
"--master",
self.master_url,
"--name",
job_params.get_name(),
]
if job_params.get_class_name():
submission_cmd.extend(["--class", job_params.get_class_name()])
if ui_port:
submission_cmd.extend(["--conf", f"spark.ui.port={ui_port}"])
# Workaround for https://github.com/apache/spark/pull/26552
# Fix running spark job with bigquery connector (w/ shadowing) on JDK 9+
submission_cmd.extend(
[
"--conf",
"spark.executor.extraJavaOptions="
"-Dcom.google.cloud.spark.bigquery.repackaged.io.netty.tryReflectionSetAccessible=true -Duser.timezone=GMT",
"--conf",
"spark.driver.extraJavaOptions="
"-Dcom.google.cloud.spark.bigquery.repackaged.io.netty.tryReflectionSetAccessible=true -Duser.timezone=GMT",
"--conf",
"spark.sql.session.timeZone=UTC", # ignore local timezone
"--packages",
",".join([BQ_SPARK_PACKAGE] + job_params.get_extra_packages()),
"--jars",
"https://storage.googleapis.com/hadoop-lib/gcs/gcs-connector-hadoop2-latest.jar,"
"https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/2.7.3/hadoop-aws-2.7.3.jar,"
"https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk/1.7.4/aws-java-sdk-1.7.4.jar",
"--conf",
"spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem",
"--conf",
"spark.hadoop.fs.gs.impl=com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystem",
]
)
submission_cmd.append(job_params.get_main_file_path())
submission_cmd.extend(job_params.get_arguments())
return subprocess.Popen(submission_cmd)
def historical_feature_retrieval(
self, job_params: RetrievalJobParameters
) -> RetrievalJob:
job_id = str(uuid.uuid4())
job = StandaloneClusterRetrievalJob(
job_id,
job_params.get_name(),
self.spark_submit(job_params),
job_params.get_destination_path(),
)
global_job_cache.add_job(job)
return job
def offline_to_online_ingestion(
self, ingestion_job_params: BatchIngestionJobParameters
) -> BatchIngestionJob:
job_id = str(uuid.uuid4())
ui_port = _find_free_port()
job = StandaloneClusterBatchIngestionJob(
job_id,
ingestion_job_params.get_name(),
self.spark_submit(ingestion_job_params, ui_port),
ui_port,
ingestion_job_params.get_feature_table_name(),
)
global_job_cache.add_job(job)
return job
def start_stream_to_online_ingestion(
self, ingestion_job_params: StreamIngestionJobParameters
) -> StreamIngestionJob:
job_id = str(uuid.uuid4())
ui_port = _find_free_port()
job = StandaloneClusterStreamingIngestionJob(
job_id,
ingestion_job_params.get_name(),
self.spark_submit(ingestion_job_params, ui_port),
ui_port,
ingestion_job_params.get_job_hash(),
ingestion_job_params.get_feature_table_name(),
)
global_job_cache.add_job(job)
return job
def get_job_by_id(self, job_id: str) -> SparkJob:
return global_job_cache.get_job_by_id(job_id)
def list_jobs(self, include_terminated: bool) -> List[SparkJob]:
        if include_terminated:
return global_job_cache.list_jobs()
else:
return [
job
for job in global_job_cache.list_jobs()
if job.get_status()
in (SparkJobStatus.STARTING, SparkJobStatus.IN_PROGRESS)
]
``` |
{
"source": "jpuigcerver/nnutils",
"score": 2
} |
#### File: nnutils/pytorch/setup.py
```python
import io
import os
import re
import torch
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension
extra_compile_args = {
"cxx": ["-std=c++14", "-O3", "-fopenmp"],
"nvcc": ["-std=c++14", "-O3", "--compiler-options=-fopenmp"],
}
CC = os.getenv("CC", None)
if CC is not None:
extra_compile_args["nvcc"].append("-ccbin=" + CC)
def get_cuda_compile_archs(nvcc_flags=None):
"""Get the target CUDA architectures from CUDA_ARCH_LIST env variable"""
if nvcc_flags is None:
nvcc_flags = []
CUDA_ARCH_LIST = os.getenv("CUDA_ARCH_LIST", None)
if CUDA_ARCH_LIST is not None:
for arch in CUDA_ARCH_LIST.split(";"):
m = re.match(r"^([0-9.]+)(?:\(([0-9.]+)\))?(\+PTX)?$", arch)
assert m, "Wrong architecture list: %s" % CUDA_ARCH_LIST
cod_arch = m.group(1).replace(".", "")
com_arch = m.group(2).replace(".", "") if m.group(2) else cod_arch
ptx = True if m.group(3) else False
nvcc_flags.extend(
["-gencode", "arch=compute_{},code=sm_{}".format(com_arch, cod_arch)]
)
if ptx:
nvcc_flags.extend(
[
"-gencode",
"arch=compute_{},code=compute_{}".format(com_arch, cod_arch),
]
)
return nvcc_flags
def get_requirements():
req_file = os.path.join(os.path.dirname(__file__), "requirements.txt")
with io.open(req_file, "r", encoding="utf-8") as f:
return [line.strip() for line in f]
def get_long_description():
readme_file = os.path.join(os.path.dirname(__file__), "README.md")
with io.open(readme_file, "r", encoding="utf-8") as f:
return f.read()
include_dirs = [os.path.dirname(os.path.realpath(__file__)) + "/src"]
headers = [
"src/adaptive_avgpool_2d.h",
"src/adaptive_maxpool_2d.h",
"src/mask_image_from_size.h",
"src/nnutils/adaptive_pool.h",
"src/nnutils/utils.h",
"src/nnutils/cpu/adaptive_avgpool_2d.h",
"src/nnutils/cpu/adaptive_maxpool_2d.h",
"src/nnutils/cpu/mask_image_from_size.h",
]
sources = [
"src/binding.cc",
"src/adaptive_avgpool_2d.cc",
"src/adaptive_maxpool_2d.cc",
"src/mask_image_from_size.cc",
"src/cpu/adaptive_avgpool_2d.cc",
"src/cpu/adaptive_maxpool_2d.cc",
"src/cpu/mask_image_from_size.cc",
]
if torch.cuda.is_available():
sources += [
"src/gpu/adaptive_avgpool_2d.cu",
"src/gpu/adaptive_maxpool_2d.cu",
"src/gpu/mask_image_from_size.cu",
]
headers += [
"src/nnutils/gpu/adaptive_avgpool_2d.h",
"src/nnutils/gpu/adaptive_maxpool_2d.h",
"src/nnutils/gpu/mask_image_from_size.h",
]
Extension = CUDAExtension
extra_compile_args["cxx"].append("-DWITH_CUDA")
extra_compile_args["nvcc"].append("-DWITH_CUDA")
extra_compile_args["nvcc"].extend(get_cuda_compile_archs())
else:
Extension = CppExtension
requirements = get_requirements()
long_description = get_long_description()
setup(
name="nnutils_pytorch",
version="1.6.0",
description="PyTorch bindings of the nnutils library",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jpuigcerver/nnutils",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(),
ext_modules=[
Extension(
name="nnutils_pytorch._C",
sources=sources,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args,
)
],
cmdclass={"build_ext": BuildExtension},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
setup_requires=requirements,
install_requires=requirements,
)
``` |
{
"source": "jpuigcerver/phocnet",
"score": 2
} |
#### File: phocnet/training/phocnet_trainer.py
```python
import logging
import os
import time
import caffe
import numpy as np
from skimage.transform import resize
from phocnet.attributes.phoc import build_phoc, unigrams_from_word_list,\
get_most_common_n_grams
from phocnet.caffe.model_proto_generator import ModelProtoGenerator
from phocnet.caffe.solver_proto_generator import generate_solver_proto
from phocnet.caffe.lmdb_creator import CaffeLMDBCreator
from phocnet.caffe.augmentation import AugmentationCreator
from phocnet.evaluation.time import convert_secs2HHMMSS
from phocnet.evaluation.cnn import calc_map_from_cnn_features
from phocnet.io.xml_io import XMLReader
from phocnet.io.files import save_prototxt, write_list
from phocnet.io.context_manager import Suppressor
from phocnet.numpy.numpy_helper import NumpyHelper
class PHOCNetTrainer(object):
'''
Driver class for all PHOCNet experiments
'''
def __init__(self, doc_img_dir, train_annotation_file, test_annotation_file,
proto_dir, n_train_images, lmdb_dir, save_net_dir,
phoc_unigram_levels, recreate_lmdbs, gpu_id, learning_rate, momentum,
weight_decay, batch_size, test_interval, display, max_iter, step_size,
gamma, debug_mode, metric, annotation_delimiter, use_lower_case_only,
use_bigrams):
'''
The constructor
Args:
doc_img_dir (str): the location of the document images for the given dataset
train_annotation_file (str): the absolute path to the READ-style annotation file for the training samples
test_annotation_file (str): the absolute path to the READ-style annotation file for the test samples
proto_dir (str): absolute path where to save the Caffe protobuffer files
n_train_images (int): the total number of training images to be used
lmdb_dir (str): directory to save the LMDB files into
save_net_dir (str): directory where to save the trained PHOCNet
phoc_unigrams_levels (list of int): the list of unigram levels
recreate_lmdbs (bool): whether to delete and recompute existing LMDBs
debug_mode (bool): flag indicating to run this experiment in debug mode
metric (str): metric for comparing the PHOCNet output during test
annotation_delimiter (str): delimiter for the annotation in the XML files
use_lower_case_only (bool): convert annotation to lower case before creating LMDBs
use_bigrams (bool): if true, the PHOC predicted from the net contains bigrams
gpu_id (int): the ID of the GPU to use
learning_rate (float): the learning rate to be used in training
momentum (float): the SGD momentum to be used in training
weight_decay (float): the SGD weight decay to be used in training
batch_size (int): the number of images to be used in a mini batch
test_interval (int): the number of steps after which to evaluate the PHOCNet during training
display (int): the number of iterations after which to show the training net loss
max_iter (int): the maximum number of SGD iterations
step_size (int): the number of iterations after which to reduce the learning rate
gamma (float): the factor to multiply the step size with after step_size iterations
'''
# store the experiment parameters
self.doc_img_dir = doc_img_dir
self.train_annotation_file = train_annotation_file
self.test_annotation_file = test_annotation_file
self.proto_dir = proto_dir
self.n_train_images = n_train_images
self.lmdb_dir = lmdb_dir
self.save_net_dir = save_net_dir
self.phoc_unigram_levels = phoc_unigram_levels
self.recreate_lmdbs = recreate_lmdbs
self.debug_mode = debug_mode
self.metric = metric
self.annotation_delimiter = annotation_delimiter
self.use_lower_case_only = use_lower_case_only
self.use_bigrams = use_bigrams
# store the Caffe parameters
self.gpu_id = gpu_id
self.learning_rate = learning_rate
self.momentum = momentum
self.weight_decay = weight_decay
self.batch_size = batch_size
self.test_interval = test_interval
self.display = display
self.max_iter = max_iter
self.step_size = step_size
self.gamma = gamma
# misc members for training/evaluation
if self.gpu_id is not None:
self.solver_mode = 'GPU'
else:
self.solver_mode = 'CPU'
self.min_image_width_height = 26
self.epoch_map = None
self.test_iter = None
self.dataset_name = None
# set up the logging
logging_format = '[%(asctime)-19s, %(name)s] %(message)s'
if self.debug_mode:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
logging.basicConfig(level=logging_level, format=logging_format)
self.logger = logging.getLogger(self.__class__.__name__)
def train_phocnet(self):
self.logger.info('--- Running PHOCNet Training ---')
# --- Step 1: check if we need to create the LMDBs
# load the word lists
xml_reader = XMLReader(make_lower_case=self.use_lower_case_only)
self.dataset_name, train_list, test_list = xml_reader.load_train_test_xml(train_xml_path=self.train_annotation_file,
test_xml_path=self.test_annotation_file,
img_dir=self.doc_img_dir)
phoc_unigrams = unigrams_from_word_list(word_list=train_list, split_character=self.annotation_delimiter)
self.logger.info('PHOC unigrams: %s', ' '.join(phoc_unigrams))
self.test_iter = len(test_list)
self.logger.info('Using dataset \'%s\'', self.dataset_name)
# check if we need to create LMDBs
lmdb_prefix = '%s_nti%d_pul%s' % (self.dataset_name, self.n_train_images,
'-'.join([str(elem) for elem in self.phoc_unigram_levels]))
train_word_images_lmdb_path = os.path.join(self.lmdb_dir, '%s_train_word_images_lmdb' % lmdb_prefix)
train_phoc_lmdb_path = os.path.join(self.lmdb_dir, '%s_train_phocs_lmdb' % lmdb_prefix)
test_word_images_lmdb_path = os.path.join(self.lmdb_dir, '%s_test_word_images_lmdb' % lmdb_prefix)
test_phoc_lmdb_path = os.path.join(self.lmdb_dir, '%s_test_phocs_lmdb' % lmdb_prefix)
lmdbs_exist = (os.path.exists(train_word_images_lmdb_path),
os.path.exists(train_phoc_lmdb_path),
os.path.exists(test_word_images_lmdb_path),
os.path.exists(test_phoc_lmdb_path))
if self.use_bigrams:
n_bigrams = 50
bigrams = get_most_common_n_grams(words=[word.get_transcription()
for word in train_list],
num_results=n_bigrams, n=2)
bigram_levels = [2]
else:
n_bigrams = 0
bigrams = None
bigram_levels = None
if not np.all(lmdbs_exist) or self.recreate_lmdbs:
self.logger.info('Creating LMDBs...')
train_phocs = build_phoc(words=[word.get_transcription() for word in train_list],
phoc_unigrams=phoc_unigrams, unigram_levels=self.phoc_unigram_levels,
phoc_bigrams=bigrams, bigram_levels=bigram_levels,
split_character=self.annotation_delimiter,
on_unknown_unigram='warn')
test_phocs = build_phoc(words=[word.get_transcription() for word in test_list],
phoc_unigrams=phoc_unigrams, unigram_levels=self.phoc_unigram_levels,
phoc_bigrams=bigrams, bigram_levels=bigram_levels,
split_character=self.annotation_delimiter,
on_unknown_unigram='warn')
self._create_train_test_phocs_lmdbs(train_list=train_list, train_phocs=train_phocs,
test_list=test_list, test_phocs=test_phocs,
train_word_images_lmdb_path=train_word_images_lmdb_path,
train_phoc_lmdb_path=train_phoc_lmdb_path,
test_word_images_lmdb_path=test_word_images_lmdb_path,
test_phoc_lmdb_path=test_phoc_lmdb_path)
else:
self.logger.info('Found LMDBs...')
# --- Step 2: create the proto files
self.logger.info('Saving proto files...')
# prepare the output paths
train_proto_path = os.path.join(self.proto_dir, 'train_phocnet_%s.prototxt' % self.dataset_name)
test_proto_path = os.path.join(self.proto_dir, 'test_phocnet_%s.prototxt' % self.dataset_name)
solver_proto_path = os.path.join(self.proto_dir, 'solver_phocnet_%s.prototxt' % self.dataset_name)
# generate the proto files
n_attributes = np.sum(self.phoc_unigram_levels)*len(phoc_unigrams)
if self.use_bigrams:
n_attributes += np.sum(bigram_levels)*n_bigrams
mpg = ModelProtoGenerator(initialization='msra', use_cudnn_engine=self.gpu_id is not None)
train_proto = mpg.get_phocnet(word_image_lmdb_path=train_word_images_lmdb_path, phoc_lmdb_path=train_phoc_lmdb_path,
phoc_size=n_attributes,
generate_deploy=False)
test_proto = mpg.get_phocnet(word_image_lmdb_path=test_word_images_lmdb_path, phoc_lmdb_path=test_phoc_lmdb_path,
phoc_size=n_attributes, generate_deploy=False)
solver_proto = generate_solver_proto(train_net=train_proto_path, test_net=test_proto_path,
base_lr=self.learning_rate, momentum=self.momentum, display=self.display,
lr_policy='step', gamma=self.gamma, stepsize=self.step_size,
solver_mode=self.solver_mode, iter_size=self.batch_size, max_iter=self.max_iter,
average_loss=self.display, test_iter=self.test_iter, test_interval=self.test_interval,
weight_decay=self.weight_decay)
# save the proto files
save_prototxt(file_path=train_proto_path, proto_object=train_proto, header_comment='Train PHOCNet %s' % self.dataset_name)
save_prototxt(file_path=test_proto_path, proto_object=test_proto, header_comment='Test PHOCNet %s' % self.dataset_name)
save_prototxt(file_path=solver_proto_path, proto_object=solver_proto, header_comment='Solver PHOCNet %s' % self.dataset_name)
# --- Step 3: train the PHOCNet
self.logger.info('Starting SGD...')
self._run_sgd(solver_proto_path=solver_proto_path)
def pretrain_callback(self, solver):
'''
Method called before starting the training
'''
# init numpy arrays for mAP results
epochs = self.max_iter/self.test_interval
self.epoch_map = np.zeros(epochs+1)
self.epoch_map[0], _ = calc_map_from_cnn_features(solver=solver,
test_iterations=self.test_iter,
metric=self.metric)
self.logger.info('mAP: %f', self.epoch_map[0])
def test_callback(self, solver, epoch):
'''
Method called every self.test_interval iterations during training
'''
self.logger.info('Evaluating CNN after %d steps:', epoch*solver.param.test_interval)
self.epoch_map[epoch+1], _ = calc_map_from_cnn_features(solver=solver,
test_iterations=self.test_iter,
metric=self.metric)
self.logger.info('mAP: %f', self.epoch_map[epoch+1])
def posttrain_callback(self, solver):
'''
Method called after finishing the training
'''
# if self.save_net is not None, save the PHOCNet to the desired location
if self.save_net_dir is not None:
filename = 'phocnet_%s_nti%d_pul%s.binaryproto' % (self.dataset_name, self.n_train_images,
'-'.join([str(elem) for elem in self.phoc_unigram_levels]))
solver.net.save(os.path.join(self.save_net_dir, filename))
def _create_train_test_phocs_lmdbs(self, train_list, train_phocs, test_list, test_phocs,
train_word_images_lmdb_path, train_phoc_lmdb_path,
test_word_images_lmdb_path, test_phoc_lmdb_path):
start_time = time.time()
# --- TRAIN IMAGES
# find all unique transcriptions and the label map...
_, transcription_map = self.__get_unique_transcriptions_and_labelmap(train_list, test_list)
# get the numeric training labels plus a random order to insert them into
# create the numeric labels and counts
train_labels = np.array([transcription_map[word.get_transcription()] for word in train_list])
unique_train_labels, counts = np.unique(train_labels, return_counts=True)
# find the number of images that should be present for training per class
n_images_per_class = self.n_train_images/unique_train_labels.shape[0] + 1
# create randomly shuffled numbers for later use as keys
random_indices = list(xrange(n_images_per_class*unique_train_labels.shape[0]))
np.random.shuffle(random_indices)
#set random limits for affine transform
random_limits = (0.8, 1.1)
n_rescales = 0
# loading should be done in gray scale
load_grayscale = True
# create train LMDB
self.logger.info('Creating Training LMDB (%d total word images)', len(random_indices))
lmdb_creator = CaffeLMDBCreator()
lmdb_creator.open_dual_lmdb_for_write(image_lmdb_path=train_word_images_lmdb_path,
additional_lmdb_path=train_phoc_lmdb_path,
create=True)
for cur_label, count in zip(unique_train_labels, counts):
# find the words for the current class label and the
# corresponding PHOC
cur_word_indices = np.where(train_labels == cur_label)[0]
cur_transcription = train_list[cur_word_indices[0]].get_transcription()
cur_phoc = NumpyHelper.get_unique_rows(train_phocs[cur_word_indices])
# unique rows should only return one specific PHOC
if cur_phoc.shape[0] != 1:
raise ValueError('Extracted more than one PHOC for label %d' % cur_label)
cur_phoc = np.atleast_3d(cur_phoc).transpose((2,0,1)).astype(np.uint8)
            # if there are too many images for the current word image class,
# draw from them and cut the rest off
if count > n_images_per_class:
np.random.shuffle(cur_word_indices)
cur_word_indices = cur_word_indices[:n_images_per_class]
# load the word images
cur_word_images = []
for idx in cur_word_indices:
img = train_list[idx].get_word_image(gray_scale=load_grayscale)
# check image size
img, resized = self.__check_size(img)
n_rescales += int(resized)
# append to the current word images and
# put into LMDB
cur_word_images.append(img)
key = '%s_%s' % (str(random_indices.pop()).zfill(8), cur_transcription.encode('ascii', 'ignore'))
lmdb_creator.put_dual(img_mat=np.atleast_3d(img).transpose((2,0,1)).astype(np.uint8),
additional_mat=cur_phoc, label=cur_label, key=key)
# extract the extra augmented images
# the random limits are the maximum percentage
# that the destination point may deviate from the reference point
# in the affine transform
if len(cur_word_images) < n_images_per_class:
# create the warped images
inds = np.random.randint(len(cur_word_images), size=n_images_per_class - len(cur_word_images))
for ind in inds:
aug_img = AugmentationCreator.create_affine_transform_augmentation(img=cur_word_images[ind], random_limits=random_limits)
aug_img = np.atleast_3d(aug_img).transpose((2,0,1)).astype(np.uint8)
key = '%s_%s' % (str(random_indices.pop()).zfill(8), cur_transcription.encode('ascii', 'ignore'))
lmdb_creator.put_dual(img_mat=aug_img, additional_mat=cur_phoc, label=cur_label, key=key)
# wrap up training LMDB creation
if len(random_indices) != 0:
raise ValueError('Random Indices are not empty, something went wrong during training LMDB creation')
lmdb_creator.finish_creation()
# write the label map to the LMDBs as well
write_list(file_path=train_word_images_lmdb_path + '/label_map.txt',
line_list=['%s %s' % elem for elem in transcription_map.items()])
write_list(file_path=train_phoc_lmdb_path + '/label_map.txt',
line_list=['%s %s' % elem for elem in transcription_map.items()])
self.logger.info('Finished processing train words (took %s, %d rescales)', convert_secs2HHMMSS(time.time() - start_time), n_rescales)
# --- TEST IMAGES
self.logger.info('Creating Test LMDB (%d total word images)', len(test_list))
n_rescales = 0
start_time = time.time()
lmdb_creator.open_dual_lmdb_for_write(image_lmdb_path=test_word_images_lmdb_path, additional_lmdb_path=test_phoc_lmdb_path,
create=True, label_map=transcription_map)
for word, phoc in zip(test_list, test_phocs):
if word.get_transcription() not in transcription_map:
transcription_map[word.get_transcription()] = len(transcription_map)
img = word.get_word_image(gray_scale=load_grayscale)
img, resized = self.__check_size(img)
if img is None:
self.logger.warning('!WARNING! Found image with 0 width or height!')
else:
n_rescales += int(resized)
img = np.atleast_3d(img).transpose((2,0,1)).astype(np.uint8)
phoc_3d = np.atleast_3d(phoc).transpose((2,0,1)).astype(np.uint8)
lmdb_creator.put_dual(img_mat=img, additional_mat=phoc_3d, label=transcription_map[word.get_transcription()])
lmdb_creator.finish_creation()
write_list(file_path=test_word_images_lmdb_path + '/label_map.txt',
line_list=['%s %s' % elem for elem in transcription_map.items()])
write_list(file_path=test_phoc_lmdb_path + '/label_map.txt',
line_list=['%s %s' % elem for elem in transcription_map.items()])
self.logger.info('Finished processing test words (took %s, %d rescales)', convert_secs2HHMMSS(time.time() - start_time), n_rescales)
def __check_size(self, img):
'''
checks if the image accords to the minimum size requirements
Returns:
tuple (img, bool):
img: the original image if the image size was ok, a resized image otherwise
bool: flag indicating whether the image was resized
'''
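        # e.g. with min_image_width_height=26, a 10x50 image is rescaled by
        # 27/10 = 2.7 to 27x135 so that its smaller side exceeds the minimum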
if np.amin(img.shape[:2]) < self.min_image_width_height:
if np.amin(img.shape[:2]) == 0:
return None, False
scale = float(self.min_image_width_height+1)/float(np.amin(img.shape[:2]))
new_shape = (int(scale*img.shape[0]), int(scale*img.shape[1]))
new_img = resize(image=img, output_shape=new_shape)
return new_img, True
else:
return img, False
def __get_unique_transcriptions_and_labelmap(self, train_list, test_list):
'''
Returns a list of unique transcriptions for the given train and test lists
and creates a dictionary mapping transcriptions to numeric class labels.
'''
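        # e.g. ['the', 'cat'] -> (['the', 'cat'], {'the': 0, 'cat': 1})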
unique_transcriptions = [word.get_transcription() for word in train_list]
unique_transcriptions.extend([word.get_transcription() for word in test_list])
unique_transcriptions = list(set(unique_transcriptions))
transcription_map = dict((k,v) for v,k in enumerate(unique_transcriptions))
return unique_transcriptions, transcription_map
def _run_sgd(self, solver_proto_path):
'''
Starts the SGD training of the PHOCNet
Args:
solver_proto_path (str): the absolute path to the solver protobuffer file to use
'''
# Set CPU/GPU mode for solver training
        if self.gpu_id is not None:
self.logger.info('Setting Caffe to GPU mode using device %d', self.gpu_id)
caffe.set_mode_gpu()
caffe.set_device(self.gpu_id)
else:
self.logger.info('Setting Caffe to CPU mode')
caffe.set_mode_cpu()
# Create SGD solver
self.logger.info('Using solver protofile at %s', solver_proto_path)
solver = self.__get_solver(solver_proto_path)
epochs = self.max_iter/self.test_interval
# run test on the net before training
self.logger.info('Running pre-train evaluation')
self.pretrain_callback(solver=solver)
# run the training
self.logger.info('Finished Setup, running SGD')
for epoch in xrange(epochs):
# run training until we want to test
self.__solver_step(solver, self.test_interval)
# run test callback after test_interval iterations
self.logger.debug('Running test evaluation')
self.test_callback(solver=solver, epoch=epoch)
# if we have iterations left to compute, do so
iters_left = self.max_iter % self.test_interval
if iters_left > 0:
self.__solver_step(solver, iters_left)
# run post train callback
self.logger.info('Running post-train evaluation')
self.posttrain_callback(solver=solver)
# return the solver
return solver
def __solver_step(self, solver, steps):
'''
Runs Caffe solver suppressing Caffe output if necessary
'''
if not self.debug_mode:
with Suppressor():
solver.step(steps)
else:
solver.step(steps)
def __get_solver(self, solver_proto_path):
'''
Returns a caffe.SGDSolver for the given protofile path,
ignoring Caffe command line chatter if debug mode is not set
to True.
'''
if not self.debug_mode:
# disable Caffe init chatter when not in debug
with Suppressor():
return caffe.SGDSolver(solver_proto_path)
else:
return caffe.SGDSolver(solver_proto_path)
``` |
{
"source": "jpuigcerver/prob-phoc",
"score": 3
} |
#### File: prob-phoc/prob_phoc/__init__.py
```python
from __future__ import absolute_import
from __future__ import division
import numpy as np
import torch
import prob_phoc._C as _C
def _convert_to_tensor_if_needed(x):
if isinstance(x, np.ndarray):
return torch.from_numpy(x)
elif torch.is_tensor(x):
return x
else:
raise ValueError("The argument cannot be converted to a PyTorch tensor")
def cphoc(xa, xb, y=None, method="sum_prod_log"):
"""Computes probabilistic PHOC relevance scores between each pair of inputs.
"""
xa = _convert_to_tensor_if_needed(xa)
xb = _convert_to_tensor_if_needed(xb)
if y is None:
y = xa.new(xa.size(0), xb.size(0))
else:
y = _convert_to_tensor_if_needed(y)
_C.cphoc(xa, xb, y, method)
return y
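# Minimal usage sketch (assumes rows are PHOC probability vectors in [0, 1]):
#   import numpy as np
#   xa = np.random.rand(4, 8).astype(np.float32)
#   xb = np.random.rand(5, 8).astype(np.float32)
#   scores = cphoc(xa, xb)  # -> 4 x 5 tensor of pairwise relevance scores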
def pphoc(x, y=None, method="sum_prod_log"):
"""Pairwise probabilistic PHOC relevance scores."""
x = _convert_to_tensor_if_needed(x)
if y is None:
y = x.new(x.size(0) * (x.size(0) - 1) // 2)
else:
y = _convert_to_tensor_if_needed(y)
_C.pphoc(x, y, method)
return y
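# note: pphoc returns a condensed vector of length n * (n - 1) / 2, analogous
# to scipy.spatial.distance.pdist, covering each unordered pair exactly once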
``` |
{
"source": "jpuigcerver/rnn2d",
"score": 2
} |
#### File: rnn2d/ref_impl/MultiDirectionalTwoDLSTMOp.py
```python
import theano
import theano.gradient
import theano.printing
import theano.gof
from theano import gof
from theano.sandbox.cuda.basic_ops import (as_cuda_ndarray_variable, gpu_contiguous)
from Util import raw_variable
from theano.gof.opt import OpSub
from theano.compile import optdb
import theano.tensor as T
from Util import get_c_support_code_common, get_c_support_code_mdlstm
class MultiDirectionalTwoDLSTMOpGrad(theano.sandbox.cuda.GpuOp):
__props__ = ("inplace",)
def __init__(self, inplace):
super(MultiDirectionalTwoDLSTMOpGrad, self).__init__()
self.inplace = inplace
if inplace:
#inputs: X, 4xW, 4xV_h, 4xV_v, 4xb, 4xDy, 4xY, 4xH
#note: outputs [17,18,19,20] are the DYs, but we don't use them here for inplace,
            #as they will usually be aligned with each other (at least when the sum of the outputs is used)
#all outputs operate inplace on inputs [26,27,28,29] (which are the Hs)
#but when the input is marked multiple times, we get an error
#so we do this workaround
#anyway theano knows that these inputs will be destroyed, so it should be OK
self.destroy_map = {4: [26], 5: [27], 6: [28], 7: [29]}
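            # (Theano's destroy_map maps output index -> list of input indices
            # whose storage the op may overwrite)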
def make_node(self, X, W1, W2, W3, W4, V_h1, V_h2, V_h3, V_h4, V_v1, V_v2, V_v3, V_v4,
b1, b2, b3, b4, sizes, DY1, DY2, DY3, DY4, Y1, Y2, Y3, Y4, H1, H2, H3, H4):
var_names = ["X", "W1", "W2", "W3", "W4", "V_h1", "V_h2", "V_h3", "V_h4",
"V_v1", "V_v2", "V_v3", "V_v4", "b1", "b2", "b3", "b4",
"DY1", "DY2", "DY3", "DY4", "Y1", "Y2", "Y3", "Y4",
"H1", "H2", "H3", "H4"]
lcl = locals()
for var_name in var_names:
lcl[var_name] = gpu_contiguous(as_cuda_ndarray_variable(lcl[var_name]))
assert lcl[var_name].dtype == "float32"
#note: sizes lives on the CPU!
sizes = T.as_tensor_variable(sizes)
assert sizes.dtype == "float32"
expected_ndims = [4] + ([2] * 12) + ([1] * 4) + ([4] * 12)
assert len(var_names) == len(expected_ndims), (len(var_names), len(expected_ndims))
for var_name, expected_ndim in zip(var_names, expected_ndims):
assert lcl[var_name].ndim == expected_ndim, \
(var_name, lcl[var_name].name, lcl[var_name].ndim, expected_ndim)
assert sizes.ndim == 2
all_vars_no_sizes = [lcl[var_name] for var_name in var_names]
all_vars = all_vars_no_sizes[:17] + [sizes] + all_vars_no_sizes[17:]
inputs_vars = all_vars[:17]
return theano.Apply(self, all_vars, [v.type() for v in inputs_vars])
def c_support_code(self):
return get_c_support_code_common() + get_c_support_code_mdlstm()
def c_code(self, node, name, input_names, output_names, sub):
X, W1, W2, W3, W4, V_h1, V_h2, V_h3, V_h4, V_v1, V_v2, V_v3, V_v4, \
b1, b2, b3, b4, sizes, DY1, DY2, DY3, DY4, Y1, Y2, Y3, Y4, H1, H2, H3, H4 = input_names
DX, DW1, DW2, DW3, DW4, DV_h1, DV_h2, DV_h3, DV_h4, \
DV_v1, DV_v2, DV_v3, DV_v4, Db1, Db2, Db3, Db4 = output_names
fail = sub['fail']
inplace = "true" if self.inplace else "false"
return """
//std::cout << "MultiDirectionalTwoDLSTMOpGrad called" << std::endl;
if(!%(inplace)s)
{
std::cout << "warning, inplace optimization failed, not working inplace" << std::endl;
}
if(%(DX)s || %(DW1)s || %(DW2)s || %(DW3)s || %(DW4)s ||
%(DV_h1)s || %(DV_h2)s || %(DV_h3)s || %(DV_h4)s ||
%(DV_v1)s || %(DV_v2)s || %(DV_v3)s || %(DV_v4)s ||
%(Db1)s || %(Db2)s || %(Db3)s || %(Db4)s)
{
cout << "output storage already exists" << endl;
//TODO check if we can reuse it
Py_XDECREF(%(DX)s);
Py_XDECREF(%(DW1)s);
Py_XDECREF(%(DW2)s);
Py_XDECREF(%(DW3)s);
Py_XDECREF(%(DW4)s);
Py_XDECREF(%(DV_h1)s);
Py_XDECREF(%(DV_h2)s);
Py_XDECREF(%(DV_h3)s);
Py_XDECREF(%(DV_h4)s);
Py_XDECREF(%(DV_v1)s);
Py_XDECREF(%(DV_v2)s);
Py_XDECREF(%(DV_v3)s);
Py_XDECREF(%(DV_v4)s);
Py_XDECREF(%(Db1)s);
Py_XDECREF(%(Db2)s);
Py_XDECREF(%(Db3)s);
Py_XDECREF(%(Db4)s);
}
const int * X_dim = CudaNdarray_HOST_DIMS(%(X)s);
const int * Y_dim = CudaNdarray_HOST_DIMS(%(Y1)s);
const int height = X_dim[0];
const int width = X_dim[1];
const int n_minibatch = X_dim[2];
const int n_diags = width + height - 1;
const int max_diag_size = std::min(Y_dim[0], Y_dim[1]);
CudaNdarray * delta1 = 0;
CudaNdarray * delta2 = 0;
CudaNdarray * delta3 = 0;
CudaNdarray * delta4 = 0;
if(%(inplace)s)
{
delta1 = %(H1)s;
delta2 = %(H2)s;
delta3 = %(H3)s;
delta4 = %(H4)s;
}
else
{
delta1 = (CudaNdarray *) CudaNdarray_Copy(%(H1)s);
delta2 = (CudaNdarray *) CudaNdarray_Copy(%(H2)s);
delta3 = (CudaNdarray *) CudaNdarray_Copy(%(H3)s);
delta4 = (CudaNdarray *) CudaNdarray_Copy(%(H4)s);
}
CudaNdarray * epsilon1 = (CudaNdarray *) CudaNdarray_Copy(%(DY1)s);
CudaNdarray * epsilon2 = (CudaNdarray *) CudaNdarray_Copy(%(DY2)s);
CudaNdarray * epsilon3 = (CudaNdarray *) CudaNdarray_Copy(%(DY3)s);
CudaNdarray * epsilon4 = (CudaNdarray *) CudaNdarray_Copy(%(DY4)s);
const int workmem1_dims[] = {2, Y_dim[0], Y_dim[1], Y_dim[2], Y_dim[3]};
CudaNdarray * workmem1_1 = (CudaNdarray*) MyCudaNdarray_NewDims(5, workmem1_dims);
assert(workmem1_1);
CudaNdarray * workmem1_2 = (CudaNdarray*) MyCudaNdarray_NewDims(5, workmem1_dims);
assert(workmem1_2);
CudaNdarray * workmem1_3 = (CudaNdarray*) MyCudaNdarray_NewDims(5, workmem1_dims);
assert(workmem1_3);
CudaNdarray * workmem1_4 = (CudaNdarray*) MyCudaNdarray_NewDims(5, workmem1_dims);
assert(workmem1_4);
//we use floats to store float*'s, as CudaNdarray only supports floats. factor 10 for lstm bwd kernel
int ptr_storage_dims[] = {4 * 10 * max_diag_size * sizeof(float*) / sizeof(float)};
CudaNdarray * ptr_storage = (CudaNdarray*) MyCudaNdarray_NewDims(1, ptr_storage_dims);
assert(ptr_storage);
//valid: float tensor of 1s and 0s indicating the size of the image
//4 dirs * max_diag_size * n_minibatch
int valid_dims[] = {4 * max_diag_size * n_minibatch};
CudaNdarray * valid_storage = (CudaNdarray*) MyCudaNdarray_NewDims(1, valid_dims);
assert(valid_storage);
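        //the backward pass walks the anti-diagonals in reverse order: gradient
        //dependencies are mirrored, flowing from the bottom-right diagonal back
        //to the top-left one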
for(int diag = n_diags-1; diag >= 0; --diag)
{
int diag_size = min(diag+1, min(abs(n_diags-diag), min(width, height)));
int y_high = min(diag, height-1);
int x_low = max(diag-height+1,0);
vector<int> ys_h, xs_h, ys_v, xs_v, ys, xs;
for(int idx = 0; idx < diag_size; ++idx)
{
int y = y_high - idx;
int x = x_low + idx;
bool rightBorder = (x == X_dim[1]-1);
if(!rightBorder)
{
ys_h.push_back(y);
xs_h.push_back(x);
}
bool botBorder = (y == X_dim[0]-1);
if(!botBorder)
{
ys_v.push_back(y);
xs_v.push_back(x);
}
ys.push_back(y);
xs.push_back(x);
}
affine_y_x_batched_multidir(0, 1, delta1, delta2, delta3, delta4, %(V_h1)s, %(V_h2)s, %(V_h3)s, %(V_h4)s,
epsilon1, epsilon2, epsilon3, epsilon4, ys_h, xs_h, ptr_storage, height, width, 0, false, true);
affine_y_x_batched_multidir(1, 0, delta1, delta2, delta3, delta4, %(V_v1)s, %(V_v2)s, %(V_v3)s, %(V_v4)s,
epsilon1, epsilon2, epsilon3, epsilon4, ys_v, xs_v, ptr_storage, height, width, 0, false, true);
do_lstm_bwd_batched_multidir(delta1, delta2, delta3, delta4, epsilon1, epsilon2, epsilon3, epsilon4,
%(Y1)s, %(Y2)s, %(Y3)s, %(Y4)s, workmem1_1, workmem1_2, workmem1_3, workmem1_4,
X_dim[0], X_dim[1], ys, xs, ptr_storage, valid_storage, %(sizes)s);
}
Py_XDECREF(ptr_storage);
Py_XDECREF(valid_storage);
Py_XDECREF(workmem1_1);
Py_XDECREF(workmem1_2);
Py_XDECREF(workmem1_3);
Py_XDECREF(workmem1_4);
%(DX)s = CudaNdarray_uninitialized_like(%(X)s);
assert(%(DX)s);
%(DW1)s = CudaNdarray_uninitialized_like(%(W1)s);
assert(%(DW1)s);
%(DW2)s = CudaNdarray_uninitialized_like(%(W2)s);
assert(%(DW2)s);
%(DW3)s = CudaNdarray_uninitialized_like(%(W3)s);
assert(%(DW3)s);
%(DW4)s = CudaNdarray_uninitialized_like(%(W4)s);
assert(%(DW4)s);
//DW = X^T * delta
affine_global(%(X)s, delta1, %(DW1)s, true, false, 0, 0, 0.0f);
affine_global(%(X)s, delta2, %(DW2)s, true, false, 0, 0, 0.0f);
affine_global(%(X)s, delta3, %(DW3)s, true, false, 0, 0, 0.0f);
affine_global(%(X)s, delta4, %(DW4)s, true, false, 0, 0, 0.0f);
//important! mind the order, first use X, then update DX, which might be aligned to X
//DX = delta * W^T
affine_global(delta1, %(W1)s, %(DX)s, false, true, 0, 0, 0.0f);
affine_global(delta2, %(W2)s, %(DX)s, false, true, 0, 0, 1.0f);
affine_global(delta3, %(W3)s, %(DX)s, false, true, 0, 0, 1.0f);
affine_global(delta4, %(W4)s, %(DX)s, false, true, 0, 0, 1.0f);
//Db = (1 ... 1) * delta
%(Db1)s = sumOverAllButLastDimensions(delta1);
%(Db2)s = sumOverAllButLastDimensions(delta2);
%(Db3)s = sumOverAllButLastDimensions(delta3);
%(Db4)s = sumOverAllButLastDimensions(delta4);
//copy left/right part to workmem2 and set to 0 (could be done more efficient, but profiling shows, it's not worth it)
const int * H_dim = CudaNdarray_HOST_DIMS(%(H1)s);
const int workmem2_dims[] = {H_dim[0], H_dim[2], H_dim[3]};
const int block_size = H_dim[2] * H_dim[3];
CudaNdarray * workmem2_1 = (CudaNdarray*) MyCudaNdarray_NewDims(3, workmem2_dims);
assert(workmem2_1);
CudaNdarray * workmem2_2 = (CudaNdarray*) MyCudaNdarray_NewDims(3, workmem2_dims);
assert(workmem2_2);
CudaNdarray * workmem2_3 = (CudaNdarray*) MyCudaNdarray_NewDims(3, workmem2_dims);
assert(workmem2_3);
CudaNdarray * workmem2_4 = (CudaNdarray*) MyCudaNdarray_NewDims(3, workmem2_dims);
assert(workmem2_4);
for(int y = 0; y < Y_dim[0]; ++y)
{
float * workmem2_1_data_ptr = CudaNdarray_DEV_DATA(workmem2_1) + y * block_size;
float * delta1_data_ptr = data_ptr(delta1, y, 0);
HANDLE_ERROR(cudaMemcpy(workmem2_1_data_ptr, delta1_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
HANDLE_ERROR(cudaMemset(delta1_data_ptr, 0, sizeof(float) * H_dim[2] * H_dim[3]));
float * workmem2_2_data_ptr = CudaNdarray_DEV_DATA(workmem2_2) + y * block_size;
float * delta2_data_ptr = data_ptr(delta2, y, 0);
HANDLE_ERROR(cudaMemcpy(workmem2_2_data_ptr, delta2_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
HANDLE_ERROR(cudaMemset(delta2_data_ptr, 0, sizeof(float) * H_dim[2] * H_dim[3]));
float * workmem2_3_data_ptr = CudaNdarray_DEV_DATA(workmem2_3) + y * block_size;
float * delta3_data_ptr = data_ptr(delta3, y, width - 1);
HANDLE_ERROR(cudaMemcpy(workmem2_3_data_ptr, delta3_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
HANDLE_ERROR(cudaMemset(delta3_data_ptr, 0, sizeof(float) * H_dim[2] * H_dim[3]));
float * workmem2_4_data_ptr = CudaNdarray_DEV_DATA(workmem2_4) + y * block_size;
float * delta4_data_ptr = data_ptr(delta4, y, width - 1);
HANDLE_ERROR(cudaMemcpy(workmem2_4_data_ptr, delta4_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
HANDLE_ERROR(cudaMemset(delta4_data_ptr, 0, sizeof(float) * H_dim[2] * H_dim[3]));
}
%(DV_h1)s = CudaNdarray_uninitialized_like(%(V_h1)s);
assert(%(DV_h1)s);
%(DV_h2)s = CudaNdarray_uninitialized_like(%(V_h2)s);
assert(%(DV_h2)s);
%(DV_h3)s = CudaNdarray_uninitialized_like(%(V_h3)s);
assert(%(DV_h3)s);
%(DV_h4)s = CudaNdarray_uninitialized_like(%(V_h4)s);
assert(%(DV_h4)s);
//DV_h = Y[0..end-1]^T * delta[1..end]
affine_global(%(Y1)s, delta1, %(DV_h1)s, true, false, 0, 1, 0.0f);
affine_global(%(Y2)s, delta2, %(DV_h2)s, true, false, 0, 1, 0.0f);
affine_global(%(Y3)s, delta3, %(DV_h3)s, true, false, 1, 0, 0.0f);
affine_global(%(Y4)s, delta4, %(DV_h4)s, true, false, 1, 0, 0.0f);
//copy left/right part back
for(int y = 0; y < Y_dim[0]; ++y)
{
float * workmem2_1_data_ptr = CudaNdarray_DEV_DATA(workmem2_1) + y * block_size;
float * delta1_data_ptr = data_ptr(delta1, y, 0);
HANDLE_ERROR(cudaMemcpy(delta1_data_ptr, workmem2_1_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
float * workmem2_2_data_ptr = CudaNdarray_DEV_DATA(workmem2_2) + y * block_size;
float * delta2_data_ptr = data_ptr(delta2, y, 0);
HANDLE_ERROR(cudaMemcpy(delta2_data_ptr, workmem2_2_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
float * workmem2_3_data_ptr = CudaNdarray_DEV_DATA(workmem2_3) + y * block_size;
float * delta3_data_ptr = data_ptr(delta3, y, width - 1);
HANDLE_ERROR(cudaMemcpy(delta3_data_ptr, workmem2_3_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
float * workmem2_4_data_ptr = CudaNdarray_DEV_DATA(workmem2_4) + y * block_size;
float * delta4_data_ptr = data_ptr(delta4, y, width - 1);
HANDLE_ERROR(cudaMemcpy(delta4_data_ptr, workmem2_4_data_ptr, block_size * sizeof(float), cudaMemcpyDeviceToDevice));
}
Py_XDECREF(workmem2_1);
Py_XDECREF(workmem2_2);
Py_XDECREF(workmem2_3);
Py_XDECREF(workmem2_4);
%(DV_v1)s = CudaNdarray_uninitialized_like(%(V_v1)s);
assert(%(DV_v1)s);
%(DV_v2)s = CudaNdarray_uninitialized_like(%(V_v2)s);
assert(%(DV_v2)s);
%(DV_v3)s = CudaNdarray_uninitialized_like(%(V_v3)s);
assert(%(DV_v3)s);
%(DV_v4)s = CudaNdarray_uninitialized_like(%(V_v4)s);
assert(%(DV_v4)s);
//DV_v = Y[0..end-1]^T * delta[1..end]
affine_global(%(Y1)s, delta1, %(DV_v1)s, true, false, 0, Y_dim[1], 0.0f);
affine_global(%(Y2)s, delta2, %(DV_v2)s, true, false, Y_dim[1], 0, 0.0f);
affine_global(%(Y3)s, delta3, %(DV_v3)s, true, false, 0, Y_dim[1], 0.0f);
affine_global(%(Y4)s, delta4, %(DV_v4)s, true, false, Y_dim[1], 0, 0.0f);
//for debugging
/*cout << "=====delta1====" << endl;
CudaNdarray_print_part(delta1);
cout << "=====delta2====" << endl;
CudaNdarray_print_part(delta2);
cout << "=====delta3====" << endl;
CudaNdarray_print_part(delta3);
cout << "=====delta4====" << endl;
CudaNdarray_print_part(delta4);
cout << "===============" << endl;*/
if(!%(inplace)s)
{
Py_XDECREF(delta1);
Py_XDECREF(delta2);
Py_XDECREF(delta3);
Py_XDECREF(delta4);
}
Py_XDECREF(epsilon1);
Py_XDECREF(epsilon2);
Py_XDECREF(epsilon3);
Py_XDECREF(epsilon4);
""" % locals()
#!!! change this when changing the code!
def c_code_cache_version(self):
return 2, 10
MultiDirectionalTwoDLSTMOpGradNoInplaceInstance = MultiDirectionalTwoDLSTMOpGrad(inplace=False)
MultiDirectionalTwoDLSTMOpGradInplaceInstance = MultiDirectionalTwoDLSTMOpGrad(inplace=True)
MultiDirectionalTwoDLSTMOpInplaceOpt = OpSub(MultiDirectionalTwoDLSTMOpGradNoInplaceInstance,
MultiDirectionalTwoDLSTMOpGradInplaceInstance)
#hack to avoid being called twice
if not hasattr(optdb, 'MultiDirectionalTwoDLSTMOpInplaceOpt_registered'):
optdb.register('MultiDirectionalTwoDLSTMOpInplaceOpt',
theano.gof.TopoOptimizer(MultiDirectionalTwoDLSTMOpInplaceOpt, failure_callback=gof.TopoOptimizer.warn_inplace),
50.0, 'fast_run', 'inplace', 'gpuarray')
optdb.MultiDirectionalTwoDLSTMOpInplaceOpt_registered = True
class MultiDirectionalTwoDLSTMOp(theano.sandbox.cuda.GpuOp):
__props__ = ()
def __init__(self):
super(MultiDirectionalTwoDLSTMOp, self).__init__()
def make_node(self, X, W1, W2, W3, W4, V_h1, V_h2, V_h3, V_h4, V_v1, V_v2, V_v3, V_v4, b1, b2, b3, b4, sizes):
var_names = ["X", "W1", "W2", "W3", "W4", "V_h1", "V_h2", "V_h3", "V_h4",
"V_v1", "V_v2", "V_v3", "V_v4", "b1", "b2", "b3", "b4"]
lcl = locals()
for var_name in var_names:
lcl[var_name] = gpu_contiguous(as_cuda_ndarray_variable(lcl[var_name]))
assert lcl[var_name].dtype == "float32"
#note: sizes lives on the CPU!
sizes = T.as_tensor_variable(sizes)
assert sizes.dtype == "float32"
assert lcl["X"].ndim == 4
assert lcl["W1"].ndim == 2
assert lcl["W2"].ndim == 2
assert lcl["W3"].ndim == 2
assert lcl["W4"].ndim == 2
assert lcl["V_h1"].ndim == 2
assert lcl["V_h2"].ndim == 2
assert lcl["V_h3"].ndim == 2
assert lcl["V_h4"].ndim == 2
assert lcl["V_v1"].ndim == 2
assert lcl["V_v2"].ndim == 2
assert lcl["V_v3"].ndim == 2
assert lcl["V_v4"].ndim == 2
assert lcl["b1"].ndim == 1
assert lcl["b2"].ndim == 1
assert lcl["b3"].ndim == 1
assert lcl["b4"].ndim == 1
assert sizes.ndim == 2
all_vars = [lcl[var_name] for var_name in var_names] + [sizes]
#results: outputs Y1, Y2, Y3, Y4, (gates and cell states) H1, H2, H3, H4
return theano.Apply(self, all_vars, [lcl["X"].type() for _ in range(8)])
def c_support_code(self):
return get_c_support_code_common() + get_c_support_code_mdlstm()
def c_code(self, node, name, input_names, output_names, sub):
X, W1, W2, W3, W4, V_h1, V_h2, V_h3, V_h4, V_v1, V_v2, V_v3, V_v4, b1, b2, b3, b4, sizes = input_names
Y1, Y2, Y3, Y4, H1, H2, H3, H4 = output_names
fail = sub['fail']
return """
//std::cout << "MultiDirectionalTwoDLSTMOp called" << std::endl;
if(%(Y1)s || %(Y2)s || %(Y3)s || %(Y4)s || %(H1)s || %(H2)s || %(H3)s || %(H4)s)
{
//cout << "Ys or Hs already exist" << endl;
//TODO check if we can reuse it
Py_XDECREF(%(Y1)s);
Py_XDECREF(%(Y2)s);
Py_XDECREF(%(Y3)s);
Py_XDECREF(%(Y4)s);
Py_XDECREF(%(H1)s);
Py_XDECREF(%(H2)s);
Py_XDECREF(%(H3)s);
Py_XDECREF(%(H4)s);
}
const int * X_dim = CudaNdarray_HOST_DIMS(%(X)s);
const int * W_dim = CudaNdarray_HOST_DIMS(%(W1)s);
const int * V_dim = CudaNdarray_HOST_DIMS(%(V_h1)s);
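        //each 2D-LSTM unit uses 5 weight columns per hidden unit (input gate,
        //output gate, cell input, and one forget gate per recurrent direction),
        //hence the factor 5 in the shape checks below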
assert(W_dim[1] %% 5 == 0 && "W has wrong shape");
assert(5 * V_dim[0] == V_dim[1] && "V has wrong shape");
assert(W_dim[1] == V_dim[1]);
assert(W_dim[0] == X_dim[3]);
const int Y_dim[] = {X_dim[0], X_dim[1], X_dim[2], W_dim[1] / 5};
const int H_dim[] = {X_dim[0], X_dim[1], X_dim[2], W_dim[1]};
const int height = X_dim[0];
const int width = X_dim[1];
const int n_minibatch = X_dim[2];
const int max_diag_size = std::min(height, width);
const int n_diags = width + height - 1;
//init Ys
%(Y1)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, Y_dim);
assert(%(Y1)s);
%(Y2)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, Y_dim);
assert(%(Y2)s);
%(Y3)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, Y_dim);
assert(%(Y3)s);
%(Y4)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, Y_dim);
assert(%(Y4)s);
//init Hs
%(H1)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, H_dim);
assert(%(H1)s);
%(H2)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, H_dim);
assert(%(H2)s);
%(H3)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, H_dim);
assert(%(H3)s);
%(H4)s = (CudaNdarray*) MyCudaNdarray_NewDims(4, H_dim);
assert(%(H4)s);
//init Hs with bs
fillmat(%(b1)s, %(H1)s);
fillmat(%(b2)s, %(H2)s);
fillmat(%(b3)s, %(H3)s);
fillmat(%(b4)s, %(H4)s);
//H+=XW
affine_global(%(X)s, %(W1)s, %(H1)s);
affine_global(%(X)s, %(W2)s, %(H2)s);
affine_global(%(X)s, %(W3)s, %(H3)s);
affine_global(%(X)s, %(W4)s, %(H4)s);
//we use floats to store float*'s, as CudaNdarray only supports floats. factor 5 for lstm kernel,
//additional factor 4 for 4 directions
int ptr_storage_dims[] = {4 * 5 * max_diag_size * sizeof(float*) / sizeof(float)};
CudaNdarray * ptr_storage = (CudaNdarray*) MyCudaNdarray_NewDims(1, ptr_storage_dims);
assert(ptr_storage);
//valid: float tensor of 1s and 0s indicating the size of the image
//4 dirs * max_diag_size * n_minibatch
int valid_dims[] = {4 * max_diag_size * n_minibatch};
CudaNdarray * valid_storage = (CudaNdarray*) MyCudaNdarray_NewDims(1, valid_dims);
assert(valid_storage);
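        //the forward pass processes the grid along anti-diagonals: every cell on
        //one diagonal only depends on the previous diagonal, so each diagonal can
        //be computed as one batch; e.g. a 2x3 image is swept as {(0,0)},
        //{(1,0),(0,1)}, {(1,1),(0,2)}, {(1,2)}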
for(int diag = 0; diag < n_diags; ++diag)
{
int diag_size = min(diag+1, min(abs(n_diags-diag), min(width, height)));
int y_high = min(diag, height-1);
int x_low = max(diag-height+1,0);
vector<int> ys_h, xs_h, ys_v, xs_v, ys, xs;
for(int idx = 0; idx < diag_size; ++idx)
{
int y = y_high - idx;
int x = x_low + idx;
if(x > 0)
{
ys_h.push_back(y);
xs_h.push_back(x);
}
if(y > 0)
{
ys_v.push_back(y);
xs_v.push_back(x);
}
ys.push_back(y);
xs.push_back(x);
}
affine_y_x_batched_multidir(0, -1,
%(Y1)s, %(Y2)s, %(Y3)s, %(Y4)s,
%(V_h1)s, %(V_h2)s, %(V_h3)s, %(V_h4)s,
%(H1)s, %(H2)s, %(H3)s, %(H4)s,
ys_h, xs_h, ptr_storage, height, width);
affine_y_x_batched_multidir(-1, 0,
%(Y1)s, %(Y2)s, %(Y3)s, %(Y4)s,
%(V_v1)s, %(V_v2)s, %(V_v3)s, %(V_v4)s,
%(H1)s, %(H2)s, %(H3)s, %(H4)s,
ys_v, xs_v, ptr_storage, height, width);
do_lstm_batched_multidir(%(H1)s, %(H2)s, %(H3)s, %(H4)s,
%(Y1)s, %(Y2)s, %(Y3)s, %(Y4)s,
ys, xs, ptr_storage, valid_storage, %(sizes)s);
}
Py_XDECREF(ptr_storage);
Py_XDECREF(valid_storage);
""" % locals()
def grad(self, inputs, output_grads):
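        # the forward op is re-applied to the raw inputs here so that the grad
        # op receives the matching Y and H outputs as explicit inputs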
raw_inputs = [raw_variable(v) for v in inputs]
fwd_results = self(*raw_inputs)
args = inputs + output_grads[:4] + fwd_results
grads = MultiDirectionalTwoDLSTMOpGradNoInplaceInstance(*args)
Dsizes = theano.gradient.grad_undefined(self, len(inputs) - 1, inputs[-1], 'cannot diff w.r.t. sizes')
return grads + [Dsizes]
# noinspection PyMethodMayBeStatic
def infer_shape(self, node, input_shapes):
Xs, W1s = input_shapes[:2]
        Y_shape = (Xs[0], Xs[1], Xs[2], W1s[1] // 5)
H_shape = (Xs[0], Xs[1], Xs[2], W1s[1])
return [Y_shape, Y_shape, Y_shape, Y_shape, H_shape, H_shape, H_shape, H_shape]
#!!! change this when changing the code!
def c_code_cache_version(self):
return 2, 10
MultiDirectionalTwoDLSTMOpInstance = MultiDirectionalTwoDLSTMOp()
``` |
{
"source": "jpuk/py-caeser-cipher",
"score": 3
} |
#### File: jpuk/py-caeser-cipher/caeser.py
```python
low_ascii = 65
high_ascii = 90
magic_number = 7
alpha = [chr(i) for i in range(low_ascii, high_ascii+1)]
def encode(inString):
inString = inString.upper()
outString = []
for char in inString:
try:
index = alpha.index(char)
except ValueError:
outString.append(char)
continue
if (index + low_ascii) > (high_ascii - magic_number):
valid_char = alpha[index - (len(alpha) - magic_number)]
outString.append(valid_char)
else:
outString.append(chr(index + low_ascii + magic_number))
return outString
def decode(inString):
outString = []
for char in inString:
try:
index = alpha.index(char)
except ValueError:
outString.append(char)
continue
if index < magic_number:
decoded_char = (alpha.index(char) + len(alpha) - magic_number) + low_ascii
else:
decoded_char = alpha.index(char) + low_ascii - magic_number
outString.append(chr(decoded_char))
return outString
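# Worked example (shift of 7; decode expects the uppercase output of encode,
# and non-alphabetic characters pass through unchanged):
if __name__ == "__main__":
    cipher = encode("HELLO, WORLD")
    print("".join(cipher))          # OLSSV, DVYSK
    print("".join(decode(cipher)))  # HELLO, WORLD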
``` |
{
"source": "jpuk/pyDiskX",
"score": 2
} |
#### File: jpuk/pyDiskX/pyDiskX_gui_qt.py
```python
from PyQt5 import QtCore, QtGui, QtWidgets
import pyDiskX
import os
class Ui_MainWindow(object):
selected_folder_name = ""
folder_object = None
previous_folder_object_list = []
folder_list_being_cleared = False
currently_folders_deep = 0
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 703)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.rootDirectoyPushButton = QtWidgets.QPushButton(self.centralwidget)
self.rootDirectoyPushButton.setGeometry(QtCore.QRect(20, 10, 141, 32))
self.rootDirectoyPushButton.setObjectName("rootDirectoyPushButton")
self.folderListView = QtWidgets.QListWidget(self.centralwidget)
self.folderListView.setGeometry(QtCore.QRect(20, 60, 591, 241))
self.folderListView.setObjectName("folderListView")
self.fileListView = QtWidgets.QListWidget(self.centralwidget)
self.fileListView.setGeometry(QtCore.QRect(20, 330, 591, 311))
self.fileListView.setObjectName("fileListView")
self.getTreeSizePushButton = QtWidgets.QPushButton(self.centralwidget)
self.getTreeSizePushButton.setGeometry(QtCore.QRect(220, 10, 113, 32))
self.getTreeSizePushButton.setObjectName("getTreeSizePushButton")
self.foldersLabel = QtWidgets.QLabel(self.centralwidget)
self.foldersLabel.setGeometry(QtCore.QRect(20, 40, 591, 16))
self.foldersLabel.setObjectName("foldersLabel")
self.filesLabel = QtWidgets.QLabel(self.centralwidget)
self.filesLabel.setGeometry(QtCore.QRect(20, 310, 60, 16))
self.filesLabel.setObjectName("filesLabel")
self.backPushButton = QtWidgets.QPushButton(self.centralwidget)
self.backPushButton.setGeometry(QtCore.QRect(160, 10, 61, 32))
self.backPushButton.setObjectName("backPushButton")
self.treeSizeLabel = QtWidgets.QLabel(self.centralwidget)
self.treeSizeLabel.setGeometry(QtCore.QRect(330, 20, 281, 16))
self.treeSizeLabel.setText("")
self.treeSizeLabel.setObjectName("treeSizeLabel")
self.extensionsTreeListWidget = QtWidgets.QListWidget(self.centralwidget)
self.extensionsTreeListWidget.setGeometry(QtCore.QRect(620, 60, 151, 241))
self.extensionsTreeListWidget.setObjectName("extensionsTreeListWidget")
self.extensionsLabel = QtWidgets.QLabel(self.centralwidget)
self.extensionsLabel.setGeometry(QtCore.QRect(620, 40, 151, 20))
self.extensionsLabel.setObjectName("extensionsLabel")
self.extensionsLabel_2 = QtWidgets.QLabel(self.centralwidget)
self.extensionsLabel_2.setGeometry(QtCore.QRect(620, 310, 151, 20))
self.extensionsLabel_2.setObjectName("extensionsLabel_2")
self.extensionsFolderListWidget = QtWidgets.QListWidget(self.centralwidget)
self.extensionsFolderListWidget.setGeometry(QtCore.QRect(620, 330, 151, 311))
self.extensionsFolderListWidget.setObjectName("extensionsFolderListWidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(620, 10, 111, 16))
self.label.setObjectName("label")
self.maxFolderDepthLineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.maxFolderDepthLineEdit.setGeometry(QtCore.QRect(730, 10, 31, 21))
self.maxFolderDepthLineEdit.setObjectName("maxFolderDepthLineEdit")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
# my message handler connectors
self.rootDirectoyPushButton.clicked.connect(self.select_folder_push_button_clicked)
self.folderListView.itemSelectionChanged.connect(self.folder_list_view_selection_changed)
self.getTreeSizePushButton.clicked.connect(self.get_tree_size_push_button_clicked)
self.backPushButton.clicked.connect(self.back_push_button_clicked)
# set ui defaults
self.backPushButton.setDisabled(True)
self.getTreeSizePushButton.setDisabled(True)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "pyDiskX"))
self.rootDirectoyPushButton.setText(_translate("MainWindow", "Select Directory"))
self.getTreeSizePushButton.setText(_translate("MainWindow", "Get Tree Size"))
self.foldersLabel.setText(_translate("MainWindow", "Folders:"))
self.filesLabel.setText(_translate("MainWindow", "Files:"))
self.backPushButton.setText(_translate("MainWindow", "Back"))
self.extensionsLabel.setText(_translate("MainWindow", "File Types: folder tree"))
self.extensionsLabel_2.setText(_translate("MainWindow", "File Types: this folder"))
self.label.setText(_translate("MainWindow", "Max folder depth:"))
## my handler functions
def get_tree_size_push_button_clicked(self):
if self.currently_folders_deep != 0:
self.folder_object = pyDiskX.Folder(full_path_name=self.folder_object.full_path_name)
size = self.folder_object.tree_size.size
self.treeSizeLabel.setText(str(round(size, 4)) + "MB")
def back_push_button_clicked(self):
print("Back push button clicked")
self.clear_folder_list_view()
self.folder_object = self.previous_folder_object_list[self.currently_folders_deep-1]
self.currently_folders_deep -= 1
print("Decreased folders deep to {}".format(self.currently_folders_deep))
if self.currently_folders_deep == 0:
self.previous_folder_object_list = []
self.backPushButton.setDisabled(True)
self.foldersLabel.setText(str("Folders: {}".format(self.folder_object.full_path_name)))
self.folder_list_view_selection_changed(sel=0, first=True)
def update_folder_and_files_list_boxes(self, folder_object):
for ext in folder_object.contained_file_extensions:
text = "{} - {}".format(ext, folder_object.contained_file_extensions[ext])
self.extensionsFolderListWidget.addItem("{}".format(text))
for ext in folder_object.contained_file_extensions_tree:
text = "{} - {}".format(ext, folder_object.contained_file_extensions_tree[ext])
self.extensionsTreeListWidget.addItem("{}".format(text))
# print("Folders")
if folder_object.contained_folders:
for folder in folder_object.contained_folders:
self.folderListView.addItem(folder.folder_name + " - {}MB of files at this level".format(round(folder.size, 4)))
# print(" " + str(folder.folder_name))
# print("Files")
if folder_object.contained_files:
for file in folder_object.contained_files:
self.fileListView.addItem(file.file_name + " - {}MB".format(round(file.size, 4)))
def folder_list_view_selection_changed(self, sel=None, first=False):
if self.folder_list_being_cleared is not True:
if sel is None:
sel = int(self.folderListView.currentIndex().row())
print("Folder list box selection changed {}".format(sel))
if first is not True:
self.clear_folder_list_view()
self.previous_folder_object_list.append(self.folder_object)
self.currently_folders_deep += 1
self.backPushButton.setEnabled(True)
print("Increased folders deep to {}".format(self.currently_folders_deep))
print("previous folders object list contains: {}".format(len(self.previous_folder_object_list)))
self.update_folders_label()
if self.folder_object.contained_folders:
self.folder_object = self.folder_object.contained_folders[sel]
self.update_folders_label()
self.update_folder_and_files_list_boxes(self.folder_object)
def clear_folder_list_view(self):
self.folder_list_being_cleared = True
self.folderListView.clear()
self.fileListView.clear()
self.extensionsFolderListWidget.clear()
self.folder_list_being_cleared = False
def select_folder_push_button_clicked(self):
        self.folder_object = None
        # reset the navigation stack so state from a previous root is not kept
        self.previous_folder_object_list = []
        self.currently_folders_deep = 0
        self.backPushButton.setDisabled(True)
self.folderListView.clear()
self.fileListView.clear()
self.extensionsFolderListWidget.clear()
self.extensionsTreeListWidget.clear()
self.getTreeSizePushButton.setEnabled(True)
self.selected_folder_name = os.path.abspath(QtWidgets.QFileDialog.getExistingDirectory())
print("Select folder push button clicked - {}".format(self.selected_folder_name))
self.update_folders_label()
if self.maxFolderDepthLineEdit.text():
max_depth = int(self.maxFolderDepthLineEdit.text())
else:
max_depth=None
self.folder_object = pyDiskX.Folder(full_path_name=self.selected_folder_name, max_depth=max_depth)
self.update_folders_label()
self.folder_list_view_selection_changed(sel=0, first=True)
def update_folders_label(self):
print("Updating folders label")
if self.folder_object:
self.foldersLabel.setText(str("Folders: {}".format(self.folder_object.full_path_name)))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
```
#### File: jpuk/pyDiskX/pyDiskX.py
```python
import os
import pathlib
_OS = int()
def __init__():
get_os()
def get_os():
global _OS
os_choice = {"nt": 0, "posix": 1}
for o in os_choice:
if o == os.name:
_OS = os_choice[o]
print("OS is {} setting _OS to {}".format(str(os.name), _OS))
return _OS
print("Unknown OS, assuming unix-like. OS = {}".format(os.name))
_OS = 1
return -1
class TreeSize():
size = 0.0
root_path = ""
def __init__(self, root_path=None):
if root_path:
self.root_path = root_path
def add_folder_size(self, size):
self.size += size
return self.size
class FilesFolders:
id = 0
full_path_name = pathlib.Path()
size = 0
def set_full_path_name(self, full_path_name):
self.full_path_name = pathlib.Path(full_path_name)
def set_id(self, id):
self.id = id
class File(FilesFolders):
file_name = ""
is_file = False
extension = ""
def __init__(self, full_path_name=None):
#print("File object created for {}".format(full_path_name))
if full_path_name != None:
self.set_full_path_name(full_path_name)
if os.path.isfile(full_path_name):
self.is_file = True
self.extract_filename_from_path()
self.get_size()
self.get_extension()
else:
print("Error - {} is not a valid file".format(full_path_name))
def get_size(self):
self.size = (os.path.getsize(self.full_path_name) / 1024) / 1024
return self.size
#print("Got size of file {} - {}MB".format(self.full_path_name, self.size))
def get_extension(self):
if _OS == 0:
self.file_name = str(self.file_name).upper()
else:
self.file_name = str(self.file_name)
#print("Determining file type by extension for {}".format(self.file_name))
if self.file_name.rfind(".") == -1:
#print("Unknown extension")
self.extension = "UNKNOWN_EXT"
else:
self.extension = self.file_name[self.file_name.rfind(".") + 1:]
#print("Extension is type {}".format(self.extension))
return self.extension
def extract_filename_from_path(self):
sep_pos = str(self.full_path_name).rfind(os.sep)
self.file_name = str(self.full_path_name)[sep_pos+1:]
#print("Filename {} extracted from path {}".format(self.file_name, self.full_path_name))
class Folder(FilesFolders):
folder_name = str()
folder_contents_list = []
contained_folders = []
contained_files = []
is_folder = bool()
contained_files_size = 0.0
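    # note: these dicts are class attributes; contained_file_extensions is
    # re-bound per instance in __init__, but contained_file_extensions_tree is
    # not, so it keeps accumulating extension counts across the whole scan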
contained_file_extensions_tree = {}
contained_file_extensions = {}
tree_size = None
parent_folder_object = None
max_depth = None
depth = 0
def __init__(self, parent=None, full_path_name=None, max_depth=None, depth=None):
#print("Folder object created for {}".format(full_path_name))
if full_path_name != None:
folder_name = ""
self.folder_contents_list = []
self.contained_folders = []
self.contained_files = []
self.contained_file_extensions = {}
self.id = 0
self.size = 0
self.is_folder = False
self.contained_files_size = 0
self.tree_size = TreeSize(root_path=full_path_name)
self.max_depth = max_depth
if depth:
self.depth = depth
else:
self.depth = 0
if parent:
self.parent_folder_object=parent
else:
self.parent_folder_object = self
self.set_full_path_name(full_path_name)
if os.path.isdir(full_path_name):
self.is_folder = True
self.folder_name = self.full_path_name.name
self.enumerate_content()
self.get_size()
else:
print("Error - {} is not a directory".format(full_path_name))
def enumerate_content(self):
file_count = 0
folder_count = 0
#print("max depth is {}".format(self.max_depth))
if self.max_depth:
if self.depth >= self.max_depth:
print("Maximum folder depth reached! {}".format(self.max_depth))
return -1
for item in os.listdir(self.full_path_name):
self.folder_contents_list.append(item)
full_path = os.path.join(self.full_path_name, item)
#print("Full path is {}".format(full_path))
try:
if os.path.isdir(os.path.abspath(full_path)):
#print("depth is {}".format(self.depth))
folder = Folder(parent=self.parent_folder_object, full_path_name=full_path, depth=self.depth+1,
max_depth=self.parent_folder_object.max_depth)
folder.set_id(folder_count)
folder_count = folder_count + 1
self.contained_folders.append(folder)
            except OSError as e:
                print("Error listing folder! {}".format(e))
if os.path.isfile(os.path.abspath(full_path)):
file = File(full_path)
file.set_id(file_count)
                    self.contained_files_size += file.size  # File.get_size() already returns MB
file_count = file_count + 1
self.contained_files.append(file)
if file.extension not in self.contained_file_extensions:
self.contained_file_extensions[file.extension] = 1
self.contained_file_extensions_tree[file.extension] = 1
#print("added new extension to dict {}".format(file.extension))
else:
self.contained_file_extensions[file.extension] += 1
self.contained_file_extensions_tree[file.extension] += 1
#print("found another file with an extension we've already recorded. {} now {}".format(
# file.extension, self.contained_file_extensions[file.extension]))
#print("Contents enumerated for {} - {} Files - {} Folders".format(self.full_path_name, file_count, folder_count))
def get_size(self):
size = 0
if self.contained_files:
for f in self.contained_files:
size += f.size
self.size = size
self.parent_folder_object.tree_size.add_folder_size(size)
print("Folder {} is {}MB".format(self.full_path_name, size))
return size
# run init function when run as module
__init__()
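# Hedged usage sketch (scans the real filesystem, so only run it against a
# small directory; max_depth limits how far enumeration recurses):
#   root = Folder(full_path_name="/tmp", max_depth=2)
#   print(root.tree_size.size)              # MB of files seen during the scan
#   print(root.contained_file_extensions)   # e.g. {'txt': 3, 'UNKNOWN_EXT': 1}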
``` |
{
"source": "jpulakka/nordpool_diff",
"score": 2
} |
#### File: custom_components/nordpool_diff/sensor.py
```python
from __future__ import annotations
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import STATE_UNKNOWN
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import dt
NORDPOOL_ENTITY = "nordpool_entity"
FILTER_LENGTH = "filter_length"
FILTER_TYPE = "filter_type"
RECTANGLE = "rectangle"
TRIANGLE = "triangle"
UNIT = "unit"
# https://developers.home-assistant.io/docs/development_validation/
# https://github.com/home-assistant/core/blob/dev/homeassistant/helpers/config_validation.py
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(NORDPOOL_ENTITY): cv.entity_id,
vol.Optional(FILTER_LENGTH, default=10): vol.All(vol.Coerce(int), vol.Range(min=2, max=20)),
vol.Optional(FILTER_TYPE, default=TRIANGLE): vol.In([RECTANGLE, TRIANGLE]),
vol.Optional(UNIT, default="EUR/kWh/h"): cv.string
})
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None
) -> None:
nordpool_entity_id = config[NORDPOOL_ENTITY]
filter_length = config[FILTER_LENGTH]
filter_type = config[FILTER_TYPE]
unit = config[UNIT]
add_entities([NordpoolDiffSensor(nordpool_entity_id, filter_length, filter_type, unit)])
class NordpoolDiffSensor(SensorEntity):
_attr_icon = "mdi:flash"
def __init__(self, nordpool_entity_id, filter_length, filter_type, unit):
self._nordpool_entity_id = nordpool_entity_id
self._filter = [-1]
if filter_type == TRIANGLE:
triangular_number = (filter_length * (filter_length - 1)) / 2
for i in range(filter_length - 1, 0, -1):
self._filter += [i / triangular_number]
else: # RECTANGLE
self._filter += [1 / (filter_length - 1)] * (filter_length - 1)
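        # For example, filter_length=4 gives triangular_number=6 and a filter of
        # [-1, 3/6, 2/6, 1/6]: the current hour gets weight -1 and the following
        # hours get decreasing positive weights that sum to +1, so a flat price
        # curve produces a diff of exactly 0.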
self._attr_unit_of_measurement = unit
self._attr_name = f"nordpool_diff_{filter_type}_{filter_length}"
# https://developers.home-assistant.io/docs/entity_registry_index/ : Entities should not include the domain in
# their Unique ID as the system already accounts for these identifiers:
self._attr_unique_id = f"{filter_type}_{filter_length}_{unit}"
self._state = self._next_hour = STATE_UNKNOWN
@property
def state(self):
return self._state
@property
def extra_state_attributes(self):
# TODO could also add self._nordpool_entity_id etc. useful properties here.
return {"next_hour": self._next_hour}
def update(self):
prices = self._get_next_n_hours(len(self._filter) + 1) # +1 to calculate next hour
self._state = round(sum([a * b for a, b in zip(prices, self._filter)]), 3) # zip cuts off right
self._next_hour = round(sum([a * b for a, b in zip(prices[1:], self._filter)]), 3)
def _get_next_n_hours(self, n):
np = self.hass.states.get(self._nordpool_entity_id)
prices = np.attributes["today"]
hour = dt.now().hour
# Get tomorrow if needed:
if len(prices) < hour + n and np.attributes["tomorrow_valid"]:
prices = prices + np.attributes["tomorrow"]
# Nordpool sometimes returns null prices, https://github.com/custom-components/nordpool/issues/125
# The nulls are typically at (tail of) "tomorrow", so simply removing them is reasonable:
prices = [x for x in prices if x is not None]
# Pad if needed, using last element:
prices = prices + (hour + n - len(prices)) * [prices[-1]]
return prices[hour: hour + n]
``` |
{
"source": "jpulec/blingalytics",
"score": 4
} |
#### File: blingalytics/utils/timezones.py
```python
from datetime import timedelta, tzinfo
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
class UTCTimeZone(tzinfo):
"""Implementation of the UTC timezone."""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return 'UTC'
def dst(self, dt):
return ZERO
utc_tzinfo = UTCTimeZone()
def unlocalize(aware_dt):
"""Converts a timezone-aware datetime into a naive datetime in UTC."""
return aware_dt.astimezone(utc_tzinfo).replace(tzinfo=None)
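# Example:
#   from datetime import datetime
#   aware = datetime(2020, 1, 1, 14, 0, tzinfo=utc_tzinfo)
#   unlocalize(aware)  # -> datetime(2020, 1, 1, 14, 0), naive and in UTC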
```
#### File: jpulec/blingalytics/fabfile.py
```python
import os
import re
from fabric.api import hide, lcd, local, prompt, settings
PROJECT_PATH = os.path.dirname(os.path.realpath(__file__))
def test():
"""Runs the blingalytics test suite."""
os.environ['PYTHONPATH'] = PROJECT_PATH
with settings(hide('warnings'), warn_only=True):
local('python test/test_runner.py')
def update_pypi():
"""Updates versions and packages for PyPI."""
# Verify that we want to do this...
sure = prompt('Are you sure you want to release a new version to PyPI? '
'Have you pushed all changes to origin/master? [y/n]',
validate=r'^[yYnN]')
if sure.lower()[0] != 'y':
return
# First update version numbers
with lcd(PROJECT_PATH):
old_version = local('grep version= setup.py', capture=True)
old_version = re.search(r'\'([0-9a-zA-Z.]+)\'', old_version).group(1)
new_version = prompt(
'What version number (previous: {0})?'.format(old_version),
validate=r'^\d+\.\d+\.\d+\w*$')
local('sed -i -r -e "s/{before}/{after}/g" {filename}'.format(
filename=os.path.join(PROJECT_PATH, 'setup.py'),
before=r"version='[0-9a-zA-Z.]+'",
after="version='{0}'".format(new_version)))
local('sed -i -r -e "s/{before}/{after}/g" {filename}'.format(
filename=os.path.join(PROJECT_PATH, 'docs', 'conf.py'),
before=r"version = '[0-9]+\.[0-9]+'",
after="version = '{0}'".format('.'.join(new_version.split('.')[:2]))))
local('sed -i -r -e "s/{before}/{after}/g" {filename}'.format(
filename=os.path.join(PROJECT_PATH, 'docs', 'conf.py'),
before=r"release = '[0-9]+\.[0-9]+\.[0-9a-zA-Z]+'",
after="release = '{0}'".format(new_version)))
# Then tag and push to git
local('git commit -a -m "Revs version to v{0}"'.format(new_version))
local('git tag -f -a v{0} -m "v{0}"'.format(new_version))
local('git push origin master --tags')
# Register new version on PyPI
# Note: copy to /tmp because vagrant shared directories don't handle
# links well, which are part of the sdist process
local('cp -f -r {0} /tmp/'.format(PROJECT_PATH))
with lcd('/tmp/vagrant'):
local('python setup.py register')
local('python setup.py sdist upload')
```
#### File: test/caches/test_redis_cache.py
```python
from datetime import datetime
from decimal import Decimal
import unittest
from blingalytics.caches import InstanceIncompleteError
from blingalytics.caches.redis_cache import RedisCache
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379
CREATE_INSTANCE_ARGS = [
'report_name',
'123abc',
[
{'id': 1, 'name': 'Jeff', 'price': Decimal('1.50'), 'count': 40},
{'id': 2, 'name': 'Tracy', 'price': Decimal('3.00'), 'count': 10},
{'id': 3, 'name': 'Connie', 'price': Decimal('0.00'), 'count': 100},
{'id': 4, 'name': 'Megan', 'price': None, 'count': -20},
],
lambda: {'id': None, 'name': '', 'price': Decimal('4.50'), 'count': 32.5},
86400,
]
class TestRedisCache(unittest.TestCase):
def setUp(self):
self.cache = RedisCache(host=REDIS_HOST, port=REDIS_PORT)
self.cache.__enter__()
self.cache.conn.flushall()
def tearDown(self):
self.cache.__exit__(None, None, None)
def test_create_instance(self):
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
self.assertEqual(
set(self.cache.conn.keys()),
set(['report_name:123abc:3', 'report_name:123abc:ids:', 'report_name:123abc:index:0:', 'report_name:123abc:_done:', 'report_name:123abc:1', 'report_name:123abc:', 'report_name:123abc:0', 'report_name:123abc:2', 'report_name:123abc:index:1:', 'report_name:123abc:index:2:', 'report_name:123abc:index:3:', 'report_name:123abc:footer:'])
)
def test_kill_cache(self):
# Instance cache
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
self.assertTrue(self.cache.conn.exists('report_name:123abc:'))
self.cache.kill_instance_cache('report_name', '123abc')
self.assertFalse(self.cache.conn.exists('report_name:123abc:'))
# Report-wide cache
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
self.assertTrue(self.cache.conn.exists('report_name:123abc:'))
self.cache.kill_report_cache('report_name')
self.assertFalse(self.cache.conn.exists('report_name:123abc:'))
def test_instance_stats(self):
# Before creating the instance in cache
self.assertFalse(self.cache.is_instance_started('report_name', '123abc'))
self.assertFalse(self.cache.is_instance_finished('report_name', '123abc'))
self.assertRaises(InstanceIncompleteError, self.cache.instance_row_count, 'report_name', '123abc')
self.assertRaises(InstanceIncompleteError, self.cache.instance_timestamp, 'report_name', '123abc')
# After creating the instance in cache
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
self.assertTrue(self.cache.is_instance_started('report_name', '123abc'))
self.assertTrue(self.cache.is_instance_finished('report_name', '123abc'))
self.assertEqual(self.cache.instance_row_count('report_name', '123abc'), 4)
self.assertTrue(isinstance(self.cache.instance_timestamp('report_name', '123abc'), datetime))
def test_instance_rows(self):
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
rows = self.cache.instance_rows('report_name', '123abc',
sort=('id', 'asc'), limit=2, offset=1)
self.assertEqual(list(rows), [
{'_bling_id': '1', 'id': 2, 'name': 'Tracy', 'price': Decimal('3.00'), 'count': 10},
{'_bling_id': '2', 'id': 3, 'name': 'Connie', 'price': Decimal('0.00'), 'count': 100},
])
rows = self.cache.instance_rows('report_name', '123abc',
sort=('price', 'desc'), limit=None, offset=0)
self.assertEqual(list(rows), [
{'_bling_id': '1', 'id': 2, 'name': 'Tracy', 'price': Decimal('3.00'), 'count': 10},
{'_bling_id': '0', 'id': 1, 'name': 'Jeff', 'price': Decimal('1.50'), 'count': 40},
{'_bling_id': '2', 'id': 3, 'name': 'Connie', 'price': Decimal('0.00'), 'count': 100},
{'_bling_id': '3', 'id': 4, 'name': 'Megan', 'price': None, 'count': -20},
])
def test_instance_footer(self):
self.assertRaises(InstanceIncompleteError, self.cache.instance_footer, 'report_name', '123abc')
self.cache.create_instance(*CREATE_INSTANCE_ARGS)
self.assertEqual(self.cache.instance_footer('report_name', '123abc'),
CREATE_INSTANCE_ARGS[3]())
```
#### File: blingalytics/test/test_helpers.py
```python
import json
import unittest
from blingalytics import helpers
from blingalytics.caches.local_cache import LocalCache
from mock import Mock
from test import reports_basic, reports_django
from test.support_base import mock_cache
CACHE = LocalCache()
class TestFrontendHelpers(unittest.TestCase):
def setUp(self):
report = reports_django.BasicDatabaseReport(CACHE)
report.kill_cache(full=True)
report = reports_basic.SuperBasicReport(CACHE)
report.kill_cache(full=True)
self.mock_runner = Mock()
self.mock_cache = mock_cache()
self.mock_cache.instance_rows.return_value = []
self.mock_cache.instance_footer.return_value = {'id': None}
self.mock_cache.instance_row_count.return_value = 0
def test_report_response_basic(self):
# Test report codename errors
self.assertEqual(helpers.report_response({}, cache=CACHE)[0],
'{"errors": ["Report code name not specified."]}')
self.assertEqual(helpers.report_response({'report': 'nonexistent'}, cache=CACHE)[0],
'{"errors": ["Specified report not found."]}')
# Test metadata request
body, mimetype, headers = helpers.report_response({
'report': 'super_basic_report',
'metadata': '1',
}, cache=CACHE)
metadata = json.loads(body)
self.assertEqual(set(metadata.keys()),
set(['errors', 'widgets', 'header', 'default_sort']))
self.assertEqual(metadata['errors'], [])
# Test user input errors
body, mimetype, headers = helpers.report_response({
'report': 'basic_database_report',
}, cache=CACHE)
response = json.loads(body)
self.assertEqual(len(response['errors']), 1)
# Test correct request
body, mimetype, headers = helpers.report_response({
'report': 'super_basic_report',
'iDisplayStart': '0',
'iDisplayLength': '10',
'sEcho': '1',
}, cache=CACHE)
response = json.loads(body)
self.assertEqual(len(response['errors']), 0)
self.assertEqual(response['iTotalRecords'], 3)
self.assertEqual(response['iTotalDisplayRecords'], 3)
self.assertEqual(response['sEcho'], '1')
self.assertEqual(response['poll'], False)
self.assertEqual(len(response['aaData']), 3)
self.assertEqual(len(response['aaData'][0]), 2)
self.assertEqual(len(response['footer']), 2)
def test_report_response_runner(self):
# Runner gets run
self.mock_cache.is_instance_started.return_value = False
self.mock_cache.is_instance_finished.return_value = False
body, mimetype, headers = helpers.report_response({
'report': 'super_basic_report',
'iDisplayStart': '0',
'iDisplayLength': '10',
'sEcho': '1',
}, runner=self.mock_runner, cache=self.mock_cache)
response = json.loads(body)
self.mock_runner.assert_called_once_with('super_basic_report',
{'sEcho': '1', 'iDisplayStart': '0', 'iDisplayLength': '10'})
self.assertEqual(response['errors'], [])
self.assertEqual(response['poll'], True)
# Runner already running does not get run
self.mock_cache.reset_mock()
self.mock_cache.is_instance_started.return_value = True
self.mock_cache.is_instance_finished.return_value = False
body, mimetype, headers = helpers.report_response({
'report': 'super_basic_report',
'iDisplayStart': '0',
'iDisplayLength': '10',
'sEcho': '1',
}, runner=self.mock_runner, cache=self.mock_cache)
response = json.loads(body)
self.assertFalse(self.mock_cache.called)
self.assertEqual(response['errors'], [])
self.assertEqual(response['poll'], True)
def test_report_response_runner_local_cache(self):
# Cannot use local cache with async runner
self.assertRaises(NotImplementedError, helpers.report_response, {
'report': 'super_basic_report',
'iDisplayStart': '0',
'iDisplayLength': '10',
'sEcho': '1',
}, runner=self.mock_runner, cache=CACHE)
def test_report_response_cache(self):
# Validate that custom cache is used correctly
self.mock_cache.is_instance_started.return_value = False
self.mock_cache.is_instance_finished.return_value = False
body, mimetype, headers = helpers.report_response({
'report': 'super_basic_report',
'iDisplayStart': '0',
'iDisplayLength': '10',
'sEcho': '1',
}, cache=self.mock_cache)
response = json.loads(body)
self.assertTrue(self.mock_cache.instance_rows.called)
self.assertTrue(self.mock_cache.instance_footer.called)
self.assertEqual(response['errors'], [])
self.assertEqual(response['poll'], False)
self.assertEqual(response['aaData'], [])
``` |
{
"source": "jpulec/django-protractor",
"score": 2
} |
#### File: management/commands/protractor.py
```python
import os
import sys
from multiprocessing import Process
from optparse import make_option
import subprocess
from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
from django.test.runner import setup_databases
class Command(BaseCommand):
args = '[--protractor-conf] [--runserver-command] [--specs] [--suite] [--addrport]'
help = 'Run protractor tests with a test database'
option_list = BaseCommand.option_list + (
make_option('--protractor-conf',
action='store',
dest='protractor_conf',
default='protractor.conf.js',
help='Specify a destination for your protractor configuration'
),
make_option('--runserver-command',
action='store',
dest='run_server_command',
default='runserver',
help='Specify which command you want to run a server'
),
make_option('--specs',
action='store',
dest='specs',
help='Specify which specs to run'
),
make_option('--suite',
action='store',
dest='suite',
help='Specify which suite to run'
),
make_option('--fixture',
action='append',
dest='fixtures',
help='Specify fixture to load initial data to the database'
),
make_option('--addrport', action='store', dest='addrport',
type='string',
help='port number or ipaddr:port to run the server on'),
)
def handle(self, *args, **options):
options['verbosity'] = int(options.get('verbosity'))
if not os.path.exists(options['protractor_conf']):
raise IOError("Could not find '{}'"
.format(options['protractor_conf']))
self.run_webdriver()
old_config = self.setup_databases(options)
fixtures = options['fixtures']
if fixtures:
call_command('loaddata', *fixtures,
**{'verbosity': options['verbosity']})
if options['addrport'] is None:
options['addrport'] = os.environ.get(
'DJANGO_LIVE_TEST_SERVER_ADDRESS', '8081')
test_server_process = Process(target=self.runserver, args=(options,))
test_server_process.daemon = True
test_server_process.start()
authority = options['addrport']
if ':' not in authority:
authority = 'localhost:' + authority
live_server_url = 'http://%s' % authority
params = {
'live_server_url': live_server_url
}
protractor_command = 'protractor {}'.format(options['protractor_conf'])
protractor_command += ' --baseUrl {}'.format(live_server_url)
if options['specs']:
protractor_command += ' --specs {}'.format(options['specs'])
if options['suite']:
protractor_command += ' --suite {}'.format(options['suite'])
for key, value in params.items():
protractor_command += ' --params.{key}={value}'.format(
key=key, value=value
)
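        # With the defaults this ends up invoking something like:
        #   protractor protractor.conf.js --baseUrl http://localhost:8081
        #       --params.live_server_url=http://localhost:8081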
return_code = subprocess.call(protractor_command.split())
# Terminate the live server process before tearing down the databases
# to prevent the error
# django.db.utils.OperationalError: database is being accessed by other users
test_server_process.terminate()
self.teardown_databases(old_config, options)
if return_code:
self.stdout.write('Failed')
sys.exit(1)
else:
self.stdout.write('Success')
def setup_databases(self, options):
return setup_databases(options['verbosity'], False)
def teardown_databases(self, old_config, options):
"""
Destroys all the non-mirror databases.
"""
        if isinstance(old_config, tuple) and len(old_config) == 2:
            # older Django versions returned (old_names, mirrors)
            old_names, mirrors = old_config
        else:
            old_names = old_config
for connection, old_name, destroy in old_names:
if destroy:
connection.creation.destroy_test_db(old_name, options['verbosity'])
def runserver(self, options):
use_threading = connection.features.test_db_allows_multiple_connections
self.stdout.write('Starting server...')
call_command(
options['run_server_command'],
addrport=options.get('addrport'),
shutdown_message='',
use_reloader=False,
use_ipv6=False,
verbosity=0,
use_threading=use_threading,
stdout=open(os.devnull, 'w')
)
def run_webdriver(self):
self.stdout.write('Starting webdriver...')
with open(os.devnull, 'w') as f:
subprocess.call(['webdriver-manager', 'update'], stdout=f, stderr=f)
subprocess.Popen(['webdriver-manager', 'start'], stdout=f, stderr=f)
``` |