metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "jpulec/django-simple-history",
"score": 2
} |
#### File: simple_history/tests/admin.py
```python
from __future__ import unicode_literals
from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import Poll, Choice, Person, Book, Document, Paper, Employee
class PersonAdmin(SimpleHistoryAdmin):
def has_change_permission(self, request, obj=None):
return False
admin.site.register(Poll, SimpleHistoryAdmin)
admin.site.register(Choice, SimpleHistoryAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Book, SimpleHistoryAdmin)
admin.site.register(Document, SimpleHistoryAdmin)
admin.site.register(Paper, SimpleHistoryAdmin)
admin.site.register(Employee, SimpleHistoryAdmin)
``` |
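The admin registrations above presuppose models that already record history. As a minimal sketch of the model side, using django-simple-history's documented `HistoricalRecords` field (the `question`/`pub_date` fields are illustrative, not taken from this repository):

```python
from django.db import models
from simple_history.models import HistoricalRecords

class Poll(models.Model):
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField("date published")

    # Creates a HistoricalPoll model/table and records every create/update/delete,
    # which SimpleHistoryAdmin then surfaces as a "History" view in the admin.
    history = HistoricalRecords()
```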
{
"source": "jpulgarin/django-tokenapi",
"score": 2
} |
#### File: django-tokenapi/tokenapi/backends.py
```python
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
from tokenapi.tokens import token_generator
class TokenBackend(ModelBackend):
def authenticate(self, request=None, pk=None, token=None):
try:
user = get_user_model().objects.get(pk=pk)
except get_user_model().DoesNotExist:
return None
# Reject users with is_active=False. Custom user models that don't have
# that attribute are allowed.
is_active = getattr(user, 'is_active', None)
if (is_active or is_active is None) and token_generator.check_token(user, token):
return user
``` |
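A hedged usage sketch (the settings and view snippets below are assumptions, not part of this repository): the backend only participates in authentication once it is listed in Django's `AUTHENTICATION_BACKENDS`, after which `authenticate()` can resolve a user from a primary key plus a token.

```python
# settings.py (assumed)
AUTHENTICATION_BACKENDS = (
    "tokenapi.backends.TokenBackend",
    "django.contrib.auth.backends.ModelBackend",
)

# views.py (assumed) -- resolve a user from a pk/token pair posted by a client
from django.contrib.auth import authenticate

def user_from_token(pk, token):
    # Returns a User on success, or None for an unknown pk, an inactive user,
    # or a token rejected by token_generator.check_token.
    return authenticate(pk=pk, token=token)
```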
{
"source": "jpunkt/blocklenium",
"score": 3
} |
#### File: blocklenium/blocklenium/selenium_worker.py
```python
import configparser
import logging
import threading
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
logger = logging.getLogger(__name__)
class SeleniumWorker(threading.Thread):
def __init__(self, queue, config):
threading.Thread.__init__(self)
self.chromedriver_path = config['CHROMEDRIVER_PATH']
self.desk_url = config['DESK_URL']
self.login_required = config['DESK_LOGIN_REQUIRED']
# Credentials used later by desk_login(); the config key names below are assumptions,
# since the original constructor never set these attributes.
self.desk_username = config.get('DESK_USERNAME')
self.desk_password = config.get('DESK_PASSWORD')
self.queue = queue
self.chromedriver = None
# Set options for chromedriver
self.chromecaps = webdriver.DesiredCapabilities.CHROME.copy()
# Accept insecure connections
self.chromecaps['acceptInsecureCerts'] = \
config['BROWSER_INSECURE_CERTS']
# Load javascript file
# If the filename ends with '.url', read with config-parser
bookmarklet_path = config['BOOKMARKLET_PATH']
if bookmarklet_path.endswith('.url'):
# parse with config parser
parser = configparser.ConfigParser()
parser.read(bookmarklet_path)
if 'InternetShortcut' in parser:
self.js = parser['InternetShortcut']['URL']
else:
raise ValueError('Bookmarklet file must be a web link!')
else:
with open(bookmarklet_path, "r") as f:
self.js = f.read()
def run(self):
"""Runs in an endless loop until False was put on the queue.
If True is on the queue, opens a browser and runs bookmarklet.
If None is on the queue, closes the browser."""
logger.debug('Thread running.')
while True:
q = self.queue.get()
if q:
logger.info('Starting browser...')
# Instantiate driver (opens browser)
if self.chromedriver is None:
logger.debug('No browser running. Starting browser...')
self.chromedriver = webdriver.Chrome(
self.chromedriver_path,
desired_capabilities=self.chromecaps)
# Open a website
logger.debug('Calling url')
self.chromedriver.get(self.desk_url)
# Log in if needed
if self.login_required:
self.desk_login()
# Execute JavaScript
if self.js is not None:
logger.info('Executing JavaScript...')
# Execute javascript
self.chromedriver.execute_script(self.js)
else:
logger.info('Closing browser...')
# Close browser
if self.chromedriver is not None:
self.chromedriver.quit()
self.chromedriver = None
if q is False:
logger.info('Exiting worker loop...')
break
def desk_login(self):
logger.info('attempting login to desk...')
# the user-input fields have weird ids, so we need to select
# them by searching for ids containing 'Username' or 'Password'
userfields = self.chromedriver.find_elements_by_css_selector(
"input[id*='Username']")
pwdfields = self.chromedriver.find_elements_by_css_selector(
"input[id*='Password']")
if (len(userfields) > 0) and (len(pwdfields) > 0):
userfields[0].send_keys(self.desk_username)
pwdfields[0].send_keys(self.desk_password)
loginbtn = self.chromedriver.find_element_by_xpath(
"//button[@type='submit']")
loginbtn.click()
# Wait for the new page to be fully loaded
WebDriverWait(self.chromedriver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME,
"timeline-header"))
)
else:
logger.info(
'Expected Login page but found no login fields. Ignored')
``` |
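A minimal driver sketch for the worker above, assuming placeholder config values; the queue protocol mirrors `run()`: a truthy item opens the browser and executes the bookmarklet, a falsy item closes it, and `False` also terminates the thread.

```python
import queue

from blocklenium.selenium_worker import SeleniumWorker

config = {
    "CHROMEDRIVER_PATH": "/usr/local/bin/chromedriver",   # placeholder path
    "DESK_URL": "https://example.org/desk",               # placeholder URL
    "DESK_LOGIN_REQUIRED": False,
    "BROWSER_INSECURE_CERTS": True,
    "BOOKMARKLET_PATH": "bookmarklet.js",                 # plain JS file, not a .url shortcut
}

q = queue.Queue()
worker = SeleniumWorker(q, config)
worker.start()

q.put(True)    # open browser, load DESK_URL, run the bookmarklet
q.put(None)    # close the browser again
q.put(False)   # close the browser (if open) and stop the worker loop
worker.join()
```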
{
"source": "jpunwin/plex-ihop-channel",
"score": 2
} |
#### File: Contents/Code/__init__.py
```python
PREFIX = "/video/ihop"
NAME = "IHOP Plugin"
MAXRESULTS = 50
IHOP_FEED_URL = "http://feed.theplatform.com/f/IfSiAC/5ct7EYhhJs9Z/"
IHOP_FEED_QUERY = "?q=%s&range=%s-%s&=&sort=pubDate|desc&count=true"
IHOP_FEED_FILT_ARTIST = "&byCustomValue={ihopkc$worshipLeader}{%s}"
IHOP_JAVASCRIPT_URL = "http://cdn.ihopkc.org.edgesuite.net/wp-content/themes/ihopkc_theme/_js/ihopkc-main.min.js"
RE_WLEADER = Regex('e.worshipLeaders\[0\]\.toLowerCase\(\)\)return"";var t\=(\[[a-zA-Z0-9\a\A\s",]+\]);')
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0'
TITLE = L('IHOP')
ICON = 'icon-default.png'
ART = 'art-default.jpg'
VIDEOS = 'icon-videos.png'
####################################################################################################
def Start():
#HTTP.CacheTime = CACHE_1HOUR
HTTP.Headers['User-Agent'] = USER_AGENT
Log.Debug("Starting the IHOP Plugin")
Plugin.AddViewGroup("Details", viewMode="InfoList", mediaType="items")
Plugin.AddViewGroup("List", viewMode="List", mediaType="items")
@route(PREFIX + '/thumb')
def GetThumb(url):
Log.Debug(url)
if url:
data = HTTP.Request(url, cacheTime = CACHE_1WEEK).content
return DataObject(data, 'image/jpeg')
else:
return Redirect(R(VIDEOS))
@indirect
def PlayVideo(url):
return IndirectResponse(VideoClipObject, key=url)
def createEpisodeObject(url, title, summary, thumburl, rating_key, originally_available_at=None, duration=None, include_container=False):
container = Container.MP4
video_codec = VideoCodec.H264
audio_codec = AudioCodec.AAC
audio_channels = 2
track_object = VideoClipObject(
key = Callback(
createEpisodeObject,
url=url,
title=title,
summary=summary,
thumburl=thumburl,
rating_key=rating_key,
originally_available_at=originally_available_at,
duration=duration,
include_container=True
),
rating_key = rating_key,
title = title,
summary = summary,
# thumb = thumb,
thumb=Callback(GetThumb, url=thumburl),
originally_available_at = originally_available_at,
duration = duration,
items = [
MediaObject(
parts = [
PartObject(key=Callback(PlayVideo, url=url))
],
container = container,
video_codec = video_codec,
audio_codec = audio_codec,
audio_channels = audio_channels,
)
]
)
if include_container:
return ObjectContainer(objects=[track_object])
else:
return track_object
@route(PREFIX + '/worshiper')
def WorshipLeaderMenu(artist=None):
if artist:
URL = IHOP_FEED_URL + IHOP_FEED_QUERY % (IHOP_FEED_FILT_ARTIST % artist.replace(" ", "+"), 0, MAXRESULTS)
else:
URL = IHOP_FEED_URL
container = Container.MP4
video_codec = VideoCodec.H264
audio_codec = AudioCodec.AAC
audio_channels = 2
Log.Debug(URL)
data = JSON.ObjectFromURL(URL)
Log.Debug(data)
if not artist:
oc = ObjectContainer(title2=data["title"]+" - " + L("All Videos"))
else:
oc = ObjectContainer(title2=data["title"]+" - " + artist)
oc.art = R(ART)
oc.view_group = "Details"
for ent in data.get('entries', []):
if "content" not in ent:
continue
video_url = ""
title = "%s - %s - %s" % (ent.get("title"), ', '.join(ent.get('ihopkc$setType',[''])), ', '.join(ent.get('ihopkc$worshipLeader',[''])))
duration = 0
for c in ent.get("content"):
if c.get("contentType") == "video":
video_url = c.get('downloadUrl')
duration = c.get('duration')
break
if video_url:
Log.Debug(video_url)
oc.add(createEpisodeObject(
url = video_url,
title = title,
summary = title,
originally_available_at=Datetime.FromTimestamp(ent.get("added")/1000),
duration = int(duration*1000),
rating_key = ent.get("guid"),
thumburl = ent.get("defaultThumbnailUrl"),
))
return oc
@handler(PREFIX, NAME, R(ART), R(ICON))
def MainMenu():
oc = ObjectContainer(no_cache = True)
oc.art = R(ART)
Log.Debug("Load Main Menu")
wleaders = []
try:
Log.Debug("Loading Worship Leaders")
IHOP_JAVASCRIPT_RAW = HTTP.Request(IHOP_JAVASCRIPT_URL).content
#Log.Debug(IHOP_JAVASCRIPT_RAW)
wleaders_match = RE_WLEADER.search(IHOP_JAVASCRIPT_RAW).groups()[0]
Log.Debug("Got response: %s" % wleaders_match)
wleaders = JSON.ObjectFromString(wleaders_match)
except Exception, exc:
Log.Exception(exc)
Log.Debug(str(wleaders))
oc.add(DirectoryObject(key=Callback(WorshipLeaderMenu), title = L("All Videos"), thumb = R(VIDEOS)))
for wleader in wleaders:
Log.Debug("Adding Worship Leader: %s" % wleader)
oc.add(DirectoryObject(key = Callback(WorshipLeaderMenu, artist=wleader), title = wleader, thumb = R(VIDEOS)))
return oc
``` |
{
"source": "jp-uom/variant_matrix_wizard",
"score": 3
} |
#### File: be/be_iface/BEIface.py
```python
class BEInterface(object):
""" This class offers methods to interface the Frontend/UI to the Backend """
def __init__(self):
print('Class BackendInterface created')
def get_file_headers(self, file_id):
"""
Gets the headers read from the file represented by the file_id
Arguments:
file_id: filename string
Returns:
List of headers found in either the csv or the vcf file.
Raises:
Exception
"""
print('get_file_headers called...File id: {0}'.format(file_id))
return []
def get_sample_ids(self):
"""
Gets the file name of each file to be merged by extracting it from the absolute path.
Returns:
List of file names
"""
print('get_sample_ids called')
return []
def get_common_headers(self):
"""
Gets the headers which are common to all files.
Returns:
List of common headers
Raises:
Exception
"""
print('get_common_headers called')
return []
def get_merge_progress(self):
"""
Returns the progress of the merge process as a percentage.
Returns:
Integer representing a percentage.
Raises:
Exception
"""
print('get_merge_progress called')
return 0
def merge(self):
"""
This method starts the merge process in the backend.
"""
print('merge called')
def set_output_filename(self, output_filename='output.csv'):
"""
Set output file name.
Arguments:
output_filename: the file name to be used for the final result. Default file name is 'output.csv'
"""
print('set_output_filename called...file name: {0}'.format(output_filename))
def set_merge_columns(self, join_columns_list):
"""
Set which columns are going to be used to join all files.
Arguments:
join_columns_list: a list of headers on which to join all files.
"""
print('set_merge_column_fields called...columns: {0}'.format(join_columns_list))
def set_common_output_columns(self, columns_list):
"""
Set those columns which should appear in the output file, given they are common in all files.
Arguments:
columns_list: a list of headers which are common in all files to merge.
"""
print('set_output_columns called...columns: {0}'.format(columns_list))
def set_additional_output_columns(self, file_columns_tuple_list):
"""
Set additional columns which are not common to all files.
Arguments:
file_columns_tuple_list: a list of tuples containing the file_id and corresponding additional fields
"""
print('set_additional_output_columns: {0}'.format(file_columns_tuple_list))
def set_files_to_merge(self, file_list):
"""
Set the files which are going to be merged. This applies when all files are of the same type (all csv or all vcf).
Arguments:
file_list: a list of files in absolute path format
Raises:
Exception
"""
print('set_files_to_merge called...files: {0}'.format(file_list))
def set_paired_files_to_merge(self, file_tuple_list):
"""
Set the files which are going to be merged as a pair. This applies when merging csv to vcf files.
Arguments:
file_tuple_list: a list of tuples containing the vcf paired to its equivalent csv annot file
Raises:
Exception
"""
print('set_paired_files_to_merge called...files: {0}'.format(file_tuple_list))
```
#### File: be/utils/utils.py
```python
import csv
import gzip
import allel
import psutil
import pathlib
def get_cpu_count():
""" Return physical (without hyperthreading) cpu core count """
return psutil.cpu_count(logical=False)
def get_memory():
return psutil.virtual_memory()
``` |
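A hedged sketch of the call sequence the stub interface above implies, based only on its docstrings (the import path is inferred from the file layout; file paths and the output name are placeholders, and the real backend implementation is not shown in this excerpt):

```python
from be.be_iface.BEIface import BEInterface  # import path inferred from the file header above

be = BEInterface()
be.set_files_to_merge(["/data/sample_a.vcf", "/data/sample_b.vcf"])  # placeholder paths
common = be.get_common_headers()
be.set_merge_columns(common)            # join all files on their shared headers
be.set_common_output_columns(common)
be.set_output_filename("merged.csv")
be.merge()
print(be.get_merge_progress())          # integer percentage, 0-100
```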
{
"source": "j-puri/random-uk-bank-account",
"score": 3
} |
#### File: random_uk_bank_account/client/vocalink.py
```python
from random_uk_bank_account.client import HttpSession
from random_uk_bank_account.utils.config import VOCALINK_URL
from random_uk_bank_account.utils.exceptions import IncompatibleVocalinkVersion
class VocalinkApi(HttpSession):
def __init__(self):
super().__init__()
def get_vocalink_modulus_media(self, version) -> str:
data = self.session.get(url=f"{VOCALINK_URL}{version}.txt")
data.raise_for_status()
if "Sorry, we can't find that page" in data.text:
raise IncompatibleVocalinkVersion(data.url)
return data.text
```
#### File: random_uk_bank_account/vocalink/vocalink_data.py
```python
from random_uk_bank_account.client.vocalink import VocalinkApi
from random_uk_bank_account.utils.config import LOGGER_NAME
from random_uk_bank_account.vocalink.vocalink_model import \
(VocalinkRule, VocalinkRuleCollection, VocalinkArrayMap, VocalinkSortCodeSubstitution,
VocalinkSortCodeSubstitutionCollection)
import logging
log = logging.getLogger(LOGGER_NAME)
def vocalink_raw_to_array(raw_data: str) -> list:
split_array = [entry.split() for entry in raw_data.splitlines()]
return [entry for entry in split_array if len(entry) != 0]
def get_vocalink_data(version) -> VocalinkRuleCollection:
raw_data = VocalinkApi().get_vocalink_modulus_media(version)
array_data = vocalink_raw_to_array(raw_data)
vocalink_rule_array = []
for rule in array_data:
try:
exception = rule[VocalinkArrayMap.EXCEPTION]
except IndexError:
exception = 0
vocalink_rule_array.append(
VocalinkRule(
sort_code_from=rule[VocalinkArrayMap.SORT_CODE_FROM],
sort_code_to=rule[VocalinkArrayMap.SORT_CODE_TO],
algorithm=rule[VocalinkArrayMap.ALGORITHM],
sort_code_pos_1=rule[VocalinkArrayMap.SORT_CODE_RULE_1],
sort_code_pos_2=rule[VocalinkArrayMap.SORT_CODE_RULE_2],
sort_code_pos_3=rule[VocalinkArrayMap.SORT_CODE_RULE_3],
sort_code_pos_4=rule[VocalinkArrayMap.SORT_CODE_RULE_4],
sort_code_pos_5=rule[VocalinkArrayMap.SORT_CODE_RULE_5],
sort_code_pos_6=rule[VocalinkArrayMap.SORT_CODE_RULE_6],
account_number_pos_1=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_1],
account_number_pos_2=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_2],
account_number_pos_3=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_3],
account_number_pos_4=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_4],
account_number_pos_5=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_5],
account_number_pos_6=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_6],
account_number_pos_7=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_7],
account_number_pos_8=rule[VocalinkArrayMap.ACCOUNT_NUMBER_RULE_8],
exception=exception
)
)
return VocalinkRuleCollection(rules=vocalink_rule_array)
def get_vocalink_substitutions(version) -> VocalinkSortCodeSubstitutionCollection:
raw_data = VocalinkApi().get_vocalink_modulus_media(version)
array_data = vocalink_raw_to_array(raw_data)
vocalink_substitition_array = []
for substitution in array_data:
vocalink_substitition_array.append(
VocalinkSortCodeSubstitution(
original_sort_code=substitution[0],
substituted_sort_code=substitution[1]
)
)
return VocalinkSortCodeSubstitutionCollection(substitutions=vocalink_substitition_array)
```
#### File: random_uk_bank_account/vocalink/vocalink_db_init.py
```python
from random_uk_bank_account.vocalink.vocalink_model import VocalinkRuleCollection, VocalinkSortCodeSubstitutionCollection
from random_uk_bank_account.client.sqlite3 import SqlLite
class CreateVocalinkDb:
def __init__(self, DB_LOCATION: str, vocalink_rules: VocalinkRuleCollection,
vocalink_sort_code_substitutions: VocalinkSortCodeSubstitutionCollection):
self.db = SqlLite(local_db=DB_LOCATION)
self.vocalink_rules = vocalink_rules
self.vocalink_sort_code_substitutions = vocalink_sort_code_substitutions
def build(self):
self._create_table()
self._insert_data()
def _insert_data(self):
self.db.conn.executemany("INSERT INTO vocalink_rules VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
self.vocalink_rules.to_ordered_tuple_list())
self.db.conn.executemany("INSERT INTO vocalink_sort_code_substitutions VALUES (?,?)",
self.vocalink_sort_code_substitutions.to_ordered_tuple_list())
self.db.commit()
def _create_table(self):
self.db.conn.executescript("""
DROP TABLE IF EXISTS vocalink_rules;
CREATE TABLE vocalink_rules (
sort_code_from VARCHAR(6) NOT NULL,
sort_code_to VARCHAR(6) NOT NULL,
algorithm VARCHAR(10) NOT NULL,
sort_code_pos_1 INT NOT NULL,
sort_code_pos_2 INT NOT NULL,
sort_code_pos_3 INT NOT NULL,
sort_code_pos_4 INT NOT NULL,
sort_code_pos_5 INT NOT NULL,
sort_code_pos_6 INT NOT NULL,
account_number_pos_1 INT NOT NULL,
account_number_pos_2 INT NOT NULL,
account_number_pos_3 INT NOT NULL,
account_number_pos_4 INT NOT NULL,
account_number_pos_5 INT NOT NULL,
account_number_pos_6 INT NOT NULL,
account_number_pos_7 INT NOT NULL,
account_number_pos_8 INT NOT NULL,
exception VARCHAR(10)
)
""")
self.db.conn.executescript("""
DROP TABLE IF EXISTS vocalink_sort_code_substitutions;
CREATE TABLE vocalink_sort_code_substitutions (
original_sort_code VARCHAR(10) NOT NULL,
substituted_sort_code VARCHAR(10) NOT NULL
)
""")
self.db.commit()
```
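A hedged sketch tying these modules together (the version identifiers and database path are placeholders; consult Vocalink's published file names for real values): download and parse the modulus rules, then persist them into the SQLite store that the lookup functions query.

```python
from random_uk_bank_account.vocalink.vocalink_data import (
    get_vocalink_data,
    get_vocalink_substitutions,
)
from random_uk_bank_account.vocalink.vocalink_db_init import CreateVocalinkDb

rules = get_vocalink_data("valacdos-v640")        # placeholder rules-file version
subs = get_vocalink_substitutions("scsubtab")     # placeholder substitution-file version
CreateVocalinkDb(
    DB_LOCATION="vocalink.db",                    # placeholder SQLite path
    vocalink_rules=rules,
    vocalink_sort_code_substitutions=subs,
).build()
```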
#### File: random_uk_bank_account/vocalink/vocalink.py
```python
from random_uk_bank_account.vocalink.vocalink_db import VocalinkDataAccess
from random_uk_bank_account.vocalink.vocalink_model import \
(VocalinkRuleCollection, VocalinkRule, VocalinkSortCodeSubstitution, VocalinkSortCodeSubstitutionCollection)
from random_uk_bank_account.utils import validators
def get_vocalink_rules_for_sort_code(sort_code: str, db_location: str) -> VocalinkRuleCollection:
validators.check_sort_code_correct_format(sort_code)
vocalink_db = VocalinkDataAccess(DB_LOCATION=db_location)
rules = vocalink_db.get_rules_for_sort_code(sort_code)
vocalink_rule_collection = VocalinkRuleCollection()
if rules:
for rule in rules:
vocalink_rule_collection.rules.append(VocalinkRule(**rule))
return vocalink_rule_collection
def get_vocalink_sort_code_substitution_for_sort_code(sort_code: str, db_location: str) -> VocalinkSortCodeSubstitution:
vocalink_db = VocalinkDataAccess(DB_LOCATION=db_location)
substitution = vocalink_db.get_sort_code_substitution(sort_code)
if substitution:
return VocalinkSortCodeSubstitution(**substitution[0])
else:
return VocalinkSortCodeSubstitution()
def get_all_vocalink_sort_code_substutions(db_location: str) -> VocalinkSortCodeSubstitutionCollection:
vocalink_db = VocalinkDataAccess(DB_LOCATION=db_location)
subs = vocalink_db.get_all_sort_code_substitutions()
subsitution_collection = VocalinkSortCodeSubstitutionCollection()
if subs:
for sub in subs:
subsitution_collection.substitutions.append(VocalinkSortCodeSubstitution(**sub))
return subsitution_collection
if __name__=="__main__":
test = get_vocalink_rules_for_sort_code('200412')
print('break')
```
#### File: api/get/test_get_vocalink_rules.py
```python
from random_uk_bank_account import VocalinkRuleCollection, GenerateUkBankAccount, VocalinkSortCodeSubstitutionCollection, \
VocalinkSortCodeSubstitution
import pytest
from test.utils.test_fixtures.classes_under_test import generator
def test_get_rules_for_existing_sort_code(generator: GenerateUkBankAccount):
rules = generator.get_vocalink_rules("040004")
assert isinstance(rules, VocalinkRuleCollection)
assert isinstance(rules.to_dict(), dict)
assert isinstance(rules.to_json(), str)
def test_get_rules_for_non_existing_sort_code(generator: GenerateUkBankAccount):
rules = generator.get_vocalink_rules("040001")
assert len(rules.rules) == 0
@pytest.mark.parametrize('sort_code', ['0404', "04000a"])
def test_get_rules_for_invalid_sort_code(generator: GenerateUkBankAccount, sort_code):
with pytest.raises(AttributeError):
generator.get_vocalink_rules(sort_code)
def test_get_sort_code_substitutions_for_sort_code_with_no_substitutions(generator: GenerateUkBankAccount):
substitution = generator.get_vocalink_substitution('040004')
assert not substitution.substituted_sort_code
assert not substitution.original_sort_code
def test_get_sort_code_substitution_for_sort_code_with_substitution(generator: GenerateUkBankAccount):
substitution = generator.get_vocalink_substitution('938618')
assert substitution.substituted_sort_code
assert substitution.original_sort_code
assert isinstance(substitution, VocalinkSortCodeSubstitution)
def test_get_all_sort_code_substitutions(generator: GenerateUkBankAccount):
substitutions = generator.get_all_vocalink_substitutions()
assert isinstance(substitutions, VocalinkSortCodeSubstitutionCollection)
assert len(substitutions.substitutions) > 0
assert isinstance(substitutions.substitutions[0], VocalinkSortCodeSubstitution)
```
#### File: api/validate/test_validator_for_known_accounts.py
```python
import pytest
from test.utils.test_fixtures.classes_under_test import generator
@pytest.mark.parametrize(
"sort_code,account_number", [
("118765", "64371389")
])
def test_known_values_for_exception_1(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("309070", "02355688"),
("309070", "12345668"),
("309070", "12345677"),
("309070", "99345694")
])
def test_known_values_for_exception_2_and_9(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("820000", "73688637"),
("827999", "73988638"),
("827101", "28748352"),
])
def test_known_values_for_exception_3(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("134020", "63849203")
])
def test_known_values_for_exception_4(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("938611", "07806039"),
("938611", "42368003"),
("938063", "55065200")
])
def test_known_values_for_exception_5(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("200915", "41011166")
])
def test_known_values_for_exception_6(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("772798", "99345694")
])
def test_known_values_for_exception_7(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("086090", "06774744")
])
def test_known_values_for_exception_8(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("871427", "46238510"),
("872427", "46238510"),
("871427", "09123496"),
("871427", "99123496"),
])
def test_known_values_for_exception_10_and_11(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("074456", "12345112"),
("070116", "34012583"),
("074456", "11104102")
])
def test_known_values_for_exception_12_and_13(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
@pytest.mark.parametrize(
"sort_code,account_number", [
("180002", "00000190")
])
def test_known_values_for_exception_14(generator, sort_code, account_number):
assert generator.validate(sort_code=sort_code, account_number=account_number)
``` |
{
"source": "jpuris/udacity-data-engineering-submissions",
"score": 3
} |
#### File: plugins/helpers/sparkify_dim_subdag.py
```python
from airflow import DAG
from operators import LoadDimensionOperator
def load_dim_subdag(
parent_dag_name: str,
task_id: str,
redshift_conn_id: str,
sql_statement: str,
do_truncate: bool,
table_name: str,
**kwargs,
):
"""
Airflow subdag wrapper around the LoadDimensionOperator.
Subdag's name will be f'{parent_dag_name}.{task_id}'
Subdag related keyword arguments:
- parent_dag_name -- Parent DAG name
- task_id -- Task ID for the subdag to use
Keyword arguments:
redshift_conn_id -- Airflow connection name for Redshift detail
sql_statement -- SQL statement to run
do_truncate -- Does the table need to be truncated before running
SQL statement
table_name -- Dimension table name
All keyword arguments will be passed to LoadDimensionOperator
"""
dag = DAG(f'{parent_dag_name}.{task_id}', **kwargs)
load_dimension_table = LoadDimensionOperator(
task_id=task_id,
dag=dag,
redshift_conn_id=redshift_conn_id,
sql_query=sql_statement,
do_truncate=do_truncate,
table_name=table_name,
)
load_dimension_table
return dag
``` |
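A hedged sketch of how the wrapper might be wired into a parent DAG with Airflow's `SubDagOperator` (the Airflow 1.10-style imports, table name, and SQL are assumptions, as is the `helpers` import path): the subdag id must equal `f'{parent_dag_name}.{task_id}'`, which `load_dim_subdag` already produces.

```python
from datetime import datetime

from airflow import DAG
from airflow.operators.subdag_operator import SubDagOperator  # Airflow 1.10-style import
from helpers.sparkify_dim_subdag import load_dim_subdag       # import path assumed from plugins/helpers

dag = DAG("sparkify_etl", start_date=datetime(2021, 1, 1), schedule_interval="@hourly")

load_user_dim = SubDagOperator(
    task_id="load_user_dim_table",
    subdag=load_dim_subdag(
        parent_dag_name="sparkify_etl",          # must match the parent dag_id
        task_id="load_user_dim_table",
        redshift_conn_id="redshift",
        sql_statement="SELECT DISTINCT userid, firstname FROM staging_events",  # placeholder SQL
        do_truncate=True,
        table_name="users",
        start_date=datetime(2021, 1, 1),         # extra kwargs are forwarded to DAG(...)
        schedule_interval="@hourly",
    ),
    dag=dag,
)
```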
{
"source": "jpush/jmessage-api-python-client",
"score": 3
} |
#### File: jmessage/groups/groups.py
```python
from jmessage import *
from jmessage import url
import json
class Group(object):
def __init__(self,jmessage):
self.jmessage=jmessage
def build_group(self, owner_username=None, name=None, members_username=None, desc=None):
group = {}
if owner_username is not None:
group["owner_username"] = owner_username
if name is not None:
group["name"] = name
if members_username is not None:
group["members_username"] = members_username
if desc is not None:
group["desc"] = desc
return group
def create_group(self,group):
#print group
group_url=url.IM_URL+url.GROUPS_URL
#print group_url
body=json.dumps(group)
response = self.jmessage._request("POST", body, group_url)
return response
def get_group(self,gid):
#print gid
group_url=url.IM_URL+url.GROUPS_URL+gid
#print group_url
body=None
response = self.jmessage._request("GET", body, group_url)
return response
def put_group(self,gid,group):
#print gid
group_url=url.IM_URL+url.GROUPS_URL+gid
#print group_url
body=json.dumps(group)
response = self.jmessage._request("PUT", body, group_url)
return response
def delete_group(self, gid):
#print gid
group_url=url.IM_URL+url.GROUPS_URL+gid
#print group_url
body=None
response = self.jmessage._request("DELETE", body, group_url)
return response
def put_group_members(self, gid, add , remove=None):
#print gid
group_url=url.IM_URL+url.GROUPS_URL+gid+"/members"
#print group_url
members={}
members["add"]=add
if remove is not None:
    members["remove"]=remove
body= json.dumps(members)
#print body
response = self.jmessage._request("POST", body, group_url)
return response
def get_group_members(self, gid):
#print gid
group_url=url.IM_URL+url.GROUPS_URL+gid+"/members"
#print group_url
body= None
response = self.jmessage._request("GET", body, group_url)
return response
def get_groups_by_username(self, username):
#print username
group_url=url.IM_URL+url.REGIST_USER_URL+username+"/groups/"
#print group_url
body= None
response = self.jmessage._request("GET", body, group_url)
return response
def get_groups_list(self, start, count):
group_url=url.IM_URL+url.GROUPS_URL+"?start="+start+"&count="+count
#print group_url
body= None
response = self.jmessage._request("GET", body, group_url)
return response
``` |
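A hedged usage sketch of the group wrapper above; `jmessage_client` stands in for an already-constructed client object exposing `_request()` (its constructor is not shown in this excerpt), and the usernames and gid are placeholders.

```python
from jmessage.groups.groups import Group  # path from the file header above

grp = Group(jmessage_client)

payload = grp.build_group(
    owner_username="alice",
    name="project-chat",
    members_username=["bob", "carol"],
    desc="demo group",
)
resp = grp.create_group(payload)            # POST to IM_URL + GROUPS_URL
members = grp.get_group_members("12345")    # placeholder gid
grp.put_group_members("12345", add=["dave"], remove=["carol"])
```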
{
"source": "jputlock/NocBot",
"score": 2
} |
#### File: NocBot/bot/ghost.py
```python
import sys
import discord
from discord import ChannelType
import json
import subprocess
import asyncio
import traceback
from .triggers import msg_triggers, reaction_triggers
from .triggers import utils
from .triggers.commands import invalid_command
class GhostClient(discord.Client):
def __init__(
self,
config_filename="config/config.json",
roles_filename="config/roles.json",
messages_filename="config/messages.json",
log_filename="nocbot.log",
path=sys.path[0] + "/"
):
super().__init__()
config_filename = path + config_filename
roles_filename = path + roles_filename
messages_filename = path + messages_filename
self.log_filename = path + log_filename
open(self.log_filename, "w")
with open(config_filename, "r") as config_file:
self.config = json.load(config_file)
with open(roles_filename, "r") as roles_file:
self.roles = json.load(roles_file)
with open(messages_filename, "r") as messages_file:
self.messages = json.load(messages_file)
self.command_channels_only = len(self.config["command_channels"]) > 0
utils.setup_data_dragon(self)
print("[+] Initialization complete.")
if self.command_channels_only:
print("[#] COMMAND CHANNEL ONLY ENABLED: Commands can only be run in specified channels. Edit config.json to add/remove channels.")
async def on_ready(self):
if len(sys.argv) > 1:
args = ["kill", "-9"]
args.extend(sys.argv[1:])
subprocess.call(args)
self.SERVER = self.get_guild(self.config["SERVER_ID"])
print(f"[+] Connected to {self.SERVER.name} as {self.user}!")
async def on_message(self, msg):
if msg.author.bot:
return
replied = False
for trigger in msg_triggers:
try:
matches, idx = await trigger.matches_call(self, msg)
if matches:
await trigger.execute_message(self, msg, idx)
replied = True
break
except Exception as e:
await utils.log_traceback(self, self)
replied = True
break
"""
if not replied:
if not await invalid_command(self, msg):
utils.print_error("", f"A valid command was not replied to:\n{msg.content}")
"""
async def on_raw_reaction_add(self, reaction):
user = self.SERVER.get_member(reaction.user_id)
if not user: # user is not in the cache
user = await self.fetch_user(reaction.user_id)
if user.bot:
return
channel = self.get_channel(reaction.channel_id)
if not channel: # channel is not in the cache
channel = await self.fetch_channel(reaction.channel_id)
msg = await channel.fetch_message(reaction.message_id)
for trigger in reaction_triggers:
try:
result = await trigger.execute_reaction(
self, reaction, channel, msg, user
)
# if you delete the message reacted to, return False
if result is False:
break
except Exception as e:
await utils.log_traceback(self, self)
```
#### File: triggers/commands/league.py
```python
from . import Command
from .. import utils
import discord
from requests import HTTPError
from math import isclose
class League(Command):
names = ["league"]
description = "Get the stats of the current league game of a summoner."
usage = "!league <summoner>"
tiers = {
"Iron": "I",
"Bronze": "B",
"Silver": "S",
"Gold": "G",
"Platinum": "P",
"Diamond": "D",
"Master": "M",
"Grandmaster": "GM",
"Challenger": "C"
}
romans = {
"I": "1",
"II": "2",
"III": "3",
"IV": "4",
"V": "5",
}
def get_embed(self, target, client, game):
time = game['gameLength']
hrs = time // 3600
mins = time // 60
secs = time % 60
desc = "Game Time: " + (f"{mins:2d}:{secs:02d}" if hrs == 0 else f"{hrs}:{mins:02d}:{secs:02d}") + "\n"
team_1, team_2 = [], []
for player in game['participants']:
summoner = self.dragon.watcher.league.by_summoner(client.config['region'], player['summonerId'])
rank = "Unranked"
win_rate = 0
for league_entry in summoner:
wins = league_entry['wins']
losses = league_entry['losses']
print(league_entry['queueType'])
if league_entry['queueType'] == "RANKED_FLEX_5x5":
rank = self.tiers[league_entry['tier'].title()] + self.romans[league_entry['rank']]
win_rate = 100 * ( wins / ( wins + losses ) )
if league_entry['queueType'] == "RANKED_SOLO_5x5":
rank = self.tiers[league_entry['tier'].title()] + self.romans[league_entry['rank']]
win_rate = 100 * ( wins / ( wins + losses ) )
break
player['rank'] = rank
player['win_rate'] = win_rate
if player['teamId'] == 100:
team_1.append(player)
else:
team_2.append(player)
desc += utils.team_names[0] + "\n" + "\n".join(
f"{player['summonerName']} ({player['rank']}): {self.dragon.champions[player['championId']]['name']} " +
f"[{self.dragon.summoners[player['spell1Id']]['name']}/{self.dragon.summoners[player['spell2Id']]['name']}] " +
f"({self.dragon.runes[player['perks']['perkStyle']][player['perks']['perkIds'][0]]})\n" +
"└─ Overall Win Rate: " + (f"{player['win_rate']:2.0f}%" if isclose(player['win_rate'] % 1, 0, rel_tol=0.1) else f"{player['win_rate']:2.1f}%")
for player in team_1
)
desc += "\n" + utils.team_names[1] + "\n" + "\n".join(
f"{player['summonerName']} ({player['rank']}): {self.dragon.champions[player['championId']]['name']} " +
f"[{self.dragon.summoners[player['spell1Id']]['name']}/{self.dragon.summoners[player['spell2Id']]['name']}] " +
f"({self.dragon.runes[player['perks']['perkStyle']][player['perks']['perkIds'][0]]})\n" +
"└─ Overall Win Rate: " + (f"{player['win_rate']:2.0f}%" if isclose(player['win_rate'] % 1, 0, rel_tol=0.1) else f"{player['win_rate']:2.1f}%")
for player in team_2
)
embed = discord.Embed(
title=f"{target}'s Current Game:",
description=desc
)
return embed
async def execute_command(self, client, msg, content):
if not content:
await msg.channel.send(
f"Usage: {self.usage}"
)
return
self.dragon = utils.global_dragon
lookup_summoner = None
lookup_summoner = self.dragon.watcher.summoner.by_name(client.config["region"], content)
async with msg.channel.typing():
if not lookup_summoner:
utils.log(self, "Can not receive summoner from League API Endpoint", client)
return
game = None
try:
game = self.dragon.watcher.spectator.by_summoner(client.config["region"], lookup_summoner['id'])
except HTTPError as e:
utils.log(self, "Player is not in a game.", client)
await msg.channel.send(
"That player is not in a game (or is in a bot game)."
)
return
if not game:
utils.log(self, "Can not receive game from League API Endpoint", client)
return
await msg.channel.send(
content="",
embed=self.get_embed(content, client, game)
)
```
#### File: triggers/commands/purge.py
```python
from . import Command
from .. import utils
class Purge(Command):
names = ["purge"]
description = "MOD+ ONLY: Purges the last x messages from a channel."
usage = "!purge <number of messages>"
requires_mod = True
requires_server = True
async def execute_command(self, client, msg, content):
if not content:
await msg.author.send(
f"Usage: {self.usage}"
)
return
arr = content.split(" ")
try:
num = int(arr[0]) + 1
await msg.channel.purge(limit=num)
except:
utils.log(self, f"Could not parse # for purge command, content = \'{content}\'", client)
```
#### File: triggers/commands/randomize.py
```python
from . import Command
from .. import utils
from random import sample
class Randomize(Command):
names = ["randomize"]
description = "Assign random teams and random roles to the 10 players in your voice channel. Sorts you into two calls."
usage = "!randomize"
requires_mod = True
requires_server = True
async def execute_command(self, client, msg, content):
if msg.author.voice is None:
await msg.author.send(
client.messages["caller_not_connected"]
)
return
connected_members = msg.author.voice.channel.members
if len(connected_members) != 10:
utils.log(self, "Need exactly 10 to randomize", client)
await msg.channel.send(
client.messages["need_ten_players"]
)
return
temp = sample(connected_members, k=len(connected_members))
roles_list = ["Top", "Jgl", "Mid", "Bot", "Sup"]
team_1, team_2 = temp[:5], temp[5:]
await msg.channel.send(
"**Team 1**:\n" +
"".join(role + ": " + member.name + "\n" for role, member in zip(roles_list, team_1)) +
"\n**Team 2**:\n" +
"".join(role + ": " + member.name + "\n" for role, member in zip(roles_list, team_2))
)
team_1_vc = client.get_channel(client.config["TEAM_1_ID"])
team_2_vc = client.get_channel(client.config["TEAM_2_ID"])
for member in team_1:
try:
await member.move_to(team_1_vc)
except:
utils.log(self, f"Could not move {member.name} to Team 1", client)
for member in team_2:
try:
await member.move_to(team_2_vc)
except:
utils.log(self, f"Could not move {member.name} to Team 2", client)
```
#### File: triggers/commands/reset.py
```python
from . import Command
from .. import utils
class Reset(Command):
names = ["reset"]
description = "Reset players from their team channels to the designated lobby channel."
usage = "!reset"
requires_mod = True
requires_server = True
async def execute_command(self, client, msg, content):
lobby_vc = client.get_channel(client.config["LOBBY_ID"])
team_1_vc = client.get_channel(client.config["TEAM_1_ID"])
team_2_vc = client.get_channel(client.config["TEAM_2_ID"])
all_members = team_1_vc.members + team_2_vc.members
utils.log(self, "Moving these users to the lobby:" + str([member.name for member in all_members]), client)
for member in all_members:
try:
await member.move_to(lobby_vc)
except:
utils.log(self, f"Could not move {member.name} to the lobby channel.", client)
```
#### File: NocBot/utils/datadragon.py
```python
from riotwatcher import LolWatcher, ApiError
import os
class DataDragon():
watcher = LolWatcher(os.getenv("LEAGUE_TOKEN"))
def __init__(self, client):
self.champions = {}
self.summoners = {}
self.items = {}
self.runes = {}
# all types ['item', 'rune', 'mastery', 'summoner', 'champion', 'profileicon', 'map', 'language', 'sticker']
types = ["champion", "summoner", "rune"]
version_dict = self.watcher.data_dragon.versions_for_region(client.config["region"])["n"]
for data_type in types:
# get league's latest version
latest = version_dict[data_type]
# get info
info = None
if data_type == "champion":
info = self.watcher.data_dragon.champions(latest, False, 'en_US')
for key in info['data']:
self.champions[int(info['data'][key]['key'])] = info['data'][key]
elif data_type == "summoner":
info = self.watcher.data_dragon.summoner_spells(latest, 'en_US')
for key in info['data']:
self.summoners[int(info['data'][key]['key'])] = info['data'][key]
elif data_type == "rune":
latest = version_dict["champion"] # riot fucked this up in /realms/na.json
info = self.watcher.data_dragon.runes_reforged(latest, 'en_US')
for rune_category in info:
current_runes = {}
for rune in rune_category['slots'][0]['runes']:
current_runes[int(rune['id'])] = rune['name']
self.runes[int(rune_category['id'])] = current_runes
``` |
{
"source": "jputlock/Riot-Watcher",
"score": 3
} |
#### File: _apis/team_fight_tactics/SummonerApi.py
```python
from .. import BaseApi, NamedEndpoint
from .urls import SummonerApiUrls
class SummonerApi(NamedEndpoint):
"""
This class wraps the TFT Summoner Api calls provided by the Riot API.
See https://developer.riotgames.com/apis#tft-summoner-v1 for more detailed information.
"""
def __init__(self, base_api: BaseApi):
"""
Initializes a new SummonerApi which uses the provided base_api
:param BaseApi base_api: the root API object to use for making all requests.
"""
super().__init__(base_api, self.__class__.__name__)
def by_account(self, region: str, encrypted_account_id: str):
return self._request_endpoint(
self.by_account.__name__,
region,
SummonerApiUrls.by_account,
encrypted_account_id=encrypted_account_id,
)
def by_name(self, region: str, summoner_name: str):
return self._request_endpoint(
self.by_name.__name__,
region,
SummonerApiUrls.by_name,
summoner_name=summoner_name,
)
def by_puuid(self, region: str, puuid: str):
return self._request_endpoint(
self.by_puuid.__name__, region, SummonerApiUrls.by_puuid, puuid=puuid
)
def by_id(self, region: str, encrypted_summoner_id: str):
return self._request_endpoint(
self.by_id.__name__,
region,
SummonerApiUrls.by_id,
encrypted_summoner_id=encrypted_summoner_id,
)
``` |
{
"source": "jputrino/f5-common-python",
"score": 2
} |
#### File: cm/autodeploy/software_images.py
```python
import os
from f5.bigip.mixins import FileUploadMixin
from f5.bigip.resource import PathElement
from f5.sdk_exception import F5SDKError
class ImageFilesMustHaveDotISOExtension(F5SDKError):
def __init__(self, filename):
super(ImageFilesMustHaveDotISOExtension, self).__init__(filename)
class Software_Image_Uploads(PathElement, FileUploadMixin):
"""Software image upload resource."""
def __init__(self, autodeploy):
super(Software_Image_Uploads, self).__init__(autodeploy)
def upload_image(self, filepathname, **kwargs):
filename = os.path.basename(filepathname)
if os.path.splitext(filename)[-1] != '.iso':
raise ImageFilesMustHaveDotISOExtension(filename)
self.file_bound_uri = self._meta_data['uri'] + filename
self._upload_file(filepathname, **kwargs)
#
#
# class Software_Image_Downloads(PathElement):
# """Software image download resource."""
# def __init__(self, autodeploy):
# super(Software_Image_Downloads, self).__init__(autodeploy)
#
# def download_image(self, filepathname, **kwargs):
# filename = os.path.basename(filepathname)
# session = self._meta_data['icr_session']
# chunk_size = kwargs.pop('chunk_size', 512 * 1024)
# self.file_bound_uri = self._meta_data['uri'] + filename
# with open(filepathname, 'wb') as writefh:
# start = 0
# end = chunk_size - 1
# size = 0
# current_bytes = 0
#
# while True:
# content_range = "%s-%s/%s" % (start, end, size)
# headers = {'Content-Range': content_range,
# 'Content-Type': 'application/octet-stream'}
# req_params = {'headers': headers,
# 'verify': False,
# 'stream': True}
# response = session.get(self.file_bound_uri,
# requests_params=req_params)
# if response.status_code == 200:
# # If the size is zero, then this is the first time through
# # the loop and we don't want to write data because we
# # haven't yet figured out the total size of the file.
# if size > 0:
# current_bytes += chunk_size
# for chunk in response.iter_content(chunk_size):
# writefh.write(chunk)
#
# # Once we've downloaded the entire file, we can break out of
# # the loop
# if end == size:
# break
#
# crange = response.headers['Content-Range']
#
# #Determine the total number of bytes to read.
# if size == 0:
# size = int(crange.split('/')[-1]) - 1
#
# # If the file is smaller than the chunk_size, the BigIP
# # will return an HTTP 400. Adjust the chunk_size down to
# # the total file size...
# if chunk_size > size:
# end = size
#
# # ...and pass on the rest of the code.
# continue
#
# start += chunk_size
#
# if (current_bytes + chunk_size) > size:
# end = size
# else:
# end = start + chunk_size - 1
```
#### File: bigip/test/test_mixins.py
```python
import json
import pytest
from f5.bigip.mixins import CommandExecutionMixin
from f5.bigip.mixins import ToDictMixin
from f5.bigip.mixins import UnnamedResourceMixin
from f5.bigip.mixins import UnsupportedMethod
class MixinTestClass(ToDictMixin):
def __init__(self):
pass
def test_int():
MTCobj = MixinTestClass()
MTCobj.x = 1
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"x": 1}'
def test_list():
MTCobj = MixinTestClass()
MTCobj.x = [1, 'a']
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"x": [1, "a"]}'
def test_list_and_int():
MTCobj = MixinTestClass()
MTCobj.x = [1, 'a']
MTCobj.y = 1
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"y": 1, "x": [1, "a"]}'
def test_list_and_int_and_list2():
MTCobj = MixinTestClass()
MTCobj.x = [1, 'a']
MTCobj.y = 1
MTCobj.z = [1, 'a']
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"y": 1, "x": [1, "a"], "z": [1, "a"]}'
def test_two_refs():
MTCobj = MixinTestClass()
MTCobj.x = [1, 'a']
MTCobj.z = MTCobj.x
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) ==\
'{"x": [1, "a"], "z": ["TraversalRecord", "x"]}'
def test_tuple():
MTCobj = MixinTestClass()
MTCobj.x = (1, 'a')
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"x": [1, "a"]}'
class ToDictMixinAttribute(ToDictMixin):
def __init__(self):
pass
def test_ToDictMixinAttribute():
MTCobj = MixinTestClass()
TDMAttrObj = ToDictMixinAttribute()
MTCobj.x = TDMAttrObj
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"x": {}}'
def test_ToDictMixinAttribute_Nested():
MTCobj = MixinTestClass()
TDMAttrObj = ToDictMixinAttribute()
TDMAttrObj.y = {'a': 3}
MTCobj.x = TDMAttrObj
mtc_as_dict = MTCobj.to_dict()
assert json.dumps(mtc_as_dict) == '{"x": {"y": {"a": 3}}}'
class DictableClass(object):
def __init__(self):
self.test_attribute = 42
def test_TestClass_Basic():
TDMAttrObj = ToDictMixinAttribute()
TDMAttrObj.y = DictableClass()
mtc_as_dict = TDMAttrObj.to_dict()
assert json.dumps(mtc_as_dict) == '{"y": {"test_attribute": 42}}'
class TestUnnamedResourceMixin(object):
def test_create_raises(self):
unnamed_resource = UnnamedResourceMixin()
with pytest.raises(UnsupportedMethod):
unnamed_resource.create()
def test_delete_raises(self):
unnamed_resource = UnnamedResourceMixin()
with pytest.raises(UnsupportedMethod):
unnamed_resource.delete()
class TestCommandExecutionMixin(object):
def test_create_raises(self):
command_resource = CommandExecutionMixin()
with pytest.raises(UnsupportedMethod):
command_resource.create()
def test_delete_raises(self):
command_resource = CommandExecutionMixin()
with pytest.raises(UnsupportedMethod):
command_resource.delete()
def test_load_raises(self):
command_resource = CommandExecutionMixin()
with pytest.raises(UnsupportedMethod):
command_resource.load()
```
#### File: tm/auth/__init__.py
```python
from f5.bigip.resource import OrganizingCollection
from f5.bigip.tm.auth.password_policy import Password_Policy
from f5.bigip.tm.auth.user import Users
class Auth(OrganizingCollection):
def __init__(self, tm):
super(Auth, self).__init__(tm)
self._meta_data['allowed_lazy_attributes'] = [
Password_Policy,
Users
]
```
#### File: tm/cm/trust_domain.py
```python
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
class Trust_Domains(Collection):
"""BIG-IP® cluster trust-domain collection."""
def __init__(self, cm):
super(Trust_Domains, self).__init__(cm)
endpoint = 'trust-domain'
self._meta_data['uri'] =\
self._meta_data['container']._meta_data['uri'] + endpoint + '/'
self._meta_data['allowed_lazy_attributes'] = [Trust_Domain]
self._meta_data['attribute_registry'] = \
{'tm:cm:trust-domain:trust-domainstate': Trust_Domain}
class Trust_Domain(Resource):
"""BIG-IP® cluster trust-domain resource"""
def __init__(self, trust_domains):
super(Trust_Domain, self).__init__(trust_domains)
self._meta_data['required_json_kind'] =\
'tm:cm:trust-domain:trust-domainstate'
self._meta_data['required_creation_parameters'].update(('partition',))
```
#### File: tm/ltm/virtual.py
```python
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
class Virtuals(Collection):
"""BIG-IP® LTM virtual collection"""
def __init__(self, ltm):
super(Virtuals, self).__init__(ltm)
self._meta_data['allowed_lazy_attributes'] = [Virtual]
self._meta_data['attribute_registry'] =\
{'tm:ltm:virtual:virtualstate': Virtual}
class Virtual(Resource):
"""BIG-IP® LTM virtual resource"""
def __init__(self, virtual_s):
super(Virtual, self).__init__(virtual_s)
self._meta_data['allowed_lazy_attributes'] = [Profiles_s]
self._meta_data['required_json_kind'] = 'tm:ltm:virtual:virtualstate'
self._meta_data['attribute_registry'] =\
{'tm:ltm:virtual:profiles:profilescollectionstate': Profiles_s}
class Profiles(Resource):
'''A Resource concrete subclass.'''
def __init__(self, Profiles_s):
'''Autogenerated constructor.'''
super(Profiles, self).__init__(Profiles_s)
self._meta_data['template_generated'] = True
self._meta_data['required_json_kind'] =\
u"tm:ltm:virtual:profiles:profilesstate"
self._meta_data['attribute_registry'] =\
{}
class Profiles_s(Collection):
'''A Collection concrete subclass docstring.'''
def __init__(self, virtual):
'''Auto generated constructor.'''
super(Profiles_s, self).__init__(virtual)
self._meta_data['allowed_lazy_attributes'] = [Profiles]
self._meta_data['attribute_registry'] =\
{u'tm:ltm:virtual:profiles:profilesstate': Profiles}
self._meta_data['template_generated'] = True
```
#### File: tm/sys/db.py
```python
from f5.bigip.resource import Collection
from f5.bigip.resource import Resource
from f5.bigip.resource import UnsupportedOperation
class Dbs(Collection):
"""BIG-IP® db collection"""
def __init__(self, sys):
super(Dbs, self).__init__(sys)
self._meta_data['allowed_lazy_attributes'] = [Db]
self._meta_data['attribute_registry'] =\
{'tm:sys:db:dbstate': Db}
class Db(Resource):
"""BIG-IP® db resource
.. note::
db objects are read-only.
"""
def __init__(self, dbs):
super(Db, self).__init__(dbs)
self._meta_data['required_json_kind'] = 'tm:sys:db:dbstate'
def create(self, **kwargs):
'''Create is not supported for db resources.
:raises: UnsupportedOperation
'''
raise UnsupportedOperation(
"DB resources doesn't support create, only load and refresh"
)
def delete(self, **kwargs):
'''Delete is not supported for db resources.
:raises: UnsupportedOperation
'''
raise UnsupportedOperation(
"DB resources doesn't support delete, only load and refresh"
)
```
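For context, a hedged sketch of how these read-only `db` resources are typically reached through the SDK's `ManagementRoot` (host, credentials, and the new value are placeholders; `setup.run` is one example BIG-IP db key):

```python
from f5.bigip import ManagementRoot

mgmt = ManagementRoot("bigip.example.org", "admin", "secret")   # placeholder credentials

db = mgmt.tm.sys.dbs.db.load(name="setup.run")   # load an existing db key; create()/delete() raise
print(db.name, db.value)
db.update(value="false")                         # modifying the value is allowed; load/refresh re-read it
```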
#### File: f5/multi_device/trust_domain.py
```python
from f5.multi_device.exceptions import DeviceAlreadyInTrustDomain
from f5.multi_device.exceptions import DeviceNotTrusted
from f5.multi_device.device_group import DeviceGroup
from f5.multi_device.utils import get_device_info
from f5.multi_device.utils import get_device_names_to_objects
from f5.multi_device.utils import pollster
class TrustDomain(object):
'''Manages the trust domain of a BIG-IP® device.'''
iapp_actions = {'definition': {'implementation': None, 'presentation': ''}}
def __init__(self, **kwargs):
'''Initialize a trusted peer manager object.
The device_group_name set below is the default trust group that exists
on all BIG-IP® devices. We are fixing it here to that group.
:param kwargs: dict -- keyword args for devices and partition
'''
self.domain = {}
if kwargs:
self._set_attributes(**kwargs)
self.validate()
def _set_attributes(self, **kwargs):
'''Set attributes for instance in one place
:param kwargs: dict -- dictionary of keyword arguments
'''
self.devices = kwargs['devices'][:]
self.partition = kwargs['partition']
self.device_group_name = 'device_trust_group'
self.device_group_type = 'sync-only'
def validate(self):
'''Validate that devices are each trusted by one another
:param kwargs: dict -- keyword args for devices and partition
:raises: DeviceNotTrusted
'''
self._populate_domain()
missing = []
for domain_device in self.domain:
for truster, trustees in self.domain.iteritems():
if domain_device not in trustees:
missing.append((domain_device, truster, trustees))
if missing:
msg = ''
for item in missing:
msg += '\n%r is not trusted by %r, which trusts: %r' % \
(item[0], item[1], item[2])
raise DeviceNotTrusted(msg)
self.device_group = DeviceGroup(
devices=self.devices,
device_group_name=self.device_group_name,
device_group_type=self.device_group_type,
device_group_partition=self.partition
)
def _populate_domain(self):
'''Populate TrustDomain's domain attribute.
This entails an inspection of each device's certificate-authority
devices in its trust domain and recording them. After which, we
get a dictionary of who trusts who in the domain.
'''
self.domain = {}
for device in self.devices:
device_name = get_device_info(device).name
ca_devices = \
device.tm.cm.trust_domains.trust_domain.load(
name='Root'
).caDevices
self.domain[device_name] = [
d.replace('/%s/' % self.partition, '') for d in ca_devices
]
def create(self, **kwargs):
'''Add trusted peers to the root bigip device.
When adding a trusted device to a device, the trust is reflexive. That
is, the truster trusts the trustee and the trustee trusts the truster.
So we only need to add the trusted devices to one device.
:param kwargs: dict -- devices and partition
'''
self._set_attributes(**kwargs)
for device in self.devices[1:]:
self._add_trustee(device)
pollster(self.validate)()
def teardown(self):
'''Teardown trust domain by removing trusted devices.'''
for device in self.devices:
self._remove_trustee(device)
self._populate_domain()
self.domain = {}
def _add_trustee(self, device):
'''Add a single trusted device to the trust domain.
:param device: ManagementRoot object -- device to add to trust domain
'''
device_name = get_device_info(device).name
if device_name in self.domain:
msg = 'Device: %r is already in this trust domain.' % device_name
raise DeviceAlreadyInTrustDomain(msg)
self._modify_trust(self.devices[0], self._get_add_trustee_cmd, device)
def _remove_trustee(self, device):
'''Remove a trustee from the trust domain.
:param device: ManagementRoot object -- device to remove
'''
trustee_name = get_device_info(device).name
name_object_map = get_device_names_to_objects(self.devices)
delete_func = self._get_delete_trustee_cmd
for truster in self.domain:
if trustee_name in self.domain[truster] and \
truster != trustee_name:
truster_obj = name_object_map[truster]
self._modify_trust(truster_obj, delete_func, trustee_name)
self._populate_domain()
for trustee in self.domain[trustee_name]:
if trustee_name != trustee:
self._modify_trust(device, delete_func, trustee)
self.devices.remove(name_object_map[trustee_name])
def _modify_trust(self, truster, mod_peer_func, trustee):
'''Modify a trusted peer device by deploying an iapp.
:param truster: ManagementRoot object -- device on which to perform
commands
:param mod_peer_func: function -- function to call to modify peer
:param trustee: ManagementRoot object or str -- device to modify
'''
iapp_name = 'trusted_device'
mod_peer_cmd = mod_peer_func(trustee)
iapp_actions = self.iapp_actions.copy()
iapp_actions['definition']['implementation'] = mod_peer_cmd
self._deploy_iapp(iapp_name, iapp_actions, truster)
self._delete_iapp(iapp_name, truster)
def _delete_iapp(self, iapp_name, deploying_device):
'''Delete an iapp service and template on the root device.
:param iapp_name: str -- name of iapp
:param deploying_device: ManagementRoot object -- device where the
iapp will be deleted
'''
iapp = deploying_device.tm.sys.applications
iapp_serv = iapp.services.service.load(
name=iapp_name, partition=self.partition
)
iapp_serv.delete()
iapp_tmpl = iapp.templates.template.load(
name=iapp_name, partition=self.partition
)
iapp_tmpl.delete()
def _deploy_iapp(self, iapp_name, actions, deploying_device):
'''Deploy iapp to add trusted device
:param iapp_name: str -- name of iapp
:param actions: dict -- actions definition of iapp sections
:param deploying_device: ManagementRoot object -- device where the
iapp will be created
'''
tmpl = deploying_device.tm.sys.applications.templates.template
serv = deploying_device.tm.sys.applications.services.service
tmpl.create(name=iapp_name, partition=self.partition, actions=actions)
pollster(deploying_device.tm.sys.applications.templates.template.load)(
name=iapp_name, partition=self.partition
)
serv.create(
name=iapp_name,
partition=self.partition,
template='/%s/%s' % (self.partition, iapp_name)
)
def _get_add_trustee_cmd(self, trustee):
'''Get tmsh command to add a trusted device.
:param trustee: ManagementRoot object -- device to add as trusted
:returns: str -- tmsh command to add trustee
'''
trustee_info = pollster(get_device_info)(trustee)
print('Adding following peer to root: %s' % trustee_info.name)
username = trustee._meta_data['username']
password = trustee._meta_data['password']
return 'tmsh::modify cm trust-domain Root ca-devices add ' \
'\\{ %s \\} name %s username %s password %s' % \
(trustee_info.managementIp, trustee_info.name, username, password)
def _get_delete_trustee_cmd(self, trustee_name):
'''Get tmsh command to delete a trusted device.
:param trustee_name: str -- name of device to remove
:returns: str -- tmsh command to delete trusted device
'''
return 'tmsh::modify cm trust-domain Root ca-devices delete ' \
'\\{ %s \\}' % trustee_name
```
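A hedged sketch of building a trust domain from two connected devices, based on the constructor and `create()` signatures above (the `ManagementRoot` connection details are placeholders):

```python
from f5.bigip import ManagementRoot
from f5.multi_device.trust_domain import TrustDomain

a = ManagementRoot("bigip-a.example.org", "admin", "secret")   # placeholder devices
b = ManagementRoot("bigip-b.example.org", "admin", "secret")

domain = TrustDomain()
domain.create(devices=[a, b], partition="Common")   # deploys the iapp to add peers, then validates
domain.teardown()                                   # later: remove the mutual trust again
```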
#### File: utils/test/test_iapp_parser.py
```python
from f5.utils import iapp_parser as ip
import pytest
good_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
brace_in_quote_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for "" the template
}
implementation {
# TMSH"{}{{}}}}}""{{{{}}"implementation code
}
presentation {
# APL"{}{}{{{{{{" presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
no_desc_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules { ltm }
}'''
empty_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules { }
}'''
whitespace_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules {}
}'''
none_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules none
}'''
no_open_brace_templ = '''sys application template no_open_brace_templ {
actions {
definition {
html-help
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl {security role}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
no_close_brace_templ = '''sys application template no_close_brace_template {
actions {
definition {
html-help {
# HTML Help for the template
# Missing closing braces
implementation {
# TMSH implementation code
'''
no_pres_templ = '''sys application template no_pres_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {<security role>}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
no_name_templ = '''sys application template {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
bad_name_templ = '''sys application template bad#updown {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {<security role>}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
name_brace_templ = '''sys application template name_next_to_brace{
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {security role}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
good_attr_templ = '''sys application template good_templ {
actions {
definition {
html-help {}
implementation {}
presentation {}
}
}
description <template description>
partition just_a_partition name
}'''
no_help_templ = '''sys application template good_templ {
actions {
definition {
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm asm }
}'''
dot_name_templ = '''sys application template good.dot.templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
dot_hyphen_name_templ = '''sys application template good.-dot-hyphen.-templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
good_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
brace_in_quote_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for "" the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH"{}{{}}}}}""{{{{}}"implementation code',
u'presentation': u'# APL"{}{}{{{{{{" presentation language'
}
}
}
no_help_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm', u'asm'],
'actions': {
'definition': {
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
none_rm_templ_dict = {
u'name': u'good_templ',
u'partition': u'<partition name>',
u'requiresModules': u'none',
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
dot_name_templ_dict = {
u'name': u'good.dot.templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
dot_hyphen_name_templ_dict = {
u'name': u'good.-dot-hyphen.-templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
@pytest.fixture
def TemplateSectionSetup(request):
def tearDown():
prsr.template_sections.remove('notfound')
request.addfinalizer(tearDown)
prsr = ip.IappParser(good_templ)
prsr.template_sections.append('notfound')
return prsr
def test__init__():
prsr = ip.IappParser(good_templ)
assert prsr.template_str == good_templ
def test__init__error():
prsr = None
with pytest.raises(ip.EmptyTemplateException) as EmptyTemplateExceptInfo:
prsr = ip.IappParser('')
assert EmptyTemplateExceptInfo.value.message == \
'Template empty or None value.'
assert prsr is None
def test_get_section_end_index():
prsr = ip.IappParser(good_templ)
impl_start = prsr._get_section_start_index(u'implementation')
impl_end = prsr._get_section_end_index(u'implementation', impl_start)
templ_impl = unicode('''{
# TMSH implementation code
}''')
assert good_templ[impl_start:impl_end+1] == templ_impl
def test_get_section_start_index_no_open_brace_error():
prsr = ip.IappParser(no_open_brace_templ)
with pytest.raises(ip.NonextantSectionException) as \
NonextantSectionExceptInfo:
prsr._get_section_start_index(u'html-help')
assert NonextantSectionExceptInfo.value.message == \
'Section html-help not found in template'
def test_get_section_end_no_close_brace_error():
prsr = ip.IappParser(no_close_brace_templ)
with pytest.raises(ip.CurlyBraceMismatchException) as \
CurlyBraceMismatchExceptInfo:
help_start = prsr._get_section_start_index(u'html-help')
prsr._get_section_end_index(u'html_help', help_start)
assert CurlyBraceMismatchExceptInfo.value.message == \
'Curly braces mismatch in section html_help.'
def test_get_template_name():
prsr = ip.IappParser(good_templ)
assert prsr._get_template_name() == u'good_templ'
def test_get_template_name_next_to_brace():
prsr = ip.IappParser(name_brace_templ)
assert prsr._get_template_name() == u'name_next_to_brace'
def test_get_template_name_error():
prsr = ip.IappParser(no_name_templ)
with pytest.raises(ip.NonextantTemplateNameException) as \
NonextantTemplateNameExceptInfo:
prsr._get_template_name()
assert NonextantTemplateNameExceptInfo.value.message == \
'Template name not found.'
def test_get_template_name_bad_name_error():
prsr = ip.IappParser(bad_name_templ)
with pytest.raises(ip.NonextantTemplateNameException) as \
NonextantTemplateNameExceptInfo:
prsr._get_template_name()
assert NonextantTemplateNameExceptInfo.value.message == \
'Template name not found.'
def test_get_template_name_with_dot():
prsr = ip.IappParser(dot_name_templ)
assert prsr.parse_template() == dot_name_templ_dict
def test_get_template_name_with_dot_hyphen():
prsr = ip.IappParser(dot_hyphen_name_templ)
assert prsr.parse_template() == dot_hyphen_name_templ_dict
def test_parse_template():
prsr = ip.IappParser(good_templ)
assert prsr.parse_template() == good_templ_dict
def test_parse_template_brace_in_quote():
prsr = ip.IappParser(brace_in_quote_templ)
assert prsr.parse_template() == brace_in_quote_templ_dict
def test_parse_template_no_section_found(TemplateSectionSetup):
with pytest.raises(ip.NonextantSectionException) as \
NonextantSectionExceptInfo:
TemplateSectionSetup.parse_template()
assert 'notfound' in TemplateSectionSetup.template_sections
assert 'Section notfound not found in template' in \
NonextantSectionExceptInfo.value.message
def test_parse_template_no_section_found_not_required():
prsr = ip.IappParser(no_help_templ)
templ_dict = prsr.parse_template()
assert templ_dict == no_help_templ_dict
def test_get_template_attr():
prsr = ip.IappParser(good_attr_templ)
attr = prsr._get_template_attr(u'partition')
assert attr == u'just_a_partition name'
def test_get_template_attr_attr_not_exists():
prsr = ip.IappParser(good_attr_templ)
attr = prsr._get_template_attr(u'bad_attr')
assert attr is None
def test_attr_no_description():
prsr = ip.IappParser(no_desc_templ)
templ_dict = prsr.parse_template()
assert 'description' not in templ_dict
def test_attr_empty_rm_error():
prsr = ip.IappParser(empty_rm_templ)
with pytest.raises(ip.MalformedTCLListException) as ex:
prsr.parse_template()
assert 'requires-modules' in ex.value.message
def test_attr_whitespace_rm_error():
prsr = ip.IappParser(whitespace_rm_templ)
with pytest.raises(ip.MalformedTCLListException) as ex:
prsr.parse_template()
assert 'TCL list for "requires-modules" is malformed. If no elements are '\
'needed "none" should be used without curly braces.' in \
ex.value.message
def test_attr_none_rm():
prsr = ip.IappParser(none_rm_templ)
templ_dict = prsr.parse_template()
assert templ_dict == none_rm_templ_dict
``` |
{
"source": "jputrino/nginx-sphinx",
"score": 2
} |
#### File: src/nginx_sphinx/__init__.py
```python
import os
from os import path
def get_html_theme_path():
"""Return the html theme path for this template library.
:returns: List of directories to find template files in
"""
curdir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return [curdir]
def setup(app):
"""Set up the theme for distribution as a python package
:return: Adds nginx-sphinx to the html_themes path in Sphinx
"""
app.add_html_theme('nginx_sphinx', path.abspath(path.dirname(__file__)))
``` |
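For context, consuming the theme from a Sphinx project is a one-liner once the package is installed. The `conf.py` snippet below is a hypothetical example; the `html_theme_path` line is only needed on Sphinx versions that predate `add_html_theme`.
```python
# Hypothetical conf.py excerpt
import nginx_sphinx

html_theme = 'nginx_sphinx'
# Fallback for older Sphinx without the setup()/add_html_theme hook:
html_theme_path = nginx_sphinx.get_html_theme_path()
```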
{
"source": "jpvaldes/brainowl",
"score": 3
} |
#### File: brainowl/brainowl/brainowl.py
```python
import numpy as np
from scipy.special import expit, logit
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.isotonic import isotonic_regression
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.validation import check_array, check_is_fitted, check_X_y
from .solver import sparsa_bb
def sigmoid(t):
"""
Returns 1 / (1 + np.exp(-t))
"""
t *= -1
t = np.exp(t, t)
t += 1
t = np.reciprocal(t, t)
return t
def prox_owl(v, w):
r"""
OWL norm proximal operator
From pyowl: https://github.com/vene/pyowl/
Author: <NAME> <<EMAIL>>
The weights of the OWL norm can change its behavior:
- For l1, \lambda_1 = w_1 = w_2 = ... = w_n
- For l∞, \lambda_1 = w_1 > w_2 = w_3 ... = w_n = 0
- For OSCAR, w_i = λ1 + λ2(n - 1), for i = 1, ..., n, λ1 > 0, λ2 > 0
References
----------
<NAME>, <NAME>, The Ordered Weighted $l_1$ Norm:
Atomic Formulation, Projections, and Algorithms.
<NAME>, <NAME>, <NAME>, and <NAME>, Statistical Estimation and
Testing via the Ordered $l_1$ Norm.
"""
# === remove signs ===
s = np.abs(v)
# === sort permutation matrix ===
ix = np.argsort(s)[::-1]
# === u = sorted s ===
u = s[ix]
# === projection on the monotone, non-negative decreasing cone ===
x = isotonic_regression(u - w, y_min=0, increasing=False)
# === unsort ===
inv_ix = np.zeros_like(ix)
inv_ix[ix] = np.arange(len(v))
x = x[inv_ix]
# === restore signs ===
res = np.sign(v) * x
return res
def owl_weights(alpha, beta, n_features):
"""
Return weights for the OWL norm
Parameters
----------
alpha : float
For l1 and l∞ , regularization strength. For OSCAR, controls
regularization together with beta
beta : float or None
If None, selects l1 regularization. If 0, select l∞ regularization.
Otherwise, OSCAR where it controls regularization together with
alpha.
n_features : int
Number of features
Returns
-------
weights : array, n_features
Notes
-----
In summary,
- For l1, alpha = w_1 = w_2 = ... = w_n, beta is None
- For l∞, alpha = w_1 > w_2 = w_3 ... = w_n = 0, beta = 0
- For OSCAR, w_i = alpha + beta(n - 1), for i = 1, ..., n,
alpha > 0, beta > 0
"""
if beta is not None:
if beta != 0:
# OSCAR weights
return alpha + beta * np.arange(n_features, dtype=np.double)[::-1]
if beta == 0:
# l∞
coeffs = np.zeros(n_features, dtype=np.double)
coeffs[0] = alpha
return coeffs
else:
# l1
return np.full(n_features, alpha, dtype=np.double)
def prox_l1(x, thr):
"""
Compute the L1 proximal operator (soft-thresholding)
Parameters
----------
x : array
coefficients
thr : float
non-zero threshold
"""
return np.sign(x) * np.maximum(np.abs(x) - thr, 0)
def prox_l2(x, thr):
"""
Compute the L2 proximal operator
"""
norm = np.sqrt((x ** 2).sum())
return x * np.maximum(1 - (thr / norm), 0)
def log_loss(X, y, w, return_grad=True):
"""
Compute the log loss
"""
scores = X @ w
y_scores = y * scores
idx = y_scores > 0
obj = np.empty_like(y_scores)
obj[idx] = np.log1p(np.exp(-y_scores[idx]))
obj[~idx] = -y_scores[~idx] + np.log1p(np.exp(y_scores[~idx]))
obj = obj.sum()
if not return_grad:
return obj
prob = expit(y_scores)
grad = np.empty_like(w)
grad = X.T @ ((prob - 1) * y)
return obj, grad
def sq_hinge_loss(X, y, w, return_grad=True):
"""
Compute the squared hinge loss
"""
scores = X @ w
z = np.maximum(0, 1 - y * scores)
obj = np.sum(z ** 2)
if not return_grad:
return obj
grad = X.T @ (-2 * y * z)
return obj, grad
def modified_huber_loss(X, y, w, return_grad=True):
"""
See Elements of Statistical Learning, p. 427 and "Hybrid huberized
support vector machines for microarray classification and gene selection",
by Wang et al. 2008 in Bioinformatics.
The loss function is
0, if z > 1
(1 - z) ** 2, if 1 - a < z <= 1
2 * a * (1 - z) - a ** 2, if z <= 1 - a
where the constant a >= 0.
"""
scores = X @ w
z = y * scores
lower_bound = -1
# using np.piecewise to get rid of numerical instabilities that appeared
# sometimes
obj = np.piecewise(
z,
[z <= lower_bound, z >= 1],
[lambda z: -4 * z,
lambda z: 0,
lambda z: (1 - z) ** 2
]
)
obj = obj.sum()
if not return_grad:
return obj
grad = np.piecewise(
z,
[z <= lower_bound, z >= 1],
[lambda z: -4,
lambda z: 0,
lambda z: 2 * (z - 1)
]
)
grad *= y
grad = X.T @ grad
return obj, grad
class SparsaClassifier(BaseEstimator, ClassifierMixin):
"""
Classifier based on the sparsa_bb solver.
Parameters
----------
loss : str, 'log', 'modified_huber', or 'squared_hinge'
loss function to use.
penalty : str, 'owl', 'l1', or, 'l2'
norm used for the penalty term
alpha : float
regularization strength
beta : float
regularization strength, only used by the OWL norm (see notes)
max_iter : int
maximum number of iterations.
max_inner_iter : int
maximum number of iterations for the line search
eta : float
step factor for the line search, example values are 2 or 3
tol : float
tolerance for the stopping criterion.
memory : int
number of objective values to store for the line search, typically 10
verbose : bool
whether to show extra information
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Sample training data passed during :meth:`fit`
y_ : array, shape = [n_samples]
Target labels passed during :meth:`fit`
Examples
--------
scl = StandardScaler()
X_train = scl.fit_transform(X_train)
X_test = scl.transform(X_test)
sclf = SparsaClassifier()
    sclf.fit(X_train, y_train)
    y_pred = sclf.predict(X_test)
Notes
-----
The OWL norm behaves differently depending on the values of alpha and beta:
- For l1, set alpha > 0, beta is None
- For l∞, set alpha > 0, beta = 0
- For OSCAR, set alpha > 0, beta > 0
"""
losses = {'log': log_loss,
'modified_huber': modified_huber_loss,
'squared_hinge': sq_hinge_loss}
penalties = {'owl': prox_owl,
'l1': prox_l1,
'l2': prox_l2,
}
def __init__(self, loss="log", penalty='owl', alpha=1e-3, beta=1e-3,
max_iter=100, max_inner_iter=10, eta=2, tol=1e-3, memory=10,
verbose=False):
self.loss = loss
self.penalty = penalty
self.alpha = alpha
self.beta = beta
self.max_iter = max_iter
self.max_inner_iter = max_inner_iter
self.eta = eta
self.tol = tol
self.memory = memory
self.verbose = verbose
def _get_penalty_weights(self):
"""
Return the penalty weights
"""
if self.penalty == 'owl':
penalty_weights = owl_weights(self.alpha, self.beta,
self.n_features)
elif self.penalty == 'l1':
penalty_weights = self.alpha
elif self.penalty == 'l2':
penalty_weights = self.alpha
else:
raise ValueError(f"No penalty found named {self.penalty}")
return penalty_weights
def fit(self, X, y):
"""
Fit the classifier.
Parameters
----------
X : array, shape = [n_samples, n_features]
Training input samples
y : array, shape = [n_samples]
Target labels consisting of an array of int.
"""
X, y = check_X_y(X, y)
self.n_samples, self.n_features = X.shape
self.X_ = X
x_init = np.zeros(self.n_features)
loss_ = self.losses.get(self.loss)
prox_ = self.penalties.get(self.penalty)
penalty_weights = self._get_penalty_weights()
self.lb_ = LabelBinarizer(pos_label=1, neg_label=-1)
self.y_ = self.lb_.fit_transform(y)
self.classes_ = self.lb_.classes_
if self.y_.shape[1] > 2:
# multi-class, do OvR
self.coef_ = self._fit_multiclass(self.X_, self.y_, x_init,
penalty_weights, loss_,
prox_)
else:
self.coef_ = sparsa_bb(self.X_, self.y_.ravel(), x_init,
penalty_weights, loss_, prox_,
self.max_iter, self.max_inner_iter,
self.eta, self.tol, self.memory,
self.verbose)
return self
def _fit_multiclass(self, X, y, x_init, penalty_weights, loss_, prox_):
"""
Use a one vs rest scheme to fit multiclass problems.
The first dimension of the returned coefficient matrix corresponds to
the number of classes.
"""
n_classes = y.shape[1]
n_voxels = X.shape[1]
coeffs = np.zeros((n_classes, n_voxels))
for class_num, y_b in enumerate(y.T):
this_w = sparsa_bb(X, y_b, x_init, penalty_weights, loss_, prox_,
self.max_iter, self.max_inner_iter,
self.eta, self.tol, self.memory,
self.verbose)
coeffs[class_num] = this_w
return coeffs
def predict(self, X):
"""
Predict the class of each sample
Parameters
----------
X : array, (n_samples, n_features)
Data samples.
Returns
-------
predictions : array, (n_samples, n_classes)
"""
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
if self.loss == 'log':
pp = self.predict_proba(X)
y_pred = np.argmax(pp, axis=1)
else:
scores = self.decision_function(X)
if len(scores.shape) > 1:
y_pred = scores.argmax(axis=1)
else:
y_pred = (scores > 0).astype(int)
return self.classes_[y_pred]
def predict_proba(self, X):
"""
Predict the class probability of samples
Parameters
----------
X : array, (n_samples, n_features)
Data samples.
Returns
-------
        probabilities : array, (n_samples, n_classes)
"""
error = ("predict_proba only implemented for loss='log'"
" or loss='modified_huber', but"
f" {self.loss} given")
if self.loss == 'log':
pred_prob = self._predict_proba_logloss(X)
elif self.loss == 'modified_huber':
pred_prob = self._predict_proba_modhuber(X)
else:
raise NotImplementedError(error)
return pred_prob
def _predict_proba_logloss(self, X):
"""
        Predict the class probability of samples if the loss used is the log
        loss.
Parameters
----------
        X : array, (n_samples, n_features)
Data samples.
Returns
-------
probabilities : array, (n_samples, n_classes)
"""
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
if len(self.coef_.shape) > 1:
probabilities = []
for col in self.coef_:
this_proba = expit(X @ col)
probabilities.append(this_proba)
predicted_probabilities = np.array(probabilities).T
return predicted_probabilities
else:
probabilities = expit(X @ self.coef_.T)
return np.vstack((1 - probabilities, probabilities)).T
def _predict_proba_modhuber(self, X):
"""
        Predict the class probability of samples if the loss used is the
        modified Huber loss.
Parameters
----------
        X : array, (n_samples, n_features)
Data samples.
Returns
-------
probabilities : array, (n_samples, n_classes)
Notes
-----
The modified huber loss ("huberised" square hinge loss in Elements of
Statistical Learning) estimates a linear transformation of the
posterior probabilities.
That means that we can return well calibrated probabilities like we do
for the log loss.
The probabilities are not so straightforward to compute. This code is
based on the SGD classifier from scikit-learn. The two references there
are:
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
scores = self.decision_function(X)
binary = len(self.coef_.shape) == 1
if binary:
prob_ = np.ones((scores.shape[0], 2))
prob = prob_[:, 1]
else:
prob = scores
# from Zhang 2002: class_prob = (truncated(scores) + 1) / 2
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob_[:, 0] -= prob
prob = prob_
else:
# work around to produce uniform probabilities because the above
# might assign zero prob to all classes
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
def decision_function(self, X):
"""
Predict the signed distance of samples to the hyperplane
Parameters
----------
X : array, (n_samples, n_features)
Data samples.
Returns
-------
scores : array, n_samples or (n_samples, n_classes) if multiclass
"""
check_is_fitted(self, ['X_', 'y_'])
X = check_array(X)
scores = X @ self.coef_.T
if len(scores.shape) > 1:
return scores
else:
return scores.ravel()
``` |
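The `owl_weights` docstring above lists three regimes, and the weight vectors they produce are easy to check numerically. A quick sketch, with the import path assumed from the repository layout:
```python
from brainowl.brainowl import owl_weights  # path assumed

print(owl_weights(1.0, None, 4))   # l1:    [1.  1.  1.  1. ]
print(owl_weights(1.0, 0, 4))      # l-inf: [1.  0.  0.  0. ]
print(owl_weights(1.0, 0.5, 4))    # OSCAR: [2.5 2.  1.5 1. ]
```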
{
"source": "jpvanhal/postgresql-audit",
"score": 2
} |
#### File: jpvanhal/postgresql-audit/setup.py
```python
import os
import re
from setuptools import find_packages, setup
HERE = os.path.dirname(os.path.abspath(__file__))
def get_version():
filename = os.path.join(HERE, 'postgresql_audit', '__init__.py')
with open(filename) as f:
contents = f.read()
pattern = r"^__version__ = '(.*?)'$"
return re.search(pattern, contents, re.MULTILINE).group(1)
setup(
name='PostgreSQL-Audit',
version=get_version(),
url='https://github.com/kvesteri/postgresql-audit',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description=(
'Versioning and auditing extension for PostgreSQL and SQLAlchemy.'
),
packages=find_packages('.', exclude=['tests', 'tests.*']),
long_description=__doc__,
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'SQLAlchemy>=0.9.4',
'SQLAlchemy-Utils>=0.29.8'
],
python_requires='>=3.6',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
``` |
{
"source": "jpvanhal/sqlalchemy-searchable",
"score": 2
} |
#### File: jpvanhal/sqlalchemy-searchable/setup.py
```python
import os
import re
from setuptools import setup
HERE = os.path.dirname(os.path.abspath(__file__))
def get_version():
filename = os.path.join(HERE, 'sqlalchemy_searchable', '__init__.py')
with open(filename) as f:
contents = f.read()
pattern = r"^__version__ = '(.*?)'$"
return re.search(pattern, contents, re.MULTILINE).group(1)
setup(
name='SQLAlchemy-Searchable',
version=get_version(),
url='https://github.com/kvesteri/sqlalchemy-searchable',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description=(
'Provides fulltext search capabilities for declarative SQLAlchemy'
' models.'
),
long_description=__doc__,
packages=['sqlalchemy_searchable'],
zip_safe=False,
include_package_data=True,
platforms='any',
python_requires='>=3.6',
install_requires=[
'SQLAlchemy>=1.3.0',
'SQLAlchemy-Utils>=0.37.5',
'validators>=0.3.0',
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
```
#### File: sqlalchemy-searchable/tests/test_searchable.py
```python
from sqlalchemy_searchable import search, search_manager
from tests import create_test_cases, TestCase
class SearchQueryMixinTestCase(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.items = [
self.TextItem(
name=u'index',
content=u'some content'
),
self.TextItem(
name=u'admin',
content=u'admin content'
),
self.TextItem(
name=u'home',
content=u'this is the home page of <EMAIL>'
),
self.TextItem(
name=u'not a some content',
content=u'not a name'
)
]
self.session.add_all(self.items)
self.session.commit()
def test_searches_through_all_fulltext_indexed_fields(self):
assert (
self.TextItemQuery(self.TextItem, self.session)
.search('admin').count() == 1
)
def test_search_supports_term_splitting(self):
assert (
self.TextItemQuery(self.TextItem, self.session)
.search('content').count() == 3
)
def test_term_splitting_supports_multiple_spaces(self):
query = self.TextItemQuery(self.TextItem, self.session)
        assert query.search('content some').first().name == u'index'
        assert query.search('content  some').first().name == u'index'
        assert query.search('  ').count() == 4
def test_search_by_email(self):
assert self.TextItemQuery(
self.TextItem, self.session
).search('<EMAIL>').count()
def test_supports_regconfig_parameter(self):
query = self.TextItemQuery(self.TextItem, self.session)
query = query.search(u'orrimorri', regconfig='finnish')
assert (
'parse_websearch(%(parse_websearch_1)s, %(parse_websearch_2)s)'
in str(query.statement.compile(self.session.bind))
)
def test_supports_vector_parameter(self):
vector = self.TextItem.content_search_vector
query = self.TextItemQuery(self.TextItem, self.session)
query = query.search('content', vector=vector)
assert query.count() == 2
def test_search_specific_columns(self):
query = search(self.session.query(self.TextItem.id), 'admin')
assert query.count() == 1
def test_sorted_search_results(self):
query = self.TextItemQuery(self.TextItem, self.session)
sorted_results = query.search('some content', sort=True).all()
assert sorted_results == self.items[0:2] + [self.items[3]]
class TestUsesGlobalConfigOptionsAsFallbacks(TestCase):
def setup_method(self, method):
search_manager.options['regconfig'] = 'pg_catalog.simple'
TestCase.setup_method(self, method)
self.items = [
self.TextItem(
name=u'index',
content=u'some content'
),
self.TextItem(
name=u'admin',
content=u'admin content'
),
self.TextItem(
name=u'home',
content=u'this is the home page of <EMAIL>'
),
self.TextItem(
name=u'not a some content',
content=u'not a name'
)
]
self.session.add_all(self.items)
self.session.commit()
def teardown_method(self, method):
TestCase.teardown_method(self, method)
search_manager.options['regconfig'] = 'pg_catalog.english'
def test_uses_global_regconfig_as_fallback(self):
query = search(self.session.query(self.TextItem.id), 'the')
assert query.count() == 1
create_test_cases(SearchQueryMixinTestCase)
class TestSearchableInheritance(TestCase):
def setup_method(self, method):
TestCase.setup_method(self, method)
self.session.add(self.Article(name=u'index', content=u'some content'))
self.session.add(self.Article(name=u'admin', content=u'admin content'))
self.session.add(
self.Article(name=u'home', content=u'this is the home page')
)
self.session.commit()
def test_supports_inheritance(self):
assert (
self.TextItemQuery(self.Article, self.session)
.search('content').count() == 2
)
``` |
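These tests lean on fixtures (`TextItem`, `TextItemQuery`, per-column vectors such as `content_search_vector`) defined in `tests/__init__.py`, which is not shown here. Below is a minimal sketch of what such a model looks like, following the documented sqlalchemy-searchable pattern; treat the exact calls as assumptions, since `make_searchable`'s signature has changed across releases.
```python
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Query
from sqlalchemy_searchable import SearchQueryMixin, make_searchable
from sqlalchemy_utils.types import TSVectorType

Base = declarative_base()
make_searchable(Base.metadata)  # recent releases take the metadata object

class TextItemQuery(Query, SearchQueryMixin):
    pass

class TextItem(Base):
    __tablename__ = 'textitem'
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.Unicode(255))
    content = sa.Column(sa.UnicodeText)
    search_vector = sa.Column(TSVectorType('name', 'content'))
    content_search_vector = sa.Column(TSVectorType('content'))
```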
{
"source": "jpvanhal/tasks-backend",
"score": 2
} |
#### File: migrations/versions/b818a9fce157_create_table_tasks.py
```python
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'b818a9fce157'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.execute('CREATE EXTENSION "uuid-ossp"')
op.create_table(
'tasks',
sa.Column('id', postgresql.UUID(as_uuid=True), server_default=sa.text('uuid_generate_v4()'), nullable=False),
sa.Column('title', sa.Text(), nullable=False),
sa.Column('is_completed', sa.Boolean(), server_default=sa.text('false'), nullable=False),
sa.Column('created_at', sa.TIMESTAMP(), server_default=sa.text('now()'), nullable=False),
sa.PrimaryKeyConstraint('id'),
)
def downgrade():
op.drop_table('tasks')
op.execute('DROP EXTENSION "uuid-ossp"')
```
#### File: tasks-backend/tasks/resources.py
```python
from flask_rest_jsonapi import ResourceDetail, ResourceList
from flask_rest_jsonapi.exceptions import ObjectNotFound
from .extensions import db
from .models import Task
from .schemas import TaskSchema
class TaskList(ResourceList):
schema = TaskSchema
data_layer = {
'session': db.session,
'model': Task,
}
class TaskDetail(ResourceDetail):
schema = TaskSchema
data_layer = {
'session': db.session,
'model': Task,
}
def after_get(self, result):
if result['data'] is None:
raise ObjectNotFound(detail='', source={})
``` |
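`Task` and `TaskSchema` are imported above but defined elsewhere in the package. The sketch below reconstructs plausible definitions from the columns created in the `b818a9fce157` migration; the field names mirror the migration, while the schema options and view names are assumptions.
```python
import sqlalchemy as sa
from marshmallow_jsonapi import fields
from marshmallow_jsonapi.flask import Schema
from sqlalchemy.dialects import postgresql
from .extensions import db

class Task(db.Model):
    __tablename__ = 'tasks'
    id = sa.Column(postgresql.UUID(as_uuid=True), primary_key=True,
                   server_default=sa.text('uuid_generate_v4()'))
    title = sa.Column(sa.Text, nullable=False)
    is_completed = sa.Column(sa.Boolean, nullable=False,
                             server_default=sa.text('false'))
    created_at = sa.Column(sa.TIMESTAMP, nullable=False,
                           server_default=sa.text('now()'))

class TaskSchema(Schema):
    class Meta:
        type_ = 'tasks'
        self_view = 'task_detail'          # endpoint names are assumptions
        self_view_kwargs = {'id': '<id>'}
        self_view_many = 'task_list'
    id = fields.Str(dump_only=True)
    title = fields.Str(required=True)
    is_completed = fields.Boolean()
    created_at = fields.DateTime(dump_only=True)
```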
{
"source": "jpvantassel/gi-website",
"score": 3
} |
#### File: construction/course/0_process_responses_clean.py
```python
import pandas as pd
import re
import json
# data = pd.read_csv("2019_08_responses_clean.csv")
data = pd.read_csv("2020_01_responses_clean.csv")
with open("classes_by_dept.json", "r") as f:
classes_by_dept = json.load(f)
def find_dept_from_name(depths, name):
    for dept, courses in depths.items():
        if name in courses:
            return dept
    raise ValueError(f"name={name}, not found")
# Courses is a dictionary: courses[dept][course][rating]
courses = {}
# Initialize variables to defaults
index = 0
# Loop across coursework
for header in data:
if header.startswith("Coursework"):
name = re.findall(r"\[(.*)\]", header)[0]
dept = find_dept_from_name(classes_by_dept, name)
# dept_sum = 0
if dept not in courses.keys():
courses.update({dept: {}})
courses[dept].update({name: {}})
# course_sum = 0
for rating in range(1, 6):
rating_str = str(rating)
courses[dept][name].update({rating_str: 0})
for response in data[header]:
if not pd.isna(response):
if response[0] == rating_str:
courses[dept][name][rating_str] += 1
# course_sum += 1
# dept_sum += 1
# if course_sum == 0:
# del courses[dept][name]
# if dept_sum == 0:
# del courses[dept]
# with open("2019_08_survey_results.json", "w") as f:
with open("2020_01_survey_results.json", "w") as f:
json.dump(courses, f, indent=2)
``` |
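For reference, the nested `courses[dept][course][rating]` structure the script dumps looks like this; the department, course names, and counts are made up.
```python
example_output = {
    "CE": {
        "CE 397 Special Topics": {"1": 0, "2": 1, "3": 4, "4": 7, "5": 2}
    },
    "GEO": {
        "GEO 384 Seminar": {"1": 1, "2": 0, "3": 2, "4": 3, "5": 5}
    }
}
```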
{
"source": "jpvantassel/seisflows",
"score": 3
} |
#### File: plugins/line_search/bracket.py
```python
import numpy as np
# Local imports
from seisflows.plugins.line_search import Base
from seisflows.tools.math import backtrack2, polyfit2
class Bracket(Base):
""" Implements bracketing line search
Variables
          x - list of step lengths from current line search
          f - corresponding list of function values
gtg - dot product of gradient with itself
gtp - dot product of gradient and search direction
Status codes
status > 0 : finished
status == 0 : not finished
status < 0 : failed
"""
def calculate_step(self):
""" Determines step length and search status
"""
x, f, gtg, gtp, step_count, update_count = self.search_history()
if step_count == 0 and update_count == 0:
# based on idea from Dennis and Schnabel
alpha = gtg[-1]**-1
status = 0
elif step_count == 0:
# based on the first equation in sec 3.5 of Nocedal and Wright 2ed
idx = np.argmin(self.func_vals[:-1])
alpha = self.step_lens[idx] * gtp[-2]/gtp[-1]
status = 0
elif _check_bracket(x, f) and _good_enough(x, f):
alpha = x[f.argmin()]
status = 1
elif _check_bracket(x, f):
alpha = polyfit2(x, f)
status = 0
elif step_count <= self.step_count_max and all(f <= f[0]):
# we need a larger step length
alpha = 1.618034*x[-1]
status = 0
elif step_count <= self.step_count_max:
# we need a smaller step length
slope = gtp[-1]/gtg[-1]
alpha = backtrack2(f[0], slope, x[1], f[1], b1=0.1, b2=0.5)
status = 0
else:
# failed because step_count_max exceeded
alpha = None
status = -1
        # apply optional step length safeguard; skip it when the search
        # failed above and alpha is None
        if alpha is not None:
            if alpha > self.step_len_max and step_count == 0:
                alpha = 0.618034*self.step_len_max
                status = 0
            elif alpha > self.step_len_max:
                # stop because safeguard prevents us from going further
                alpha = self.step_len_max
                status = 1
        return alpha, status
def _check_bracket(step_lens, func_vals):
""" Checks if minimum has been bracketed
"""
x, f = step_lens, func_vals
imin, fmin = f.argmin(), f.min()
if (fmin < f[0]) and any(f[imin:] > fmin):
return 1
else:
return 0
def _good_enough(step_lens, func_vals, thresh=np.log10(1.2)):
""" Checks if step length is reasonably close to quadratic estimate
"""
x, f = step_lens, func_vals
if not _check_bracket(x, f):
return 0
x0 = polyfit2(x, f)
if any(np.abs(np.log10(x[1:]/x0)) < thresh):
return 1
else:
return 0
```
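A tiny worked example of the two helpers above, with hand-picked values so the bracketing test can be traced by eye; the import path is assumed from the file location.
```python
import numpy as np
from seisflows.plugins.line_search.bracket import _check_bracket  # path assumed

x = np.array([0.0, 0.4, 0.8, 1.6])   # trial step lengths
f = np.array([10.0, 6.0, 4.0, 9.0])  # corresponding misfit values
# f dips below f[0] and rises again after its minimum at x=0.8, so a
# minimum is bracketed and _check_bracket returns 1:
assert _check_bracket(x, f) == 1
# _good_enough would then compare x[f.argmin()] against the quadratic fit
# polyfit2(x, f) and accept once they agree to within ~log10(1.2).
```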
#### File: seisflows/postprocess/base.py
```python
import sys
# Import Numpy
import numpy as np
# Local imports
from os.path import join
from seisflows.tools import unix
from seisflows.tools.tools import exists
from seisflows.config import ParameterError
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
system = sys.modules['seisflows_system']
solver = sys.modules['seisflows_solver']
class base(object):
""" Regularization, smoothing, sharpening, masking and related operations
on models or gradients
"""
def check(self):
""" Checks parameters and paths
"""
if 'SMOOTH' not in PAR:
setattr(PAR, 'SMOOTH', 0.)
if 'MASK' not in PATH:
setattr(PATH, 'MASK', None)
if PATH.MASK:
assert exists(PATH.MASK)
def setup(self):
""" Placeholder for initialization or setup tasks
"""
pass
def write_gradient(self, path):
"""
Combines contributions from individual sources and material parameters
to get the gradient, and optionally applies user-supplied scaling
:input path: directory from which kernels are read and to which
gradient is written
"""
if not exists(path):
raise Exception
        # because processing operations can be quite expensive, they must be
        # run through the HPC system interface; since processing does not
        # involve embarrassingly parallel tasks, we use system.run_single
        # instead of system.run
system.run_single('postprocess', 'process_kernels',
path=path+'/kernels',
parameters=solver.parameters)
gradient = solver.load(
path+'/'+'kernels/sum', suffix='_kernel')
# merge into a single vector
gradient = solver.merge(gradient)
# convert to absolute perturbations, log dm --> dm
# see Eq.13 Tromp et al 2005
gradient *= solver.merge(solver.load(path + '/' + 'model'))
if PATH.MASK:
# to scale the gradient, users can supply "masks" by exactly
# mimicking the file format in which models are stored
mask = solver.merge(solver.load(PATH.MASK))
# while both masking and preconditioning involve scaling the
# gradient, they are fundamentally different operations:
# masking is ad hoc, preconditioning is a change of variables;
# see Modrak & Tromp 2016 GJI Seismic waveform inversion best
            # practices: regional, global and exploration test cases
solver.save(solver.split(gradient),
path + '/' + 'gradient_nomask',
parameters=solver.parameters,
suffix='_kernel')
solver.save(solver.split(gradient*mask),
path + '/' + 'gradient',
parameters=solver.parameters,
suffix='_kernel')
else:
solver.save(solver.split(gradient),
path + '/' + 'gradient',
parameters=solver.parameters,
suffix='_kernel')
def process_kernels(self, path, parameters):
"""
Sums kernels from individual sources, with optional smoothing
:input path: directory containing sensitivity kernels
:input parameters: list of material parameters e.g. ['vp','vs']
"""
if not exists(path):
raise Exception
if PAR.SMOOTH > 0:
solver.combine(
input_path=path,
output_path=path+'/'+'sum_nosmooth',
parameters=parameters)
solver.smooth(
input_path=path+'/'+'sum_nosmooth',
output_path=path+'/'+'sum',
parameters=parameters,
span=PAR.SMOOTH)
else:
solver.combine(
input_path=path,
output_path=path+'/'+'sum',
parameters=parameters)
```
#### File: seisflows/tools/err.py
```python
class ParameterError(ValueError):
def __init__(self, *args):
if len(args) == 0:
msg = 'Bad parameter.'
super(ParameterError, self).__init__(msg)
elif len(args) == 1:
msg = 'Bad parameter: %s' % args[0]
super(ParameterError, self).__init__(msg)
elif args[1] not in args[0]:
msg = '%s is not defined.' % args[1]
super(ParameterError, self).__init__(msg)
        else:
            # the parameter exists, so report its (bad) value instead
            msg = '%s has bad value: %s' % (args[1], getattr(args[0], args[1]))
            super(ParameterError, self).__init__(msg)
``` |
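A short usage sketch for the branches above. `_FakeParams` is only a stand-in for the parameters object seisflows normally passes in, and the import path is assumed.
```python
from seisflows.tools.err import ParameterError  # path assumed

class _FakeParams(object):
    """Stand-in supporting `in` checks and attribute access."""
    NT = 0
    def __contains__(self, key):
        return hasattr(self, key)

PAR = _FakeParams()
print(ParameterError())           # Bad parameter.
print(ParameterError('DT'))       # Bad parameter: DT
print(ParameterError(PAR, 'DT'))  # DT is not defined.
print(ParameterError(PAR, 'NT'))  # NT has bad value: 0
```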
{
"source": "jpvantassel/tictactoe",
"score": 3
} |
#### File: jpvantassel/tictactoe/plot.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import unittest
def determinecell(board, pt, bounds_x, bounds_y):
# Determine X cell
for col in range(len(bounds_x)-1):
if ((pt[0] > bounds_x[col]) & (pt[0] < bounds_x[col+1])):
break
# Determine Y cell
for row in range(len(bounds_y)-1):
if ((pt[1] > bounds_y[row]) & (pt[1] < bounds_y[row+1])):
break
# Check if cell is empty and its a allowable move
good_move = True if board[row][col] == 0 else False
return (row, col, good_move)
def updateplot(s):
print(s)
plt.title(s, fontsize=16)
plt.draw()
def plot_state(board, ax=None):
x = [1, 2, 3]
y = [3, 2, 1]
    ax_was_provided = ax is not None
    if ax is None:
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
fig.canvas.manager.window.wm_geometry("+500+100")
# Show current board state
cmap = plt.get_cmap("plasma")
# colors = [["#deeaee"], #violet
# ["#b1cbbb"], # pink
# ["#eea29a"]] # green
# colors = tuple(int(colors[i:i+2], 16) for i in (0, 2, 4)))
# cmap = mpl.colors.ListedColormap(colors)
ax.imshow(board, cmap=cmap)
linewidth = 5
ax.plot([0.5, 0.5], [-0.5, 2.5], '-k', linewidth=linewidth)
ax.plot([1.5, 1.5], [-0.5, 2.5], '-k', linewidth=linewidth)
ax.plot([-.5, 2.5], [0.5, 0.5], '-k', linewidth=linewidth)
ax.plot([-.5, 2.5], [1.5, 1.5], '-k', linewidth=linewidth)
# Loop over data dimensions and create text showing the values.
for row in range(len(x)):
for col in range(len(y)):
if board[row][col] == 1:
txt = 'X'
elif board[row][col] == 2:
txt = 'O'
else:
txt = ' '
ax.text(col, row, txt, ha="center", va="center",
color="k", size=50)
ax.axis('off')
return (ax) if ax_was_provided else (fig, ax)
def plot_utility(utility, ax=None):
x = [1, 2, 3]
y = [3, 2, 1]
if ax is None:
_, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
cmap = plt.get_cmap("winter")
ax.imshow(utility, cmap=cmap)
# Loop over data dimensions and create text showing the value.
for row in range(len(y)):
for col in range(len(x)):
if utility[row, col] == -np.inf:
txt = ' '
else:
txt = int(utility[row, col])
ax.text(col, row, txt, ha="center", va="center",
color="k", size=30)
ax.set_title("Board Utility")
ax.axis('off')
return ax
def plotforhuman(board, current_player):
bounds_x = [-0.5, 0.5, 1.5, 2.5]
bounds_y = [-0.5, 0.5, 1.5, 2.5]
_, ax = plot_state(board)
updateplot('Click Anywhere to Begin')
plt.waitforbuttonpress()
good_move = False
txt = 'X' if current_player == 1 else 'O'
while True:
updateplot('Select the Desired Cell')
while not good_move:
pt = plt.ginput(1, timeout=-1)
if len(pt) == 0:
good_move = False
else:
row, col, good_move = determinecell(board, pt[-1],
bounds_x, bounds_y)
if not good_move:
updateplot('Move is not Allowed. Select Again')
text = ax.text(col, row, txt, ha="center", va="center",
color="k", size=50)
updateplot('Happy with Move? Key=Yes, Mouse=No.')
if plt.waitforbuttonpress():
break
text.remove()
good_move = False
return (row, col)
def view_state_and_utility(state, utility):
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
plot_state(state, axs[0])
plot_utility(utility, axs[1])
fig.tight_layout()
plt.show()
class Test_Plot(unittest.TestCase):
def test_determinecell(self):
bounds_x = [-0.5, 0.5, 1.5, 2.5]
bounds_y = [-0.5, 0.5, 1.5, 2.5]
board = np.array([[1, 2, 0], [0, 0, 0], [1, 2, 2]])
pt = (0, 0)
row, col, good = determinecell(board, pt, bounds_x, bounds_y)
self.assertEqual(row, 0)
self.assertEqual(col, 0)
self.assertFalse(good)
pt = (1, 1)
row, col, good = determinecell(board, pt, bounds_x, bounds_y)
self.assertEqual(row, 1)
self.assertEqual(col, 1)
self.assertTrue(good)
pt = (2, 2)
row, col, good = determinecell(board, pt, bounds_x, bounds_y)
self.assertEqual(row, 2)
self.assertEqual(col, 2)
self.assertFalse(good)
def test_plotforhuman(self):
# Test move for player 1
board = np.array([[1, 2, 0], [0, 0, 0], [1, 2, 2]])
row, col = plotforhuman(board, 1)
self.assertEqual(row, 1)
self.assertEqual(col, 0)
# Test move for player 2
board = np.array([[1, 2, 0], [0, 0, 0], [1, 2, 2]])
row, col = plotforhuman(board, 2)
self.assertEqual(row, 1)
self.assertEqual(col, 1)
if __name__ == "__main__":
unittest.main()
```
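A quick non-interactive way to exercise the plotting helpers above; the board layout and utility values are made up, and the module is assumed to be importable as `plot`.
```python
import numpy as np
from plot import view_state_and_utility  # module name assumed

board = np.array([[1, 2, 0],
                  [0, 0, 0],
                  [1, 2, 2]])
utility = np.full((3, 3), -np.inf)   # occupied cells stay at -inf
utility[0][2] = 1
utility[1][0] = 0
utility[1][1] = 2
utility[1][2] = -1
view_state_and_utility(board, utility)
```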
#### File: jpvantassel/tictactoe/updateutility.py
```python
import numpy as np
import unittest
from transform import board_transform, board_itransform
from evalboard import diffuse_utility, nd3_to_tuple, tuple_to_nd3
def update_utility(boards_played, game_boards, game_moves,
player, winner, loser, reward, punishment,
flag_indirect=True):
# Apply direct reward/punishment to player
for key, move in zip(game_boards[player], game_moves[player]):
stimulis = reward if player is winner else punishment
boards_played[player][key][move[0]][move[1]] += stimulis
# Apply indirect reward/punishment based on opponent's movement
if flag_indirect:
opponent = loser if player == winner else winner
stimulis = punishment if player == winner else reward
for key, move in zip(game_boards[opponent], game_moves[opponent]):
row, col = move
# Check if board (or transformation) exists in player
for trans_number in range(8):
trans_state = board_transform(tuple_to_nd3(key), trans_number)
flip_trans_state = flip_player(trans_state)
test_key = nd3_to_tuple(flip_trans_state)
# If it it, update.
if test_key in boards_played[player]:
boards_played[player][test_key][row][col] += stimulis
break
# If not, append.
else:
state = tuple_to_nd3(key)
flipped_state = flip_player(state)
utility = diffuse_utility(flipped_state, p1=player, p2=opponent)
utility[row][col] += stimulis
boards_played[player].update({nd3_to_tuple(flipped_state): utility})
return boards_played
def flip_player(state, p1=1, p2=2):
p1_rows, p1_cols = np.where(state == p1)
p2_rows, p2_cols = np.where(state == p2)
for row, col in zip(p1_rows, p1_cols):
state[row][col] = p2
for row, col in zip(p2_rows, p2_cols):
state[row][col] = p1
return state
class Test_UpdateUtility(unittest.TestCase):
def assertArrayEqual(self, array1, array2):
self.assertListEqual(array1.tolist(), array2.tolist())
def test_flip_player(self):
state = np.array([[1, 2, 1], [2, 1, 2], [1, 2, 1]])
flip_true = np.array([[2, 1, 2], [1, 2, 1], [2, 1, 2]])
flip_test = flip_player(state, p1=1, p2=2)
self.assertArrayEqual(flip_true, flip_test)
def test_update(self):
x = -1*np.inf
boards_played = {1: {((1, 2, 0),
(2, 0, 0),
(1, 2, 1)): np.array([[x, x, 0],
[x, 0, 0],
[x, x, x]])},
2: {((1, 2, 0),
(2, 0, 1),
(1, 2, 1)): np.array([[x, x, 0],
[x, 0, x],
[x, x, x]])}}
game_boards = {1: [((1, 2, 0), (2, 0, 0), (1, 2, 1))],
2: [((1, 2, 0), (2, 0, 1), (1, 2, 1))]}
game_moves = {1: [(1, 2)],
2: [(1, 1)]}
winner = 2
loser = 1
reward = 1
punishment = -1
# Update utlity for player 1 -> i.e. the loser
boards = update_utility(boards_played, game_boards, game_moves,
1, winner, loser, reward, punishment, flag_indirect=True)
# Check loss was punished.
self.assertArrayEqual(boards[loser][game_boards[loser][0]],
np.array([[x, x, 0], [x, 0, -1], [x, x, x]]))
# Check that observed win was rewarded.
winning_board = nd3_to_tuple(flip_player(np.array(game_boards[winner][0])))
self.assertArrayEqual(boards[loser][winning_board],
np.array([[x, x, 0], [x, 1, x], [x, x, x]]))
# Update utility for player 2 -> i.e. the winner
updated_boards_played = update_utility(boards_played, game_boards, game_moves,
2, winner, loser, reward, punishment, flag_indirect=True)
# Check win was rewarded.
self.assertArrayEqual(updated_boards_played[winner][game_boards[winner][0]],
np.array([[x, x, 0], [x, 1, x], [x, x, x]]))
# Check that observed loss was punished.
losing_board = nd3_to_tuple(flip_player(tuple_to_nd3(game_boards[loser][0])))
self.assertArrayEqual(updated_boards_played[winner][losing_board],
np.array([[x, x, 0], [x, 0, -1], [x, x, x]]))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "jpvelsamy/hotdog",
"score": 2
} |
#### File: jpvelsamy/hotdog/askjunoapi.py
```python
import logging
import pandas as pd
import numpy as np
from pandas import DataFrame
from tensorflow import keras
from hotdogconfig import Configuration
logger = logging.getLogger("ACE")
class AskJunoAPI:
def __init__(self, config_object: Configuration):
self.config_object = config_object
# self.features = ['reach','impressions','results','amount','frequency','clicks','cpc','ctr','cpreach','cpm','engagement']
eval_features_config = self.config_object.get_cpl_features()
self.eval_features = eval_features_config.split(",")
test_features_config = self.config_object.get_test_feature_names()
self.test_features = test_features_config.split(",")
self.model_save_path = self.config_object.get_model_save_path() + '/ace_cpl.h5'
self.outcome_file = self.config_object.get_cpl_outcome()
# https://www.tensorflow.org/guide/keras/save_and_serialize
def restore_model(self):
self.model = keras.models.load_model(self.model_save_path)
def test(self, sigma_folder):
source_file_as_sigma = sigma_folder + self.config_object.get_path_separator() + self.config_object.get_input_file_name()
source_data:DataFrame = pd.read_csv(source_file_as_sigma, engine='c', dtype='float64', names=self.test_features, header=0,
skiprows=0)
cpl_data_frame:DataFrame = pd.read_csv(source_file_as_sigma, engine='c', dtype='float64', names=['cpr'], header=0,
skiprows=0)
cpl_mean = cpl_data_frame.mean(axis=0)
cpl_std = cpl_data_frame.std(axis=0)
mean = source_data.mean(axis=0)
std = source_data.std(axis=0)
test_file_as_gamma = sigma_folder + self.config_object.get_path_separator() + self.config_object.get_test_file_name()
test_data:DataFrame = pd.read_csv(test_file_as_gamma, engine='c', dtype='float64', names=self.test_features, header=0,
skiprows=0)
new_test_data:DataFrame = pd.read_csv(test_file_as_gamma, engine='c', dtype='float64', names=self.test_features, header=0,
skiprows=0)
new_test_data -= mean
new_test_data /= std
logger.info(f'inbound test_data # {test_data.head}')
outcome = self.model.predict(new_test_data)
logger.info(f'outcome before synthesizing# {outcome}')
index = ['Row_' + str(i)
for i in range(1, len(outcome) + 1)]
# defining column headers for the
# Pandas dataframe
columns = ['Column_' + str(i)
for i in range(1, len(outcome[0]) + 1)]
#np.multiply(outcome, cpl_std.get(key='cpr'))
#np.add(outcome, cpl_mean.get(key='cpr'))
df_outcome = pd.DataFrame(outcome, index=index, columns=columns)
df_out = pd.merge(test_data, df_outcome, how='left', left_index=True, right_index=True)
df_out.to_csv(
self.outcome_file + self.config_object.get_path_separator() + self.config_object.get_test_file_name() + '_outcome.csv',
float_format='%.2f')
``` |
{
"source": "jpvelsamy/tubby",
"score": 2
} |
#### File: tubby/python/jamie.py
```python
import logging
import os
import sys
import subprocess
import pwd
import io
import jamcommons
import spur
import spur.ssh
import time
logger = logging.getLogger("Jam-Commcenter")
class Jamie:
def __init__(self, configObject):
self.configObject=configObject
self.machineList=self.configObject.getMachineList()
self.destFolder=self.configObject.getCheckoutDirectory()
self.user=self.configObject.getDeployUser()
self.archiveFolder=self.configObject.getArchiveFolder()
self.machineListArr=self.machineList.split(',')
self.privateKey=self.configObject.getPrivateKeyPath()
self.buildCommand =self.configObject.getJamieMavenBuildCommand()
self.startCommand=self.configObject.getJamieMavenStartCommand()
self.serverName=self.configObject.getJamieServerName()
self.gitUrl=self.configObject.getJamieGit()
self.remoteHome=self.configObject.getHomefolder()
self.branch=self.configObject.getBranch()
def installjamie(self):
logger.info("Starting to perform jamie install sequence using user %s with privateKey %s",self.user, self.privateKey)
for machine in self.machineListArr:
logger.info("Initiating installation seq(pushcode->stopapp->build->startapp) for %s",machine)
self.pushstopScript(machine)
logger.info("Completed pushing commands and configuration for %s and initiating install",machine)
self.install(machine)
logger.info("Completed jamie install sequence using user %s with privateKey %s",self.user, self.privateKey)
def startjamie(self):
logger.info("Starting to perform jamie-START using user %s with private key %s", self.user, self.privateKey)
for machine in self.machineListArr:
logger.info("Pushing remote script for %s",machine)
self.pushstopScript(machine)
logger.info("Completed pushing commands and configuration for %s and initiating START",machine)
self.start(machine)
logger.info("Started jamie in machine %s", machine)
logger.info("Completed start sequence for jamie-START using user %s with private key %s", self.user, self.privateKey)
def stopjamie(self):
logger.info("Starting to perform jamie-STOP using user %s with private key %s", self.user, self.privateKey)
for machine in self.machineListArr:
logger.info("Pushing remote script for %s",machine)
self.pushstopScript(machine)
logger.info("Completed pushing commands and configuration for %s and initiating STOP",machine)
self.stop(machine)
logger.info("Stopped jamie in machine %s", machine)
logger.info("Completed stop sequence for jamie-STOP using user %s with private key %s", self.user, self.privateKey)
def pushstopScript(self,machine):
scpCommand="scp -v python/remote.jam.py ./jam.cfg "+self.user+"@"+machine+":"+self.remoteHome+os.sep
logger.info("Executing scp command %s for machine %s",scpCommand,machine)
jamcommons.makeoscall(scpCommand)
return self
def install(self,machine):
        command=["python2.7","remote.jam.py","--command","installjamie","--serverName","in.juno.bonsaicrm.Application","--branch",self.branch,"--targetfolder",self.destFolder]
logger.info("Install sequence jamie application in machine %s using command %s",machine, command)
self.executeRemoteCommand(command, machine)
return self
def stop(self,machine):
command=["python2.7","remote.jam.py","--command","stopjamie","--serverName","in.juno.bonsaicrm.Application","--targetfolder",self.destFolder]
logger.info("Stopping jamie application in machine %s using command %s once",machine, command)
self.executeRemoteCommand(command, machine)
logger.info("Stopping jamie application in machine %s using command %s twice",machine, command)
self.executeRemoteCommand(command, machine)
logger.info("Stopping jamie application in machine %s using command %s thrice",machine, command)
self.executeRemoteCommand(command, machine)
return self
def build(self,machine):
        command=["python2.7", "remote.jam.py", "--command", "buildjamie","--branch",self.branch,"--targetfolder",self.destFolder,"--giturl",self.gitUrl]
logger.info("Building jamie application in machine %s using command %s",machine,command)
self.executeRemoteCommand(command, machine)
return self
def start(self,machine):
command=["python2.7", "remote.jam.py", "--command", "startjamie","--serverName","in.juno.bonsaicrm.Application","--targetfolder",self.destFolder]
logger.info("Starting jamie application in machine %s using command %s",machine,command)
self.executeRemoteCommand(command, machine)
return self
def executeRemoteCommand(self, command, machine):
logger.info("Executing command %s in machine %s",command,machine)
try:
shell=self.createConnection(machine)
with shell:
result=shell.run(command,cwd=self.remoteHome,allow_error=True)
response=result.output
logger.info(response)
except spur.ssh.ConnectionError as error:
print(error.original_traceback)
raise
return self
#this method is duplicated, it cannot be done this way, please refactor it
def createConnection(self, machine):
rShell = spur.SshShell(hostname=machine,username=self.user,private_key_file=self.privateKey)
return rShell
```
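`remote.jam.py`, the per-host script pushed by `pushstopScript`, is not included in this excerpt. Judging only from the argument lists built above, its command-line interface would need to look roughly like the sketch below; the flag names come from those lists, everything else is guesswork.
```python
# Hypothetical argparse front-end for remote.jam.py
import argparse

parser = argparse.ArgumentParser(description='Per-host deploy helper (sketch)')
parser.add_argument('--command', required=True,
                    choices=['installjamie', 'buildjamie',
                             'startjamie', 'stopjamie'])
parser.add_argument('--serverName')
parser.add_argument('--branch')
parser.add_argument('--giturl')
parser.add_argument('--targetfolder')
args = parser.parse_args()
```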
#### File: tubby/python/jam.py
```python
import os
import sys
import subprocess
import pwd
import ConfigParser
import io
import logging
import logging.handlers
import ConfigParser
import argparse
from jamconfig import Configuration
from jamprompt import JamPrompt
LOG_FILE_NAME="jam.log"
CFG_FILE_NAME="jam.cfg"
logger = logging.getLogger("Jam-Commcenter")
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger.setLevel(logging.DEBUG)
fileHandler = logging.handlers.RotatingFileHandler(LOG_FILE_NAME,
maxBytes=1000000,
backupCount=5
)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
#Refered this link - http://pymotw.com/2/argparse/
def readConfig(configFile):
config = ConfigParser.ConfigParser()
if(configFile and not configFile.isspace()):
if os.path.exists(configFile):
config.read(configFile)
else:
raise RuntimeError("If you intend to use your own config file"+
"(by default commcenter has its own config file),"+
"then ensure the path is correct."+
"The config file path you gave is absent="+configFile)
else:
config.read(CFG_FILE_NAME)
return config
def loadParsedArgs():
parser = argparse.ArgumentParser(description='Humingo Command center application')
parser.add_argument('-c','--command',
action="store",
dest="command",
help="Possible commands(installcandyman,pushcandymanpatch,startcandyman,stopcandyman,healthcheck,stopjamie,startjamie)",
required=False)
parser.add_argument('-m','--machine',
action="store",
dest="machineList",
help="If you want to specifically target a machine(s),"+
"then use this to key in the ip address",
required=False)
parser.add_argument('-f','--config-file',
action="store",
dest="configFilePath",
help ="Provide the configuration file path,"+
"by default it is not required as it will be present along "+
"with the commandcenter module itself",
required=True)
return parser
def initialize(commandLineConfig):
configFile = commandLineConfig.configFilePath
config = readConfig(configFile)
configObj=Configuration(commandLineConfig,config)
return configObj
def main():
logger.info("Booting Comm center ")
parser = loadParsedArgs()
inputConfig=parser.parse_args()
configObj = initialize(inputConfig)
logger.info("Initialized configuration object %s",configObj)
logger.info("Starting the repl for %s", configObj.getMachineList())
prompt = JamPrompt()
prompt.configObj=configObj
prompt.prompt = '> '
prompt.cmdloop('Starting prompt...')
main()
``` |
{
"source": "JPVentura135/astropy",
"score": 3
} |
#### File: astropy/visualization/time.py
```python
import numpy as np
from datetime import datetime
from astropy.time import Time
from astropy import units as u
__all__ = ['time_support']
__doctest_requires__ = {'time_support': ['matplotlib']}
UNSUPPORTED_FORMATS = ('datetime', 'datetime64')
YMDHMS_FORMATS = ('fits', 'iso', 'isot', 'yday')
STR_FORMATS = YMDHMS_FORMATS + ('byear_str', 'jyear_str')
def time_support(*, scale=None, format=None, simplify=True):
"""
Enable support for plotting `astropy.time.Time` instances in
matplotlib.
May be (optionally) used with a ``with`` statement.
>>> import matplotlib.pyplot as plt
>>> from astropy import units as u
>>> from astropy import visualization
>>> with visualization.time_support(): # doctest: +IGNORE_OUTPUT
... plt.figure()
... plt.plot(Time(['2016-03-22T12:30:31', '2016-03-22T12:30:38', '2016-03-22T12:34:40']))
... plt.draw()
Parameters
----------
scale : str, optional
The time scale to use for the times on the axis. If not specified,
the scale of the first Time object passed to Matplotlib is used.
format : str, optional
The time format to use for the times on the axis. If not specified,
the format of the first Time object passed to Matplotlib is used.
simplify : bool, optional
If possible, simplify labels, e.g. by removing 00:00:00.000 times from
ISO strings if all labels fall on that time.
"""
import matplotlib.units as units
from matplotlib.ticker import MaxNLocator, ScalarFormatter
from astropy.visualization.wcsaxes.utils import select_step_hour, select_step_scalar
class AstropyTimeLocator(MaxNLocator):
# Note: we default to AutoLocator since many time formats
# can just use this.
def __init__(self, converter, *args, **kwargs):
kwargs['nbins'] = 4
super().__init__(*args, **kwargs)
self._converter = converter
def tick_values(self, vmin, vmax):
# Where we put the ticks depends on the format we are using
if self._converter.format in YMDHMS_FORMATS:
# If we are here, we need to check what the range of values
# is and decide how to find tick locations accordingly
vrange = vmax - vmin
if (self._converter.format != 'yday' and vrange > 31) or vrange > 366: # greater than a month
# We need to be careful here since not all years and months have
# the same length
# Start off by converting the values from the range to
# datetime objects, so that we can easily extract the year and
# month.
tmin = Time(vmin, scale=self._converter.scale, format='mjd').datetime
tmax = Time(vmax, scale=self._converter.scale, format='mjd').datetime
# Find the range of years
ymin = tmin.year
ymax = tmax.year
if ymax > ymin + 1: # greater than a year
# Find the step we want to use
ystep = int(select_step_scalar(max(1, (ymax - ymin) / 3)))
ymin = ystep * (ymin // ystep)
# Generate the years for these steps
times = []
for year in range(ymin, ymax + 1, ystep):
times.append(datetime(year=year, month=1, day=1))
else: # greater than a month but less than a year
mmin = tmin.month
mmax = tmax.month + 12 * (ymax - ymin)
mstep = int(select_step_scalar(max(1, (mmax - mmin) / 3)))
mmin = mstep * max(1, mmin // mstep)
# Generate the months for these steps
times = []
for month in range(mmin, mmax + 1, mstep):
                                times.append(datetime(year=ymin + (month - 1) // 12,
                                                      month=(month - 1) % 12 + 1, day=1))
# Convert back to MJD
values = Time(times, scale=self._converter.scale).mjd
elif vrange > 1: # greater than a day
self.set_params(steps=[1, 2, 5, 10])
values = super().tick_values(vmin, vmax)
else:
# Determine ideal step
dv = (vmax - vmin) / 3 * 24 << u.hourangle
# And round to nearest sensible value
dv = select_step_hour(dv).to_value(u.hourangle) / 24
# Determine tick locations
imin = np.ceil(vmin / dv)
imax = np.floor(vmax / dv)
values = np.arange(imin, imax + 1, dtype=np.int64) * dv
else:
values = super().tick_values(vmin, vmax)
# Get rid of values outside of the input interval
values = values[(values >= vmin) & (values <= vmax)]
return values
def __call__(self):
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
class AstropyTimeFormatter(ScalarFormatter):
def __init__(self, converter, *args, **kwargs):
super().__init__(*args, **kwargs)
self._converter = converter
self.set_useOffset(False)
self.set_scientific(False)
def __call__(self, value, pos=None):
# Needed for Matplotlib <3.1
if self._converter.format in STR_FORMATS:
return self.format_ticks([value])[0]
else:
return super().__call__(value, pos=pos)
def format_ticks(self, values):
if len(values) == 0:
return []
if self._converter.format in YMDHMS_FORMATS:
times = Time(values, format='mjd', scale=self._converter.scale)
formatted = getattr(times, self._converter.format)
if self._converter.simplify:
if self._converter.format in ('fits', 'iso', 'isot'):
if all([x.endswith('00:00:00.000') for x in formatted]):
split = ' ' if self._converter.format == 'iso' else 'T'
formatted = [x.split(split)[0] for x in formatted]
elif self._converter.format == 'yday':
if all([x.endswith(':001:00:00:00.000') for x in formatted]):
formatted = [x.split(':', 1)[0] for x in formatted]
return formatted
elif self._converter.format == 'byear_str':
return Time(values, format='byear', scale=self._converter.scale).byear_str
elif self._converter.format == 'jyear_str':
return Time(values, format='jyear', scale=self._converter.scale).jyear_str
else:
return super().format_ticks(values)
class MplTimeConverter(units.ConversionInterface):
def __init__(self, scale=None, format=None, simplify=None):
super().__init__()
self.format = format
self.scale = scale
self.simplify = simplify
# Keep track of original converter in case the context manager is
# used in a nested way.
self._original_converter = units.registry.get(Time)
units.registry[Time] = self
@property
def format(self):
return self._format
@format.setter
def format(self, value):
if value in UNSUPPORTED_FORMATS:
raise ValueError('time_support does not support format={0}'.format(value))
self._format = value
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._original_converter is None:
del units.registry[Time]
else:
units.registry[Time] = self._original_converter
def default_units(self, x, axis):
if isinstance(x, tuple):
x = x[0]
if self.format is None:
self.format = x.format
if self.scale is None:
self.scale = x.scale
return 'astropy_time'
def convert(self, value, unit, axis):
"""
Convert a Time value to a scalar or array.
"""
# For Matplotlib < 2.2
if not isinstance(value, Time):
return value
scaled = getattr(value, self.scale)
if self.format in YMDHMS_FORMATS:
return scaled.mjd
elif self.format == 'byear_str':
return scaled.byear
elif self.format == 'jyear_str':
return scaled.jyear
else:
return getattr(scaled, self.format)
def axisinfo(self, unit, axis):
"""
Return major and minor tick locators and formatters.
"""
majloc = AstropyTimeLocator(self)
majfmt = AstropyTimeFormatter(self)
return units.AxisInfo(majfmt=majfmt,
majloc=majloc,
label='Time ({0})'.format(self.scale))
return MplTimeConverter(scale=scale, format=format, simplify=simplify)
``` |
{
"source": "jpventura/BikeShareAnalysis",
"score": 3
} |
#### File: jpventura/BikeShareAnalysis/babs_datacheck.py
```python
import numpy as np
import pandas as pd
from babs_visualizations import usage_stats
def question_3(data):
"""
This function will check that the sample data has been wrangled properly.
"""
n_correct = 0
# Check that there are a correct number of lines in the dataset.
if data.shape[0] != 27345:
print("Eram esperados 27,345 pontos de dados, Encontrados apenas {:d}.".format(data.shape[0]))
else:
n_correct += 1
# Check that the durations have been converted into terms of minutes.
data_duration_stats = usage_stats(data, verbose = False)
expected_duration_stats = np.array([6.816667, 10.716667, 17.28333])
if not np.allclose(data_duration_stats, expected_duration_stats):
print("Os dados de duração não batem com o esperado (em minutos).")
if np.allclose(data_duration_stats, np.array([409, 643, 1037])):
print(" Parece que as unidades ainda se encontram em segundos.")
elif np.allclose(data_duration_stats, np.array([24520, 38580, 62220])):
print(" Parece que você usou o operador matemático errado para a sua conversão.")
print(" Lembre-se que existem 60 segundos em um minuto.")
else:
n_correct += 1
# Check that the timestamps have been wrangled properly.
expected_time_vals = {'start_month': [25243, 2102],
'start_hour': [2851, 2291, 2219, 2171, 2131, 1976,
1833, 1799, 1791, 1644, 1359, 1269,
1071, 797, 644, 440, 394, 276,
153, 65, 55, 45, 42, 29],
'weekday': [4712, 4493, 4370, 3860, 3637, 3138, 3135]}
for column in expected_time_vals.keys():
col_data = data[column].value_counts().values
n_values = len(col_data)
n_values_expected = len(expected_time_vals[column])
if not n_values == n_values_expected:
print("Número errado de valores únicos encontrados para a coluna : {}".format(column))
print(" {:d} valores únicos esperados; {:d} valores encontrados.".format(n_values_expected, n_values))
elif not np.array_equal(col_data, expected_time_vals[column]):
expected_max = expected_time_vals[column][0]
expected_min = expected_time_vals[column][-1]
print("Contagem de valores erradas para a coluna: {}".format(column))
print(" Valor mais comum esperado {:d} pontos de dados; {:d} viagens encontradas.".format(expected_max, col_data[0]))
print(" Valor menos esperado {:d} pontos de dados; {:d} viagens enconrtadas.".format(expected_min, col_data[-1]))
else:
n_correct += 1
if n_correct == len(expected_time_vals.keys()) + 2:
print("Todas as contagens estão como esperadas.")
``` |
{
"source": "jpventura/weather-app",
"score": 3
} |
#### File: apps/weather/serializers.py
```python
from rest_framework import serializers
from .models import Weather, Temperature
ISO_DATE_FORMAT = '%Y-%m-%d'
TEMPERATURE_MIN = -273.15
TEMPERATURE_MAX = 122.00
class TemperatureSerializer(serializers.BaseSerializer):
def update(self, instance, validated_data):
Temperature.objects.update(**validated_data)
def __init__(self, *args, **kwargs):
super(TemperatureSerializer, self).__init__(*args, **kwargs)
def to_internal_value(self, data):
if type(data) in [int, float, str]:
data = {'actual': float(data)}
if 'actual' not in data:
raise serializers.ValidationError({
                'actual': 'This field requires a temperature.'
})
return data
def to_representation(self, instance):
return instance.actual
def create(self, validated_data):
return Temperature.objects.create(**validated_data)
class WeatherSerializer(serializers.ModelSerializer):
temperatures = TemperatureSerializer(many=True)
class Meta:
model = Weather
fields = ('id', 'city', 'date', 'state', 'lat', 'lon', 'temperatures')
def create(self, validated_data):
temperatures = validated_data.pop('temperatures')
weather = Weather.objects.create(**validated_data)
for t in temperatures:
Temperature.objects.create(
weather=weather,
**WeatherSerializer.format_temperature(t)
)
return weather
@classmethod
def format_temperature(cls, temperature):
return {'actual': float(temperature)} if type(temperature) in [int, float, str] else temperature
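    # Illustrative request payload accepted by WeatherSerializer (a sketch; the concrete
    # values below are assumptions, only the field names come from Meta.fields above):
    #   {"city": "Sao Paulo", "date": "2017-01-01", "state": "SP",
    #    "lat": -23.5, "lon": -46.6,
    #    "temperatures": [22.4, "25.1", {"actual": 19.8}]}
    # Plain numbers/strings and {"actual": ...} dicts are both normalised by
    # TemperatureSerializer.to_internal_value above.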
```
#### File: weather/tests/__init__.py
```python
from copy import deepcopy
from django.urls import reverse
from os import path
from rest_framework import status
from rest_framework.test import APITestCase
import json
FIXTURES_DIR = path.join(
path.dirname(path.dirname(path.abspath(__file__))),
'fixtures'
)
RESPONSE_EMPTY_PAGE = {
'count': 0,
'next': None,
'previous': None,
'results': []
}
class WeatherAPTestCase(APITestCase):
def setUp(self):
self.url = reverse('weather-list')
self.response = deepcopy(RESPONSE_EMPTY_PAGE)
with open(path.join(FIXTURES_DIR, 'weather.json')) as f:
self.body = json.load(f)
self.response['results'].extend([self.client.post(self.url, w, format='json').data for w in self.body])
self.response['count'] = len(self.response['results'])
``` |
{
"source": "jpvergnes/pybrgmlib",
"score": 2
} |
#### File: pybrgmlib/src/era5.py
```python
import os
import csv
import secrets
import datetime
from collections import defaultdict
import numpy as np
import pandas as pd
import rasterio
import rasterio.warp as warp
import cdsapi
import eccodes as ecs
from netCDF4 import Dataset  # required by EOBS.set_src_grid_from_netcdf below
"""
Packages requis :
- numpy
- pandas
- rasterio (dépend de gdal - librairie C à installer via gestionnaire de package) :
utile pour la reprojection vers la grille SAFRAN
- cdsapi (les instructions se trouvent ici :https://cds.climate.copernicus.eu/api-how-to)
- eccodes (dispo sur pypi - librairie C à installer via gestionnaire de package)
Lors de l'exécution des programmes, un dossier "cache" est créé dans le répertoire
avec un fichier .cacherc et des fichiers grib téléchargés. Ce fichier.cacherc permet
la mise en correspondance d'une requête avec ledit fichier grib. Si on demande la même requête,
le fichier ne sera pas retéléchargé car déjà présent dans le cache. Compte tenu du nombre
de requêtes possibles, j'ai trouvé ce système de requête - nom de fichier.grib (en hexadecimal)
utile pour ne pas avoir des noms à ralonge.
"""
class AbstractReproj(object):
"""
    Tool to reproject from one projection (src) to another (dst)
"""
src_transform = ''
src_crs = ''
src_grid = {}
def __init__(self, product):
"""
        Argument
        - product : name of the climate data store product
"""
self.product = product
def set_dst_grid(self, Xeast, Ysouth, Xwest, Ynorth, NX, NY, crs):
"""
        Configure the destination grid
        Arguments :
        - Xeast : eastern bound
        - Ysouth : southern bound
        - Xwest : western bound
        - Ynorth : northern bound
        - NX : number of pixels along X
        - NY : number of pixels along Y
        - crs : coordinate reference system, in the rasterio
        dictionary format (example : {'init':'EPSG:4326'})
"""
self.dst_transform = rasterio.transform.from_bounds(
Xeast, Ysouth, Xwest, Ynorth, NX, NY
)
self.dst_grid = {}
self.dst_grid['Xeast'] = Xeast
self.dst_grid['Ysouth'] = Ysouth
self.dst_grid['Xwest'] = Xwest
self.dst_grid['Ynorth'] = Ynorth
self.dst_grid['NX'] = NX
self.dst_grid['NY'] = NY
self.dst_crs = crs
def reproject_to_dst(self, src, dst, src_nodata=9999., dst_nodata=9999.):
"""
        Reprojection from src to dst
        Arguments:
        - src : 2D or 3D np.array or rasterio band
        - dst : 2D or 3D np.array or rasterio band
        - src_nodata : nodata value in src
        - dst_nodata : nodata value in dst
"""
new_dst = dst.copy()
warp.reproject(
src,
new_dst,
src_transform=self.src_transform,
src_crs=self.src_crs,
dst_transform=self.dst_transform,
dst_crs=self.dst_crs,
resampling=warp.Resampling.bilinear,
src_nodata=src_nodata,
dst_nodata=dst_nodata,
)
new_dst = np.where(dst == dst_nodata, dst_nodata, new_dst)
return new_dst
def drop_nodata(self, df, zones):
"""
        Drop the zones set to 9999.
        Arguments :
        - df : pd.DataFrame with all the zones as columns
        - zones : zones to keep/drop
"""
indices, = np.where(zones.flatten() != 9999.)
indices = indices + 1
df = df.loc[:, (slice(None), indices)]
df = df.sort_index(axis=1)
variables = df.columns.get_level_values('variable').unique()
columns = pd.MultiIndex.from_product(
[variables, list(range(1, len(indices) + 1))]
)
df.columns = columns
return df
def write_year_in_csv(self, df, year, variable, hydro_year=False, **kwargs):
"""
        Write to csv format
        Arguments
        - df: dataframe to write
        - year: year to write
        - variable : variable to write
        - hydro_year : whether it is a hydrological year or not
        - **kwargs : dictionary of arguments passed to df.to_csv
"""
df = df.round(2)
if hydro_year:
start = '{0}-8-1'.format(year)
end = '{0}-7-31'.format(year + 1)
filename = 'csv_files/{0}_{1}_{2}_{3}.csv'.format(
self.product, variable, year, year+1
)
else:
start = '{0}-1-1'.format(year)
end = '{0}-12-31'.format(year)
filename = 'csv_files/{0}_{1}_{2}.csv'.format(
self.product, variable, year
)
if not os.path.isdir('csv_files'):
os.makedirs('csv_files')
df = df.loc[start:end, variable]
df.index.name = len(df.columns) + 1
df = df.reset_index().sort_index(axis=1)
df.to_csv(
filename, index=None, sep=' ', float_format='%.2f', **kwargs
)
def read_year_in_csv(self, year, variable):
"""
        Read a csv file
        Arguments :
        - year: year to read
        - variable : variable to read
"""
filename = 'csv_files/{0}_{1}_{2}.csv'.format(
self.product, variable, year
)
df = pd.read_csv(
filename, delim_whitespace=True, index_col=-1, parse_dates=True,
quoting=csv.QUOTE_NONE
)
df.columns = list(map(int, df.columns))
return df
def write_monthly_mean_raster(self, df, variable, grid, src_crs, src_transform, suffix):
"""
        Write a monthly mean as a raster
        Arguments :
        - df : pd.DataFrame of the variables
        - variable : variable to write
        - grid : corresponding grid
        - src_crs : coordinate reference system of the grid
        - src_transform : transform of the grid
        - suffix : suffix appended to the variable name
"""
if not os.path.isdir('rasters'):
os.makedirs('rasters')
df = df.loc[:, variable]
mask = df == 9999.
cols = mask.all()[mask.all()].index
dfmonth = df.resample('M').mean()
dfmonth.loc[:, cols] = 9999.
assert dfmonth.iloc[0, :].shape[0] == grid['NY']*grid['NX']
for _, row in dfmonth.iterrows():
src = row.values.reshape(grid['NY'], grid['NX'])
save_raster_as_tiff(
'rasters/{0}_{1}_{2}_{3}_{4}.tiff'.format(
self.product, suffix, variable,
row.name.year, row.name.month
),
src, src_crs, src_transform
)
def write_dst_monthly_mean_raster(self, df, variable):
"""
        Write a monthly mean of dst as a raster
        Arguments :
        - df : pd.DataFrame of the variables
        - variable : variable to write
"""
self.write_monthly_mean_raster(
df, variable, self.dst_grid, self.dst_crs, self.dst_transform, 'dst'
)
def write_src_monthly_mean_raster(self, df, variable):
"""
        Write a monthly mean of src as a raster
        Arguments :
        - df : pd.DataFrame of the variables
        - variable : variable to write
"""
self.write_monthly_mean_raster(
df, variable, self.src_grid, self.src_crs, self.src_transform, 'src'
)
class EOBS(AbstractReproj):
def set_src_grid_from_netcdf(self, filename, nc_lon='longitude', nc_lat='latitude'):
rootgrp = Dataset(filename)
lats = rootgrp[nc_lat]
lons = rootgrp[nc_lon]
self.src_grid = {}
self.src_grid['Xwest'] = np.round(lons[0], 2)
self.src_grid['Xeast'] = np.round(lons[-1], 2)
self.src_grid['Ysouth'] = np.round(lats[0], 2)
self.src_grid['Ynorth'] = np.round(lats[-1], 2)
self.src_grid['NX'] = len(lons)
self.src_grid['NY'] = len(lats)
self.src_grid['Xres'] = np.round((lons[-1]-lons[0])/len(lons), 2)
self.src_grid['Yres'] = np.round((lats[-1]-lats[0])/len(lats), 2)
self.src_transform = rasterio.transform.from_bounds(
self.src_grid['Xwest'] - self.src_grid['Xres'] / 2.,
self.src_grid['Ysouth'] - self.src_grid['Yres'] / 2.,
self.src_grid['Xeast'] + self.src_grid['Xres'] / 2.,
self.src_grid['Ynorth'] + self.src_grid['Yres'] / 2.,
self.src_grid['NX'], self.src_grid['NY']
)
self.src_crs = {'init':'EPSG:4326'}
class ERA5(AbstractReproj):
def __init__(self, *, product, area, cum_period=1):
"""
        Arguments
        - product : name of the product requested from cdsapi
        - area : requested domain
        - cum_period : period over which the variables are accumulated (in hours)
"""
AbstractReproj.__init__(self, product)
self.c = cdsapi.Client()
self.area = area
self.cum_period = cum_period
if not os.path.isdir('cache'):
os.makedirs('cache')
with open('cache/.cacherc', 'w') as _:
pass
def _build_name(self, **kwargs):
components = ['{0}'.format(self.product)]
for key in sorted(kwargs):
if type(kwargs[key]) is list:
if 'area' not in key:
kwargs[key].sort()
kwargs[key] = '_'.join(list(map(str, kwargs[key])))
components.append(
'{0}_{1}'.format(key, kwargs[key])
)
return '_'.join(components)
def _build_cache(self):
dcache = {}
with open('cache/.cacherc', 'r') as f:
for line in f:
key, val = line.split()
dcache[key] = val
return dcache
def retrieve(self, filename, **kwargs):
self.c.retrieve(
self.product,
{
'format':'grib',
'product_type':'reanalysis',
**kwargs
},
filename)
def download_grib(self, name, force=False, **kwargs):
dcache = self._build_cache()
if name not in dcache:
filename = 'cache/{0}.grib'.format(secrets.token_hex(16))
self.retrieve(filename, **kwargs)
with open('cache/.cacherc', 'a+') as f:
f.write('{0} {1}\n'.format(name, filename))
elif name in dcache and force:
            filename = dcache[name]
self.retrieve(filename, **kwargs)
def grid_from_grib(self, name):
grid = {}
dcache = self._build_cache()
with open(dcache[name], 'rb') as f:
gid = ecs.codes_grib_new_from_file(f)
grid['NX'] = ecs.codes_get(gid, 'numberOfColumns')
grid['NY'] = ecs.codes_get(gid, 'numberOfRows')
grid['Xres'] = ecs.codes_get(gid, 'iDirectionIncrementInDegrees')
grid['Yres'] = ecs.codes_get(gid, 'jDirectionIncrementInDegrees')
grid['Ynorth'] = ecs.codes_get(gid, 'latitudeOfFirstGridPointInDegrees')
grid['Xwest'] = ecs.codes_get(gid, 'longitudeOfFirstGridPointInDegrees')
grid['Ysouth'] = ecs.codes_get(gid, 'latitudeOfLastGridPointInDegrees')
grid['Xeast'] = ecs.codes_get(gid, 'longitudeOfLastGridPointInDegrees')
values = ecs.codes_get_values(gid)
return grid, values
def set_src_grid_from_grib(self, variable):
"""
        Define the geographic information from a grib file
        Argument :
        - variable : single variable used to retrieve the grib file
"""
kwargs = dict(year=2000, month=1, day=1, time='01:00',
variable=variable, area=self.area)
name = self._build_name(**kwargs)
self.download_grib(name, **kwargs)
self.src_grid, _ = self.grid_from_grib(name)
self.src_transform = rasterio.transform.from_bounds(
self.src_grid['Xwest'] - self.src_grid['Xres'] / 2.,
self.src_grid['Ysouth'] - self.src_grid['Yres'] / 2.,
self.src_grid['Xeast'] + self.src_grid['Xres'] / 2.,
self.src_grid['Ynorth'] + self.src_grid['Yres'] / 2.,
self.src_grid['NX'], self.src_grid['NY']
)
self.src_crs = '+proj=latlong'
def get_values_from_grib(self, name):
def build_dataframe(values, dates):
variable_names = values.keys()
df = []
for key in variable_names:
df.append(
pd.DataFrame(values[key], index=dates[key])
)
df = pd.concat(df, axis=1)
columns = pd.MultiIndex.from_product(
[
variable_names,
np.arange(
self.src_grid['NX']*self.src_grid['NY']
).astype('int')
]
)
columns = columns.set_names(['variable', 'zone'])
df.columns = columns
return df
dcache = self._build_cache()
with open(dcache[name], 'rb') as f:
values = defaultdict()
dates = defaultdict()
nb = ecs.codes_count_in_file(f)
for _ in range(nb):
gid = ecs.codes_grib_new_from_file(f)
variable_name = ecs.codes_get(gid, 'shortName')
if variable_name not in values:
values[variable_name] = []
values[variable_name].append(
ecs.codes_get_values(gid)
)
if variable_name not in dates:
dates[variable_name] = []
date = ecs.codes_get_string(gid, 'validityDate')
tim = '{0:04}'.format(ecs.codes_get(gid, 'validityTime'))
dates[variable_name].append(datetime.datetime(
int(date[:4]), int(date[4:6]), int(date[6:]),
int(tim[:2]), int(tim[2:])
))
assert np.unique([len(dates[key]) for key in dates]).shape[0] == 1
assert np.unique([dates[key][-1] for key in dates]).shape[0] == 1
return build_dataframe(values, dates)
def request_values_from_api(self, **kwargs):
name = self._build_name(**kwargs)
self.download_grib(name, **kwargs)
return self.get_values_from_grib(name)
def request_period(self, variable, **kwargs):
"""
        Request to CDS for a period specified in kwargs
        (see the cdsapi website for the argument syntax)
        Arguments:
        - variable : variable to retrieve
        - kwargs : dictionary of arguments passed to cdsapi
"""
return self.request_values_from_api(
variable=variable, area=self.area, **kwargs
)
def request_extended_period(self, variable, **kwargs):
"""
        Request to CDS for a period specified in kwargs
        (see the cdsapi website for the argument syntax)
        Adds 1 extra day (value at 00:00 of the following day)
        Arguments:
        - variable : variable to retrieve
        - kwargs : dictionary of arguments passed to cdsapi
"""
df = self.request_values_from_api(
variable=variable, area=self.area, **kwargs
)
next_day = df.index[-1] + datetime.timedelta(hours=1)
if next_day.hour == 0:
df2 = self.request_values_from_api(
variable=variable, area=self.area,
year=next_day.year, month=next_day.month,
day=next_day.day, time='00:00',
)
df = pd.concat([df, df2], axis=0)
return df
def decum_variable(self, df):
decum = df.where(df == 9999., df - df.shift(1))
ic = int(self.cum_period)
decum.iloc[1::ic, :] = df.iloc[1::ic, :]
decum.iloc[0, :] = 0
return decum
def get_hourly_accumulated_variable(self, df):
"""
        De-accumulate the accumulated variables
"""
if self.cum_period > 1:
            assert df.index[0].hour == 0 # Period starts at the beginning of the day
df = self.decum_variable(df)
return df
def get_daily_variable(self, df2, func, is_cum=False, **kwargs):
"""
        Return daily variables
        Arguments :
        - df2 : pd.DataFrame of variables
        - func : 'sum' or 'mean'
        - is_cum : whether the variable is accumulated over several hours or not.
        Calls decum_variable in that case
        TO DO : for data accumulated over 24 h, there is no point in
        de-accumulating and then re-accumulating again
"""
if 'variable' in kwargs:
assert type(kwargs['variable']) is list
variable = kwargs.get('variable', slice(None))
df = df2.loc[:, variable]
        assert df.index[0].hour == 0 # Period starts at the beginning of the day
mask = df == 9999.
cols = mask.all()[mask.all()].index
if is_cum and self.cum_period > 1:
df = self.decum_variable(df)
        df.index = df.index - datetime.timedelta(hours=1) # Shift by 1 hour
        if func == 'sum':
            df = df.resample('D').sum() # Sum after the shift
        elif func == 'mean':
            df = df.resample('D').mean() # Mean after the shift
        df.loc[:, cols] = 9999.
        return df.iloc[1:, :] # First day dropped because of the 1-hour shift
def reproject(self, df, dst):
dfout = []
variables = df.columns.get_level_values('variable').unique()
for variable in variables:
df2 = df.xs(key=variable, axis=1)
dfout2 = []
for _, row in df2.iterrows():
row = row.values.reshape(
(self.src_grid['NY'], self.src_grid['NX'])
)
dst = self.reproject_to_dst(row, dst)
dfout2.append(dst.flatten())
dfout.append(pd.DataFrame(dfout2, index=df.index))
dfout = pd.concat(dfout, axis=1)
columns = pd.MultiIndex.from_product(
[
variables,
np.arange(
self.dst_grid['NX']*self.dst_grid['NY']
).astype('int')
]
)
columns = columns.set_names(['variable', 'zone'])
dfout.columns = columns
return dfout
def add_nodata(self, df, zones):
"""
        The masked SAFRAN zones are added back to df
"""
variables = df.columns.get_level_values('variable').unique()
columns = pd.MultiIndex.from_product(
[variables, list(range(len(zones.flatten())))]
)
u, w = np.where(zones != 9999.)
dfout = []
z = zones.copy()
for variable in variables:
df2 = []
for _, row in df.iterrows():
z[u, w] = row[variable].squeeze().values
df2.append(z.flatten())
df2 = pd.DataFrame(df2)
dfout.append(df2)
dfout = pd.concat(dfout, axis=1)
dfout.columns = columns
dfout.index = df.index
return dfout
def multiply_by_factor(self, df, factor, **kwargs):
"""
        Multiply the variables by a factor
"""
if 'variable' in kwargs:
assert type(kwargs['variable']) is list
variable = kwargs.get('variable', slice(None))
df = df.sort_index(axis=1)
dfv = df.loc[:, variable]
dfv = dfv.where(dfv == 9999., dfv * factor)
df.loc[:, variable] = dfv.values
return df
def add_factor(self, df, factor, **kwargs):
"""
        Add a factor to the variables
"""
variable = kwargs.get('variable', slice(None))
if 'variable' in kwargs:
assert type(kwargs['variable']) is list
dfv = df.loc[:, variable]
dfv = dfv.where(dfv == 9999., dfv + factor)
df.loc[:, variable] = dfv.values
return df
def save_raster_as_tiff(filename, dst, crs, transform):
with rasterio.open(
filename, 'w', driver='GTiff',
width=dst.shape[1], height=dst.shape[0],
count=1, dtype=dst.dtype,
crs=crs, transform=transform,
nodata=9999.
) as new_dataset:
new_dataset.write(dst, 1)
```
#### File: src/meteobrgm/pymeteobrgm.py
```python
import datetime
import os
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
import numpy as np
import pandas as pd
import geopandas as gpd
import xarray
from tqdm import tqdm
from joblib import Parallel, delayed
from shapely.geometry import Polygon
import meteobrgm
X = (56000, 1200000)
Y = (1613000, 2685000)
RES = 8000.
NX, NY = 143, 134
EPSG = 27572
def build_polygon_safran(res=8000.):
"""
Build the shapefile
Parameter
---------
res: int
        Resolution of the polygons in metres (must evenly divide the native 8 km SAFRAN resolution)
"""
assert res <= RES
assert RES % res == 0
nres = int(RES / res)
coord = pkg_resources.open_text(meteobrgm, 'coord_9892')
df = pd.read_csv(coord, header=None, delim_whitespace=True)
Xleft = df[4] - int(RES/2.)
Xright = df[4] + int(RES/2.)
Ybottom = df[5] - int(RES/2.)
Ytop = df[5] + int(RES/2.)
polygons = []
for i in range(9892):
for j in range(nres):
Xleft2 = Xleft[i] + j*res
Xright2 = Xright[i] - (nres - (j +1))*res
for k in range(nres):
Ybottom2 = Ybottom[i] + k*res
Ytop2 = Ytop[i] - (nres - (k +1))*res
polygon = Polygon(
(
(Xleft2, Ybottom2),
(Xright2, Ybottom2),
(Xright2, Ytop2),
(Xleft2, Ytop2),
(Xleft2, Ybottom2)
)
)
polygons.append(polygon)
return polygons
def build_shapefile_safran(res=8000.):
"""
Build the shapefile safran
Parameter
---------
res: int
        Resolution of the polygons in metres (must evenly divide the native 8 km SAFRAN resolution)
Return
------
geopandas.GeoDataFrame
"""
safran = meteobrgm.build_polygon_safran(res)
gdf_safran = gpd.GeoDataFrame(
{'zone': np.arange(1, len(safran) + 1)},
geometry=safran,
crs='EPSG:27572'
)
if not gdf_safran.has_sindex:
gdf_safran.sindex
return gdf_safran
def build_grid_safran():
"""
Build the mask array of the SAFRAN grid.
Returns
-------
numpy.array
Array with the zone numbers from the SAFRAN grid. No data cells are
equal to 9999.
"""
Xcentre, Ycentre, num_safran = meteobrgm.return_xy_safran()
XYcentre = [(x, y) for x, y in zip(Xcentre, Ycentre)]
raster = np.ones((NY, NX))*9999
for i in range(NY):
for j in range(NX):
x = X[0] + RES/2. + j*RES
y = Y[1] - RES/2. - i*RES
if (x, y) in XYcentre:
index = XYcentre.index((x, y))
raster[i, j] = num_safran[index]
return raster
def extract_zones_from_shapefile(shp_input):
gdf = gpd.read_file(shp_input)
if not gdf.has_sindex:
gdf.sindex
gdf = gdf.to_crs('EPSG:27572')
safran = meteobrgm.build_shapefile_safran()
return safran.overlay(gdf)
def return_indices_safran(return_raster=False):
"""
Return indices X and Y from SAFRAN
Returns
-------
indices : list of tuple (int, int)
raster (optional) : numpy.array
"""
raster = meteobrgm.build_grid_safran()
yj, xi = np.indices(raster.shape)
yj = yj + 1
xi = xi + 1
xi = np.ma.masked_where(raster == 9999., xi).compressed()
yj = np.ma.masked_where(raster == 9999., yj).compressed()
indices = [(j, i) for j, i in zip(yj, xi)]
if return_raster:
return indices, raster
else:
return indices
def return_xy_safran():
"""
Return X and Y from SAFRAN
Returns
-------
list
"""
coord = pkg_resources.open_text(meteobrgm, 'coord_9892')
df = pd.read_csv(coord, header=None, delim_whitespace=True)
Xcentre = df[4]
Ycentre = df[5]
return Xcentre, Ycentre, df[1]
def read_meteo_brgm_format(fname, ystart, zones=9892, skiprows=1):
"""
Read data from the formatted file used in BRGM for storing
meteorological data available on the SAFRAN grid over France
(only 1 hydrological year starting from ystart)
Parameters
----------
fname : str
File name to read
ystart : int
Starting year of the file
zones : list of int (optional)
SAFRAN zone numbers to extract (default to 9892)
skiprows : (optional)
rows to skip (default to 1)
Return
------
pandas.DataFrame
"""
ystart = int(ystart)
zones = np.array(zones).astype('int')
df = pd.read_csv(
fname,
skiprows=skiprows,
delim_whitespace=True,
header=None,
encoding='ISO-8859-1'
)
    df = df.iloc[:, zones - 1] # -1 because of python indexing
df.columns = zones
df.columns.name = 'Zone'
df.index = pd.date_range(
'{0}-8-1'.format(ystart),
'{0}-7-31'.format(ystart + 1),
)
return df
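# Illustrative call (a sketch; the file name and zone numbers below are assumptions,
# following the naming convention used by ExtractSafran further down):
#
#   df = read_meteo_brgm_format('Plu+Neige_Jou_v2017_Safran_2000_2001',
#                               ystart=2000, zones=[1, 2, 3])
#   december_mean = df.loc['2000-12-01':'2000-12-31'].mean()  # mean per zone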
def read_meteofrance_format(fname, zones, variables=['PS', 'PL', 'ETP', 'T']):
"""
Read the SAFRAN data provided by Météo France and extract
the requested zones.
Parameters
----------
fname: str
File name to read
zones: list of integers
List of the zone numbers to extract
variables: list of str, default=['PS', 'PL', 'ETP', 'T']
List of variables as they appear in columns in the file.
Return
------
pandas.DataFrame
"""
date_parser = lambda x: datetime.datetime.strptime(x, '%Y%m%d')
df = pd.read_csv(fname, delimiter=';', header=None, parse_dates=True,
date_parser=date_parser, index_col=0)
champs = ['Zone'] + variables
df.columns = pd.Index(champs)
df = df.pivot(columns='Zone', values=champs[1:])
df.columns = df.columns.set_names('Champ', 0)
df.index = df.index.set_names('Date')
selection = pd.MultiIndex.from_product([champs[1:], zones],
names=['Champ', 'Zone'])
return df[selection]
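# Illustrative call (a sketch; the local file name is an assumption about how the
# Météo France delivery is stored):
#
#   df = read_meteofrance_format('safran_quotidien.csv', zones=[1, 2, 3])
#   df[('ETP', 1)].head()  # daily ETP for zone 1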
def write_meteo_brgm_format(fname, data, header):
"""
Write data in brgm format (no column date)
Parameters
----------
fname : filename of file handle
data : 1D or 2D array_like
header : str
"""
np.savetxt(fname,
data,
header=header,
delimiter=' ',
fmt='%.3f')
def write_meteo_brgm_format_with_date(fname, df, header='# '):
"""
Write data in brgm format (with column date at the end)
Parameters
----------
fname : filename of file handle
df : pandas.DataFrame
Needs to have datetime index
"""
df.index.name = 'index'
df = df.reset_index()
dates = df.pop('index')
df.insert(len(df.columns), 'Date', dates)
with open(fname, 'w', newline='') as f:
f.write(header)
df.to_csv(
f,
sep=' ',
index=None,
date_format='%d/%m/%Y',
float_format='%.3f'
)
def write_excel_simultane_format(fname, df):
"""
Write pandas.dataframe in excel simultane format.
Parameters
----------
fname : filename of file handle
df : pandas.DataFrame
Needs to have datetime index
"""
with open(fname, 'w', newline='') as f:
f.write('# ')
df.to_csv(f, sep=' ', date_format='%d/%m/%Y', index_label='Date')
class MFSafranNetcdfDataset():
def __init__(self, paths, xdim_name='X', ydim_name='Y', parallel=False):
indices = meteobrgm.return_indices_safran()
        df = xarray.open_mfdataset(paths, parallel=parallel)
if 'i' in df.coords.dims and 'j' in df.coords.dims:
xdim_name, ydim_name = 'i', 'j'
df[xdim_name] = np.arange(1, 144)
df[ydim_name] = np.arange(134, 0, -1)
df = df.stack(NUM_ZONE=(ydim_name, xdim_name))
df = df.loc[{'NUM_ZONE':indices}]
self.df = df.fillna(0)
self.nbzones = len(indices)
self.paths = paths
def get_hydrological_year(self, variable, year):
return self.df[variable].sel(
time=slice(
'{0}-8-1'.format(year),
'{0}-7-31'.format(year + 1)
),
)
def convert_to_meteo_brgm_format(self, paths, variable, year_start, year_end, convert_unit=(1, '')):
"""
Convert netcdf file in brgm format
Parameters
----------
paths: str or list of str
variable: str
year_start: int
year_end: int
convert_unit: tuple (int, str)
"""
for year in range(year_start, year_end):
data = self.get_hydrological_year(variable, year)
long_name = variable
units = 'Unknown'
if hasattr(data, 'long_name'):
long_name = data.long_name
if hasattr(data, 'units'):
units = data.units
if convert_unit[0] != 1:
data = data * convert_unit[0]
units = convert_unit[1]
header = (
"Données de la variable {0} convertie en {1} pour l'année hydrologique "
"{2}/{3} des {4} zones du ou des fichiers :"
"{5}".format(
long_name, units, year, year + 1, self.nbzones, os.path.basename(self.paths)
)
)
meteobrgm.write_meteo_brgm_format(
'{0}_{1}_{2}'.format(
paths, year, year +1
),
data,
header
)
class ExtractSafran():
variables = {
'ETP': 'ETP_Jou_v2017_Safran_{0}_{1}',
'Plu+Neige': 'Plu+Neige_Jou_v2017_Safran_{0}_{1}',
'Pl_Neige': 'Pl_Neige_Jou_v2017_Safran_{0}_{1}',
'Pluie_Liq': 'Pluie_Liq_Jou_v2017_Safran_{0}_{1}',
'Temper': 'Temper_Jou_v2017_Safran_{0}_{1}',
}
host_dir = "\\\\brgm.fr\\Données\\Modélisation\\Hydrogéol_Aqui-FR\\Safran_v2017"
def __init__(self, output_dir, name, zones=9892):
self.input_dir = self.host_dir
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
self.output_dir = output_dir
self.name = name
self.zones = zones
def treatment(self, year, key, value):
fname = value.format(year, year + 1)
fname = '{0}/{1}'.format(self.input_dir, fname)
df = meteobrgm.read_meteo_brgm_format(fname, year, zones=self.zones)
fname = '{0}/{1}_{2}_{3}_{4}'.format(
self.output_dir, key, self.name, year, year + 1
)
meteobrgm.write_meteo_brgm_format_with_date(fname, df)
def get_parameters(self, start_year, end_year):
parameters = []
for year in range(start_year, end_year):
for key, value in self.variables.items():
parameters.append((year, key, value))
return parameters
def extract_parallel_loop(self, start_year, end_year, n_jobs=1):
parameters = self.get_parameters(start_year, end_year)
inputs = tqdm(parameters)
Parallel(n_jobs=n_jobs)(delayed(self.treatment)(*args) for args in inputs)
def extract_safran(output_dir, name, zones, start_year, end_year, n_jobs=1):
"""
Extract zones SAFRAN from the files hosted on the BRGM server
Parameters
----------
output_dir : str
Output directory where new files are stored
name : str
Name that is part of the new file name
zones : list of int
Zones to extract
start_year : int
First year of data
end_year : int
Last year of data
n_jobs (default 1) : int
Number of processes to execute
"""
exs = ExtractSafran(
output_dir,
name,
zones
)
exs.extract_parallel_loop(start_year, end_year, n_jobs)
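# Illustrative call (a sketch: it assumes the BRGM network share referenced by
# ExtractSafran.host_dir is reachable; the output name and zone list are examples):
#
#   extract_safran('extracted_safran', 'Seine', zones=list(range(1, 101)),
#                  start_year=2000, end_year=2005, n_jobs=4)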
``` |
{
"source": "jpverkamp/schempy",
"score": 3
} |
#### File: jpverkamp/schempy/Decorators.py
```python
def Bouncy(f):
try:
import stackless
def new_f(*args, **kwargs):
def wrap(channel, f, args, kwargs):
try:
result = f(*args, **kwargs)
channel.send(result)
except Exception as e:
channel.send(e)
channel = stackless.channel()
stackless.tasklet(wrap)(channel, f, args, kwargs)
result = channel.receive()
if isinstance(result, Exception):
raise result
else:
return result
new_f.__name__ = f.__name__
new_f.__doc__ = f.__doc__
return new_f
except:
return f
# Rename functions
def Rename(name):
def wrap(f):
f.__name__ = name
return f
return wrap
# Define syntax, it's up to the function to evaluate it's arguments.
# f.Evaluate will be bound to Evaluate(exp, env, k)
# f.Environment will be bound to the current environment
# f.Continuation will be bound to the current continuation
def Syntax(f):
f.Syntax = True
return f
```
#### File: jpverkamp/schempy/Environment.py
```python
import os
import sys
import types
import Exceptions
import Procedure
import Evaluate
import Schemify
import Parser
import Syntax
class Environment:
'''Represents a current environment in Schempy.'''
def __init__(self, base = None):
'''Create a new environment with a base environment. '''
self.Base = base
self.Vars = {}
def __getitem__(self, key):
'''Get an item from this environment.'''
if key in self.Vars:
return self.Vars[key]
elif self.Base:
return self.Base[key]
else:
raise Exceptions.SchempyException('Unbound variable: %s' % key)
def __setitem__(self, key, val):
'''Set an item in this environment.'''
self.Vars[key] = val
def Extend(self):
'''Create a new, extended environment (for lambdas and the like).'''
return Environment(self)
def Import(self, module_name):
'''Import a python package.'''
# Load the file as a module.
        sys.path.append(os.getcwd())
newModule = __import__(module_name)
sys.path = sys.path[:-1]
# Look through the file, load any functions.
for fname in dir(newModule):
f = newModule.__dict__.get(fname)
if isinstance(f, types.FunctionType):
# Load syntax functions.
if 'Syntax' in dir(f) and f.Syntax:
self[f.__name__.lower()] = Syntax.Syntax(f)
# Load procedures.
else:
self[f.__name__.lower()] = Procedure.Procedure(f)
def Load(self, filename):
'''Load a file (scheme or python) into the environment.'''
# Make sure the file exists.
if not os.path.exists(filename):
raise Exceptions.SchempyException('Unable to load %s, file does not exist.' % filename)
path, ext = os.path.splitext(filename)
# Check if it's Python or Scheme.
# Load Scheme files.
if ext in ('.ss', '.scm'):
parse = Parser.Parser()
text = open(filename, 'r').read()
exps = parse(text)
for exp in exps:
result = Schemify.Schemify(Evaluate.Evaluate(exp, self))
if result:
print result
# Load Python files.
elif ext in ('.py', ):
# Use self.Import
self.Import(path)
# Break on any other file types.
else:
raise Exceptions.SchempyException('Unable to load %s, unknown file type.' % filename)
def __str__(self):
'''Stringify the environment.'''
result = '[env ' + ', '.join([Schemify.Schemify(k) + ':' + Schemify.Schemify(v) for k, v in self.Vars.items()])
if self.Base:
result += ' ' + str(self.Base)
result += ']'
return result
def __repr__(self):
'''Return a representation of the environment.'''
return str(self)
```
#### File: schempy/globals/mathematical.py
```python
from Decorators import Rename
@Rename('+')
def add(*args):
'''Add numbers.'''
result = 0
for arg in args:
result += arg
return result
@Rename('-')
def sub(*args):
'''Subtract numbers.'''
if len(args) < 1:
raise ValueError('- expects at least 1 argument, given %d' % len(args))
result = args[0]
for arg in args[1:]:
result -= arg
return result
@Rename('*')
def mul(*args):
'''Multiply numbers.'''
result = 1
for arg in args:
result *= arg
return result
@Rename('/')
def div(*args):
'''Divide numbers.'''
if len(args) < 1:
raise ValueError('/ expects at least 1 argument, given %d' % len(args))
result = args[0]
for arg in args[1:]:
result /= arg
return result
def add1(n):
'''Add one to a number.'''
return n + 1
def sub1(n):
'''Subtract one from a number.'''
return n - 1
```
#### File: schempy/globals/values.py
```python
from Decorators import Syntax, Rename
import Evaluate
import Exceptions
def values(*args):
    '''Package one or more values so they can be unpacked by let-values.'''
return ['**values**'] + list(args)
@Syntax
@Rename('let-values')
def let_values(exp, env):
'''Let values.'''
# Check syntax.
if len(exp) < 3:
raise InvalidSyntaxException(exp)
# Bind parts.
kv_pairs = exp[1]
bodies = exp[2:]
# Bind variables.
local_env = env.Extend()
for kv_pair in kv_pairs:
ks = kv_pair[0]
value_exp = kv_pair[1]
values = Evaluate.Evaluate(value_exp, env)
# Possible problems.
if not isinstance(values, list) or len(values) == 0 or values[0] != '**values**':
raise InvalidSyntaxException(exp, 'Values used out of context')
if len(ks) != len(values) - 1:
            raise InvalidSyntaxException(exp, 'Incorrect number of values to unpack, expected %d got %d' % (len(ks), len(values) - 1))
# Bind the variables.
for i in range(len(ks)):
local_env[ks[i]] = Evaluate.Evaluate(values[i + 1], env)
# Evalute the bodies.
result = None
for body in bodies:
result = Evaluate.Evaluate(body, local_env)
return result
``` |
{
"source": "jpverkamp/takuzu",
"score": 3
} |
#### File: jpverkamp/takuzu/config.py
```python
import argparse
import atexit
import curses
import logging
import os
import platform
import sys
parser = argparse.ArgumentParser(description = 'Solve Takuzu puzzles')
parser.add_argument('--debug', action = 'store_true', default = False, help = 'Print debug messages')
parser.add_argument('--animated', action = 'store_true', default = False, help = 'Animate progress')
parser.add_argument('--method', default = 'human', help = 'The method to solve the puzzle with')
parser.add_argument('files', nargs = argparse.REMAINDER)
args = parser.parse_args()
animated = args.animated
debug = args.debug
method = args.method
files = args.files
logging.basicConfig(format = '%(message)s', level = logging.DEBUG if debug else logging.WARNING)
# Set up curses, clean it up when the program is done and output the last frame to stdout
if animated:
screen = curses.initscr()
last_frame = ''
def on_exit():
curses.endwin()
print(last_frame)
atexit.register(on_exit)
def animate(obj):
global last_frame
last_frame = str(obj)
if animated:
screen.clear()
screen.addstr(0, 0, str(obj))
screen.refresh()
logging.animate = animate
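# Illustrative use from a solver module (a sketch; `board` is any object with a
# useful __str__, which is an assumption about the calling code):
#
#   import logging
#   import config  # noqa: F401  (importing it installs logging.animate)
#   logging.animate(board)  # redraws the curses screen when --animated is set,
#                           # otherwise just records the frame for final output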
``` |
{
"source": "jpverkamp/yrss",
"score": 3
} |
#### File: jpverkamp/yrss/youtube.py
```python
import cachetools
import datetime
import logging
import os
import requests
CACHE_TIME = int(os.getenv('CACHE_TIME', 60 * 60)) # default = 1 hour
API_KEY = os.getenv('API_KEY', None)
def _all(endpoint, **params):
url = 'https://www.googleapis.com/youtube/v3/' + endpoint.strip('/')
logging.debug(url, params)
params.setdefault('key', API_KEY)
try:
result = requests.get(url, params = params).json()
#if result['pageInfo']['totalResults'] > result['pageInfo']['resultsPerPage']:
# logging.debug('TODO: implement paging')
for item in result['items']:
yield item
except Exception as ex:
logging.error(ex)
logging.error(result)
raise ex
def _one(endpoint, **params):
for result in _all(endpoint, **params):
return result
@cachetools.cached(cache = cachetools.TTLCache(maxsize = 1024, ttl = CACHE_TIME))
def get_id(id):
print(id)
if len(id) == 24:
return id
else:
return get_channel_id_for_username(id)
@cachetools.cached(cache = cachetools.TTLCache(maxsize = 1024, ttl = CACHE_TIME))
def get_channel_id_for_username(username):
return _one('/channels', part = 'snippet', forUsername = username)['id']
@cachetools.cached(cache = cachetools.TTLCache(maxsize = 1024, ttl = CACHE_TIME))
def get_channel(id):
data = _one('/channels', part = 'snippet,contentDetails', id = id)
return {
'youtube_id': id,
'title': data['snippet']['title'],
'updated': datetime.datetime.now(),
'logo': data['snippet']['thumbnails']['default']['url'],
'description': data['snippet']['description'],
'uploads_id': data['contentDetails']['relatedPlaylists']['uploads'],
}
@cachetools.cached(cache = cachetools.TTLCache(maxsize = 1024, ttl = CACHE_TIME))
def get_videos(id):
    # Build and return a list instead of yielding: the TTL cache would otherwise
    # store a one-shot generator that is already exhausted on the next cache hit.
    videos = []
    for video in _all('/playlistItems', part = 'snippet', maxResults = 20, playlistId = id):
        videos.append({
            'youtube_id': video['snippet']['resourceId']['videoId'],
            'title': video['snippet']['title'],
            'published': video['snippet']['publishedAt'],
            'updated': datetime.datetime.now(),
            'description': video['snippet']['description'],
            'thumbnail': video['snippet']['thumbnails']['high']['url'],
        })
    return videos
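# Illustrative usage (a sketch; it assumes the API_KEY environment variable holds a
# valid YouTube Data API v3 key, and the channel id below is only an example):
#
#   channel = get_channel(get_id('UC_x5XG1OV2P6uZZ5FSM9Ttw'))
#   for video in get_videos(channel['uploads_id']):
#       print(video['title'], video['published'])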
``` |
{
"source": "jpverma85/db-sharding",
"score": 2
} |
#### File: 19.3.0/scripts/oragsm.py
```python
import os
import sys
import os.path
import re
import socket
import random
from oralogger import *
from oraenv import *
from oracommon import *
from oramachine import *
class OraGSM:
"""
    This class sets up the GSM after DB installation.
"""
def __init__(self,oralogger,orahandler,oraenv,oracommon):
"""
        This constructor of the OraGSM class sets up the GSM on the primary DB.
Attributes:
oralogger (object): object of OraLogger Class.
ohandler (object): object of Handler class.
oenv (object): object of singleton OraEnv class.
ocommon(object): object of OraCommon class.
ora_env_dict(dict): Dict of env variable populated based on env variable for the setup.
file_name(string): Filename from where logging message is populated.
"""
self.ologger = oralogger
self.ohandler = orahandler
self.oenv = oraenv.get_instance()
self.ocommon = oracommon
self.ora_env_dict = oraenv.get_env_vars()
self.file_name = os.path.basename(__file__)
self.omachine = OraMachine(self.ologger,self.ohandler,self.oenv,self.ocommon)
def setup(self):
"""
        This function sets up the GSM on the primary DB.
"""
if self.ocommon.check_key("ADD_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.add_gsm_shard()
self.set_hostid_null()
self.add_invited_node("ADD_SHARD")
self.remove_invited_node("ADD_SHARD")
sys.exit(0)
elif self.ocommon.check_key("DEPLOY_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.deploy_shard()
self.setup_gsm_service()
sys.exit(0)
elif self.ocommon.check_key("INVITED_NODE",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
shard_host=self.ora_env_dict["INVITED_NODE"]
temp_host= shard_host.split('.',1)[0]
retcode1=self.perform_invited_nodeop(temp_host,"remove")
retcode1=self.perform_invited_nodeop(shard_host,"remove")
retcode=self.perform_invited_nodeop(shard_host,"add")
if retcode == 0:
sys.exit(0)
else:
sys.exit(1)
elif self.ocommon.check_key("ADD_SGROUP_PARAMS",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
                self.setup_gsm_shardg("ADD_SGROUP_PARAMS")
sys.exit(0)
elif self.ocommon.check_key("REMOVE_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.remove_gsm_shard()
sys.exit(0)
elif self.ocommon.check_key("MOVE_CHUNKS",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.move_shard_chunks()
sys.exit(0)
elif self.ocommon.check_key("CANCEL_CHUNKS",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.cancel_move_chunks()
sys.exit(0)
elif self.ocommon.check_key("VALIDATE_NOCHUNKS",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.validate_nochunks()
sys.exit(0)
elif self.ocommon.check_key("CHECK_ONLINE_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.verify_online_shard()
sys.exit(0)
elif self.ocommon.check_key("CHECK_GSM_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.verify_gsm_shard()
sys.exit(0)
elif self.ocommon.check_key("VALIDATE_SHARD",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
self.validate_gsm_shard()
sys.exit(0)
elif self.ocommon.check_key("VALIDATE_GSM",self.ora_env_dict):
self.catalog_checks()
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
else:
sys.exit(0)
elif self.ocommon.check_key("CHECK_LIVENESS",self.ora_env_dict):
status = self.catalog_setup_checks()
if not status:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.ocommon.prog_exit("127")
self.ocommon.log_info_message("GSM liveness check completed sucessfully!",self.file_name)
sys.exit(0)
elif self.ocommon.check_key("CATALOG_SETUP",self.ora_env_dict):
            # If the user passes the env variable CATALOG_SETUP as true, then it will just create the gsm director and add the catalog but will not add any shard
            # It will also add the service
status = self.catalog_setup_checks()
if status == False:
self.ocommon.log_info_message("No existing catalog and GDS setup found on this system. Setting up GDS and will configure catalog on this machine.",self.file_name)
self.setup_machine()
self.catalog_checks()
self.reset_gsm_setup()
status1 = self.gsm_setup_check()
if status1:
self.ocommon.log_info_message("Gsm Setup is already completed on this database",self.file_name)
self.start_gsm_director()
self.ocommon.log_info_message("Started GSM",self.file_name)
else:
                # Perform catalog setup after checking the GSM_MASTER flag. The catalog will be added only if the GSM_MASTER flag is set.
self.ocommon.log_info_message("No existing GDS found on this system. Setting up GDS on this machine.",self.file_name)
master_flag=self.gsm_master_flag_check()
if master_flag:
self.setup_gsm_calog()
self.setup_gsm_director()
self.start_gsm_director()
self.status_gsm_director()
self.setup_gsm_shardg("SHARD_GROUP")
self.gsm_backup_file()
self.gsm_completion_message()
### Running Custom Scripts
self.run_custom_scripts()
else:
self.add_gsm_director()
self.start_gsm_director()
self.gsm_backup_file()
self.gsm_completion_message()
else:
            # This block runs shard addition, catalog addition and service creation
            # This block also verifies the master flag; if this node is not the master GSM director then it will not create the catalog but will only add the GSM
self.setup_machine()
self.gsm_checks()
self.reset_gsm_setup()
status = self.gsm_setup_check()
if status:
self.ocommon.log_info_message("Gsm Setup is already completed on this database",self.file_name)
self.start_gsm_director()
self.ocommon.log_info_message("Started GSM",self.file_name)
else:
                # if self.gsm_setup_check() returns False then perform shard addition, catalog addition and service creation
master_flag=self.gsm_master_flag_check()
if master_flag:
self.ocommon.log_info_message("No existing GDS found on this system. Setting up GDS on this machine.",self.file_name)
self.setup_gsm_calog()
self.setup_gsm_director()
self.start_gsm_director()
self.status_gsm_director()
self.setup_gsm_shardg("SHARD_GROUP")
self.setup_gsm_shard()
self.set_hostid_null()
self.stop_gsm_director()
time.sleep(30)
self.start_gsm_director()
self.add_invited_node("SHARD")
self.remove_invited_node("SHARD")
self.stop_gsm_director()
time.sleep(30)
self.start_gsm_director()
self.deploy_shard()
self.setup_gsm_service()
self.setup_sample_schema()
self.gsm_backup_file()
self.gsm_completion_message()
### Running Custom Scripts
self.run_custom_scripts()
else:
self.add_gsm_director()
self.start_gsm_director()
self.gsm_backup_file()
self.gsm_completion_message()
########### SETUP_MACHINE begins here ####################
## Function to machine setup
def setup_machine(self):
"""
This function performs the compute before performing setup
"""
self.omachine.setup()
########### SETUP_MACHINE ENDS here ####################
def gsm_checks(self):
"""
        This function performs DB checks before starting the setup
"""
self.ohome_check()
self.passwd_check()
self.shard_user_check()
self.gsm_hostname_check()
self.director_params_checks()
self.catalog_params_check()
self.shard_params_check()
self.sgroup_params_check()
def catalog_checks(self):
"""
        This function performs DB checks before starting the setup
"""
self.ohome_check()
self.passwd_check()
self.shard_user_check()
self.gsm_hostname_check()
self.director_params_checks()
self.catalog_params_check()
self.sgroup_params_check()
def ohome_check(self):
"""
This function performs the oracle home related checks
"""
if self.ocommon.check_key("ORACLE_HOME",self.ora_env_dict):
self.ocommon.log_info_message("ORACLE_HOME variable is set. Check Passed!",self.file_name)
else:
self.ocommon.log_error_message("ORACLE_HOME variable is not set. Exiting!",self.file_name)
self.ocommon.prog_exit("127")
if os.path.isdir(self.ora_env_dict["ORACLE_HOME"]):
            msg='''ORACLE_HOME {0} directory exists. Directory Check passed!'''.format(self.ora_env_dict["ORACLE_HOME"])
self.ocommon.log_info_message(msg,self.file_name)
else:
            msg='''ORACLE_HOME {0} directory does not exist. Directory Check Failed!'''.format(self.ora_env_dict["ORACLE_HOME"])
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def passwd_check(self):
"""
        This function performs password-related checks
"""
passwd_file_flag = False
if self.ocommon.check_key("SECRET_VOLUME",self.ora_env_dict):
msg='''SECRET_VOLUME passed as an env variable and set to {0}'''.format(self.ora_env_dict["SECRET_VOLUME"])
else:
self.ora_env_dict=self.ocommon.add_key("SECRET_VOLUME","/run/secrets",self.ora_env_dict)
msg='''SECRET_VOLUME not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["SECRET_VOLUME"])
self.ocommon.log_warn_message(msg,self.file_name)
if self.ocommon.check_key("COMMON_OS_PWD_FILE",self.ora_env_dict):
msg='''COMMON_OS_PWD_FILE passed as an env variable and set to {0}'''.format(self.ora_env_dict["COMMON_OS_PWD_FILE"])
else:
self.ora_env_dict=self.ocommon.add_key("COMMON_OS_PWD_FILE","common_os_pwdfile.enc",self.ora_env_dict)
msg='''COMMON_OS_PWD_FILE not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["COMMON_OS_PWD_FILE"])
self.ocommon.log_warn_message(msg,self.file_name)
if self.ocommon.check_key("PWD_KEY",self.ora_env_dict):
msg='''PWD_KEY passed as an env variable and set to {0}'''.format(self.ora_env_dict["PWD_KEY"])
else:
self.ora_env_dict=self.ocommon.add_key("PWD_KEY","pwd.key",self.ora_env_dict)
msg='''PWD_KEY not passed as an env variable. Setting default to {0}'''.format(self.ora_env_dict["PWD_KEY"])
self.ocommon.log_warn_message(msg,self.file_name)
secret_volume = self.ora_env_dict["SECRET_VOLUME"]
common_os_pwd_file = self.ora_env_dict["COMMON_OS_PWD_FILE"]
pwd_key = self.ora_env_dict["PWD_KEY"]
passwd_file='''{0}/{1}'''.format(self.ora_env_dict["SECRET_VOLUME"],self.ora_env_dict["COMMON_OS_PWD_FILE"])
if os.path.isfile(passwd_file):
msg='''Passwd file {0} exists. Password file check passed!'''.format(passwd_file)
self.ocommon.log_info_message(msg,self.file_name)
msg='''Reading encrypted passwd from file {0}.'''.format(passwd_file)
self.ocommon.log_info_message(msg,self.file_name)
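# Decrypt the common OS password file with openssl using the key file from the secret volume; the decrypted copy is written to /tmp.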
cmd='''openssl enc -d -aes-256-cbc -in \"{0}/{1}\" -out /tmp/{1} -pass file:\"{0}/{2}\"'''.format(secret_volume,common_os_pwd_file,pwd_key)
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
self.ocommon.check_os_err(output,error,retcode,True)
passwd_file_flag = True
if not passwd_file_flag:
s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()?"
passlen = 8
password = "".join(random.sample(s,passlen ))
else:
fname='''/tmp/{0}'''.format(common_os_pwd_file)
fdata=self.ocommon.read_file(fname)
password=fdata
if self.ocommon.check_key("ORACLE_PWD",self.ora_env_dict):
msg="ORACLE_PWD is passed as an env variable. Check Passed!"
self.ocommon.log_info_message(msg,self.file_name)
else:
self.ora_env_dict=self.ocommon.add_key("ORACLE_PWD",password,self.ora_env_dict)
msg="ORACLE_PWD set to HIDDEN_STRING generated using encrypted password file"
self.ocommon.log_info_message(msg,self.file_name)
def shard_user_check(self):
"""
This function sets the admin user for the PDB and CDB.
"""
if self.ocommon.check_key("SHARD_ADMIN_USER",self.ora_env_dict):
msg='''SHARD_ADMIN_USER {0} is passed as an env variable. Check Passed!'''.format(self.ora_env_dict["SHARD_ADMIN_USER"])
self.ocommon.log_info_message(msg,self.file_name)
else:
self.ora_env_dict=self.ocommon.add_key("SHARD_ADMIN_USER","mysdbadmin",self.ora_env_dict)
msg="SHARD_ADMIN_USER is not set, setting default to mysdbadmin"
self.ocommon.log_info_message(msg,self.file_name)
if self.ocommon.check_key("PDB_ADMIN_USER",self.ora_env_dict):
msg='''PDB_ADMIN_USER {0} is passed as an env variable. Check Passed!'''.format(self.ora_env_dict["PDB_ADMIN_USER"])
self.ocommon.log_info_message(msg,self.file_name)
else:
self.ora_env_dict=self.ocommon.add_key("PDB_ADMIN_USER","PDBADMIN",self.ora_env_dict)
msg="PDB_ADMIN_USER is not set, setting default to PDBADMIN."
self.ocommon.log_info_message(msg,self.file_name)
def director_params_checks(self):
"""
This function checks the shard director params
"""
status=False
reg_exp= self.director_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
msg='''SHARD Director PARAMS {0} is set to {1}'''.format(key,self.ora_env_dict[key])
self.ocommon.log_info_message(msg,self.file_name)
status=True
def gsm_hostname_check(self):
"""
This function checks and sets the hostname.
"""
if self.ocommon.check_key("ORACLE_HOSTNAME",self.ora_env_dict):
msg='''ORACLE_HOSTNAME {0} is passed as an env variable. Check Passed!'''.format(self.ora_env_dict["ORACLE_HOSTNAME"])
self.ocommon.log_info_message(msg,self.file_name)
else:
if self.ocommon.check_key("KUBE_SVC",self.ora_env_dict):
hostname='''{0}.{1}'''.format(socket.gethostname(),self.ora_env_dict["KUBE_SVC"])
else:
hostname='''{0}'''.format(socket.gethostname())
msg='''ORACLE_HOSTNAME is not set, setting it to hostname {0} of the compute!'''.format(hostname)
self.ora_env_dict=self.ocommon.add_key("ORACLE_HOSTNAME",hostname,self.ora_env_dict)
self.ocommon.log_info_message(msg,self.file_name)
def catalog_params_check(self):
"""
This function checks if CATALOG[1-9]_PARAMS such as CATALOG_PARAMS is passed as an env variable or not. If not passed then exit.
"""
status=False
reg_exp= self.catalog_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
msg='''CATALOG PARAMS {0} is set to {1}'''.format(key,self.ora_env_dict[key])
self.ocommon.log_info_message(msg,self.file_name)
status=True
if not status:
msg="CATALOG[1-9]_PARAMS such as CATALOG_PARAMS is not set, exiting!"
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def shard_params_check(self):
"""
This function checks if SHARD[1-9]_PARAMS such as SHARD1_PARAMS is passed as an env variable or not. If not passed then exit.
"""
status=False
reg_exp= self.shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
msg='''SHARD PARAMS {0} is set to {1}'''.format(key,self.ora_env_dict[key])
self.ocommon.log_info_message(msg,self.file_name)
status=True
if not status:
msg="SHARD[1-9]_PARAMS such as SHARD1_PARAMS is not set, exiting!"
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def sgroup_params_check(self):
"""
This function checks if SHARD[1-9]_GROUP_PARAMS such as SHARD1_GROUP_PARAMS is passed as an env variable or not. If not passed then exit.
"""
status=False
reg_exp= self.shardg_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
msg='''SHARD GROUP PARAMS {0} is set to {1}'''.format(key,self.ora_env_dict[key])
self.ocommon.log_info_message(msg,self.file_name)
status=True
def gsm_master_flag_check(self):
"""
This function checks if MASTER_GSM is passed as an env variable and returns True or False accordingly.
"""
status=False
if self.ocommon.check_key("MASTER_GSM",self.ora_env_dict):
msg='''MASTER_GSM is set. This machine will be configured as the master GSM director.'''
self.ocommon.log_info_message(msg,self.file_name)
return True
else:
return False
def catalog_setup_checks(self):
"""
This function checks if the director and catalog are set up and the connection is established.
"""
status = False
gsm_status = self.check_gsm_director(None)
#catalog_status = self.check_gsm_catalog()
if gsm_status == 'completed':
status = True
else:
status = False
#if catalog_status == 'completed':
# status = True
#else:
# status = False
return status
########### DB_CHECKS Related Functions Begin Here ####################
########## SETUP_CDB_catalog FUNCTION BEGIN HERE ###############################
def reset_gsm_setup(self):
"""
This function deletes the GSM files.
"""
self.ocommon.log_info_message("Inside reset_gsm_setup",self.file_name)
gsmdata_loc='/opt/oracle/gsmdata'
cmd_list=[]
if self.ocommon.check_key("RESET_ENV",self.ora_env_dict):
if self.ora_env_dict["RESET_ENV"]:
msg='''Deleting files from {0}'''.format(gsmdata_loc)
self.ocommon.log_info_message(msg,self.file_name)
cmd_list.append('''rm -f {0}/gsm.ora'''.format(gsmdata_loc))
cmd_list.append('''rm -f {0}/tnsnames.ora'''.format(gsmdata_loc))
cmd_list.append('''rm -rf {0}/wallets'''.format(gsmdata_loc))
for cmd in cmd_list:
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
self.ocommon.check_os_err(output,error,retcode,True)
def gsm_setup_check(self):
"""
This function checks if GSM is already set up on this machine
"""
status=True
self.ocommon.log_info_message("Inside gsm_setup_check",self.file_name)
gsmdata_loc='/opt/oracle/gsmdata'
gsmfile_loc='''{0}/network/admin'''.format(self.ora_env_dict["ORACLE_HOME"])
gsmora='''{0}/gsm.ora'''.format(gsmdata_loc)
tnsnamesora='''{0}/tnsnames.ora'''.format(gsmdata_loc)
walletloc='''{0}/gsmwallet'''.format(gsmdata_loc)
if os.path.isfile(gsmora):
cmd='''cp -r -v -f {0} {1}/'''.format(gsmora,gsmfile_loc)
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
self.ocommon.check_os_err(output,error,retcode,True)
else:
status=False
if os.path.isfile(tnsnamesora):
cmd='''cp -r -v -f {0} {1}/'''.format(tnsnamesora,gsmfile_loc)
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
self.ocommon.check_os_err(output,error,retcode,True)
else:
status=False
if os.path.isdir(walletloc):
cmd='''cp -r -v -f {0} {1}/'''.format(walletloc,gsmfile_loc)
output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
self.ocommon.check_os_err(output,error,retcode,True)
else:
status=False
if status:
return True
else:
return False
#################### Catalog related Functions BEGINS Here ###########################
def setup_gsm_calog(self):
"""
This function sets up the GSM catalog.
"""
self.ocommon.log_info_message("Inside setup_gsm_calog()",self.file_name)
status=False
reg_exp= self.catalog_regex()
counter=1
end_counter=60
catalog_db_status=None
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
catalog_db,catalog_pdb,catalog_port,catalog_region,catalog_host,catalog_name,catalog_chunks=self.process_clog_vars(key)
catalog_db_status=self.check_setup_status(catalog_host,catalog_db,catalog_pdb,catalog_port)
if catalog_db_status == 'completed':
self.configure_gsm_clog(catalog_host,catalog_db,catalog_pdb,catalog_port,catalog_name,catalog_region,catalog_chunks)
break
else:
msg='''Catalog status must return completed but the returned value is {0}'''.format(catalog_db_status)
self.ocommon.log_info_message(msg,self.file_name)
if catalog_db_status == 'completed':
break
else:
msg='''Catalog setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
self.ocommon.log_info_message(msg,self.file_name)
time.sleep(60)
counter=counter+1
def process_clog_vars(self,key):
"""
This function processes the catalog vars based on key and returns the values to configure the GSM
"""
catalog_db=None
catalog_pdb=None
catalog_port=None
catalog_region=None
catalog_host=None
catalog_name=None
catalog_chunks=None
self.ocommon.log_info_message("Inside process_clog_vars()",self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
if ckey == 'catalog_db':
catalog_db = cvar_dict[ckey]
if ckey == 'catalog_pdb':
catalog_pdb = cvar_dict[ckey]
if ckey == 'catalog_port':
catalog_port = cvar_dict[ckey]
if ckey == 'catalog_region':
catalog_region = cvar_dict[ckey]
if ckey == 'catalog_host':
catalog_host = cvar_dict[ckey]
if ckey == 'catalog_name':
catalog_name = cvar_dict[ckey]
if ckey == 'catalog_chunks':
catalog_chunks = cvar_dict[ckey]
## Set the values if not set in above block
if not catalog_port:
catalog_port=1521
if not catalog_region:
catalog_region="region1,region2"
### Check values must be set
if catalog_host and catalog_db and catalog_pdb and catalog_port and catalog_region and catalog_name:
return catalog_db,catalog_pdb,catalog_port,catalog_region,catalog_host,catalog_name,catalog_chunks
else:
msg1='''catalog_db={0},catalog_pdb={1}'''.format((catalog_db or "Missing Value"),(catalog_pdb or "Missing Value"))
msg2='''catalog_port={0},catalog_host={1}'''.format((catalog_port or "Missing Value"),(catalog_host or "Missing Value"))
msg3='''catalog_region={0},catalog_name={1}'''.format((catalog_region or "Missing Value"),(catalog_name or "Missing Value"))
msg='''Catalog params {0} is not set correctly. One or more value is missing {1} {2} {3}'''.format(key,msg1,msg2,msg3)
self.ocommon.log_info_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def check_gsm_catalog(self):
"""
This function checks the catalog status in GSM
"""
self.ocommon.log_info_message("Inside check_gsm_catalog()",self.file_name)
#dtrname,dtrport,dtregion=self.process_director_vars()
gsmcmd='''
config;
exit;
'''.format("test")
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:GSMs\n)(?:.+\n)+",output)
try:
match=self.ocommon.check_substr_match(matched_output[0],"test")
except:
match=False
return(self.ocommon.check_status_value(match))
# output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
# new_output=output[0].replace(" ","")
# self.ocommon.log_info_message(new_output,self.file_name)
# match=self.ocommon.check_substr_match(new_output,"Catalogconnectionisestablished")
# return(self.ocommon.check_status_value(match))
def catalog_regex(self):
"""
This function returns the regex to search the CATALOG_PARAMS key
"""
self.ocommon.log_info_message("Inside catalog_regex()",self.file_name)
return re.compile('CATALOG_PARAMS')
def configure_gsm_clog(self,chost,ccdb,cpdb,cport,catalog_name,catalog_region,catalog_chunks):
"""
This function configures the GSM catalog.
"""
self.ocommon.log_info_message("Inside configure_gsm_clog()",self.file_name)
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
if catalog_chunks:
chunks="-chunks {0}".format(catalog_chunks)
else:
chunks=""
cpasswd="<PASSWORD>"
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
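# Build the gdsctl 'create shardcatalog' command; the -chunks clause is included only when catalog_chunks was supplied, and the catalog host is added as an invited node.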
gsmcmd='''
create shardcatalog -database \"(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST={0})(PORT={1}))(CONNECT_DATA=(SERVICE_NAME={2})))\" {7} -user {3}/{4} -sdb {5} -region {6} -agent_port 8080 -agent_password {4} -autovncr off;
add invitednode {0};
exit;
'''.format(chost,cport,cpdb,cadmin,cpasswd,catalog_name,catalog_region,chunks)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
######################################## GSM director Functions Begins Here #####################
def process_director_vars(self,key):
"""
This function processes the GSM director vars based on key and returns the values to configure the GSM
"""
dtrname=None
dtrport=None
dtregion=None
self.ocommon.log_info_message("Inside process_director_vars()",self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
if ckey == 'director_name':
dtrname = cvar_dict[ckey]
if ckey == 'director_port':
dtrport = cvar_dict[ckey]
if ckey == 'director_region':
dtregion = cvar_dict[ckey]
### Check values must be set
if dtrname and dtrport and dtregion:
return dtrname,dtrport,dtregion
else:
msg1='''director_name={0},director_port={1}'''.format((dtrname or "Missing Value"),(dtrport or "Missing Value"))
msg2='''director_region={0}'''.format((dtregion or "Missing Value"))
msg='''Director params {0} is not set correctly. One or more value is missing {1} {2}'''.format(key,msg1,msg2)
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("Error occurred")
def check_gsm_director(self,dname):
"""
This function checks the GSM director status
"""
self.ocommon.log_info_message("Inside check_gsm_director()",self.file_name)
status=False
if dname:
gsmcmd=self.get_gsm_config_cmd(dname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:GSMs\n)(?:.+\n)+",output)
try:
if self.ocommon.check_substr_match(matched_output[0],dname):
status=True
except:
status=False
else:
reg_exp= self.director_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
dname,dtrport,dtregion=self.process_director_vars(key)
gsmcmd=self.get_gsm_config_cmd(dname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:GSMs\n)(?:.+\n)+",output)
try:
if self.ocommon.check_substr_match(matched_output[0],dname):
status=True
except:
status=False
return(self.ocommon.check_status_value(status))
def add_gsm_director(self):
"""
This function adds the GSM director
"""
status=False
counter=1
end_counter=60
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
reg_exp= self.director_regex()
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_director_status=None
dtrname,dtrport,dtregion=self.process_director_vars(key)
shard_director_status=self.check_gsm_director(dtrname)
if shard_director_status != 'completed':
self.configure_gsm_director(dtrname,dtrport,dtregion,gsmhost,cadmin)
status = self.check_gsm_director(None)
if status == 'completed':
break
if status == 'completed':
break
else:
msg='''GSM shard director setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
self.ocommon.log_info_message(msg,self.file_name)
time.sleep(60)
counter=counter+1
status = self.check_gsm_director(None)
if status == 'completed':
msg='''Shard director setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 60 minutes to complete the shard director setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def setup_gsm_director(self):
"""
This function sets up the director in GSM
"""
self.ocommon.log_info_message("Inside setup_gsm_director()",self.file_name)
status=False
reg_exp= self.director_regex()
counter=1
end_counter=3
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_director_status=None
dtrname,dtrport,dtregion=self.process_director_vars(key)
shard_director_status=self.check_gsm_director(dtrname)
if shard_director_status != 'completed':
self.configure_gsm_director(dtrname,dtrport,dtregion,gsmhost,cadmin)
status = self.check_gsm_director(None)
if status == 'completed':
break
else:
msg='''GSM shard director setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
time.sleep(60)
counter=counter+1
status = self.check_gsm_director(None)
if status == 'completed':
msg='''Shard director setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 3 minutes to complete the shard director setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def configure_gsm_director(self,dtrname,dtrport,dtregion,gsmhost,cadmin):
"""
This function configures the GSM director
"""
## Getting the values of catalog_port,catalog_pdb,catalog_host
cpasswd="<PASSWORD>"
reg_exp= self.catalog_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
catalog_db,catalog_pdb,catalog_port,catalog_region,catalog_host,catalog_name,catalog_chunks=self.process_clog_vars(key)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
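# Register this shard director with gdsctl 'add gsm', pointing it at the catalog PDB (catalog_host:catalog_port/catalog_pdb) for the given region.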
gsmcmd='''
add gsm -gsm {0} -listener {1} -pwd {2} -catalog {3}:{4}/{5} -region {6};
exit;
'''.format(dtrname,dtrport,cpasswd,catalog_host,catalog_port,catalog_pdb,dtregion,gsmhost)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
def start_gsm_director(self):
"""
This function starts the director in the GSM
"""
status='noval'
self.ocommon.log_info_message("Inside start_gsm_director() function",self.file_name)
reg_exp= self.director_regex()
counter=1
end_counter=10
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
dtrname,dtrport,dtregion=self.process_director_vars(key)
gsmcmd='''
start gsm -gsm {0};
exit;
'''.format(dtrname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
status=self.check_gsm_director(dtrname)
if status == 'completed':
break;
if status == 'completed':
break
else:
msg='''GSM shard director failed to start. Sleeping for 30 seconds and sleeping count is {0}'''.format(counter)
self.ocommon.log_error_message(msg,self.file_name)
time.sleep(30)
counter=counter+1
if status != 'completed':
msg='''GSM shard director failed to start. Exiting!'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def stop_gsm_director(self):
"""
This function stops the director in the GSM
"""
status=False
self.ocommon.log_info_message("Inside stop_gsm_director() function",self.file_name)
reg_exp= self.director_regex()
counter=1
end_counter=2
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
dtrname,dtrport,dtregion=self.process_director_vars(key)
gsmcmd='''
stop gsm -gsm {0};
exit;
'''.format(dtrname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
counter=counter+1
def status_gsm_director(self):
"""
This function checks the GSM director status
"""
gsm_status = self.check_gsm_director(None)
#catalog_status = self.check_gsm_catalog()
if gsm_status == 'completed':
msg='''Director setup completed in GSM and catalog is connected'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Shard director setup in GSM did not complete or is not connected to the catalog. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
######################################## Shard Group Setup Begins Here ############################
def setup_gsm_shardg(self,restype):
"""
This function sets up the shard group.
"""
self.ocommon.log_info_message("Inside setup_gsm_shardg()",self.file_name)
status=False
if restype == 'ADD_SGROUP_PARAMS':
reg_exp = self.add_shardg_regex()
elif restype == 'SHARD_GROUP':
reg_exp = self.shardg_regex()
else:
self.ocommon.log_error_message("No Key Specified! You can only pass ADD_SGROUP_PARAMS or SHARD_GROUP key to create a shard group",self.file_name)
self.ocommon.prog_exit("127")
counter=1
end_counter=3
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_group_status=None
group_name,deploy_as,group_region=self.process_shardg_vars(key)
dtrname=self.get_director_name(group_region)
shard_group_status=self.check_shardg_status(group_name,dtrname)
if shard_group_status != 'completed':
self.configure_gsm_shardg(group_name,deploy_as,group_region)
status = self.check_shardg_status(None,None)
if status == 'completed':
break
else:
msg='''GSM shard group setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
time.sleep(60)
counter=counter+1
status = self.check_shardg_status(None,None)
if status == 'completed':
msg='''Shard group setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 2 minutes to complete the shard group setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def get_director_name(self,region_name):
"""
This function gets the director name based on the region
"""
self.ocommon.log_info_message("Inside get_director_name()",self.file_name)
status=False
director_name=None
reg_exp= self.director_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
dtrname,dtrport,dtregion=self.process_director_vars(key)
director_name=dtrname
gsm_status = self.check_gsm_director(dtrname)
if gsm_status == 'completed':
status = True
else:
status = False
if dtregion == region_name:
break
if status:
if director_name:
return director_name
else:
self.ocommon.log_error_message("No director exist to match the region",self.file_name)
self.ocommon.prog_exit("127")
else:
self.ocommon.log_error_message("Shard Director is not running!",self.file_name)
self.ocommon.prog_exit("127")
def get_shardg_region_name(self,sgname):
"""
This function gets the region name based on the shard group name
"""
self.ocommon.log_info_message("Inside get_shardg_region_name()",self.file_name)
status=False
region_name=None
reg_exp= self.shardg_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
group_name,deploy_as,group_region=self.process_shardg_vars(key)
region_name=group_region
if sgname == group_name:
status=True
break
if status:
return region_name
else:
self.ocommon.log_error_message("No such shard group exist! exiting!",self.file_name)
self.ocommon.prog_exit("127")
def process_shardg_vars(self,key):
"""
This function processes the shard group vars based on key and returns the values to configure the GSM
"""
group_name=None
deploy_as=None
group_region=None
self.ocommon.log_info_message("Inside process_shardg_vars()",self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
if ckey == 'group_name':
group_name = cvar_dict[ckey]
if ckey == 'deploy_as':
deploy_as = cvar_dict[ckey]
if ckey == 'group_region':
group_region = cvar_dict[ckey]
### Check values must be set
if group_name and deploy_as and group_region:
return group_name,deploy_as,group_region
else:
msg1='''group_name={0},deploy_as={1}'''.format((group_name or "Missing Value"),(deploy_as or "Missing Value"))
msg2='''group_region={0}'''.format((group_region or "Missing Value"))
msg='''Shard group params {0} is not set correctly. One or more value is missing {1} {2}'''.format(key,msg1,msg2)
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("Error occurred")
def check_shardg_status(self,group_name,dname):
"""
This function checks the shard group status in GSM
"""
self.ocommon.log_info_message("Inside check_shardg_status()",self.file_name)
status=False
if dname:
gsmcmd=self.get_gsm_config_cmd(dname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:Shard Groups\n)(?:.+\n)+",output)
if self.ocommon.check_substr_match(matched_output[0],group_name):
status=True
else:
status=False
else:
reg_exp= self.shardg_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
group_name,deploy_as,group_region=self.process_shardg_vars(key)
dname=self.get_director_name(group_region)
gsmcmd=self.get_gsm_config_cmd(dname)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:Shard Groups\n)(?:.+\n)+",output)
# match=re.search("(?i)(?m)"+group_name,matched_output)
if self.ocommon.check_substr_match(matched_output[0],group_name):
status=True
else:
status=False
return(self.ocommon.check_status_value(status))
def get_gsm_config_cmd(self,dname):
"""
Get the GSM config command
"""
self.ocommon.log_info_message("Inside get_gsm_config_cmd()",self.file_name)
gsmcmd='''
config;
exit;
'''.format("test")
return gsmcmd
def director_regex(self):
"""
This function returns the regex to search the SHARD_DIRECTOR_PARAMS key
"""
self.ocommon.log_info_message("Inside director_regex()",self.file_name)
return re.compile('SHARD_DIRECTOR_PARAMS')
def shardg_regex(self):
"""
This function returns the regex to search the SHARD[0-9]+_GROUP_PARAMS keys
"""
self.ocommon.log_info_message("Inside shardg_regex()",self.file_name)
return re.compile('SHARD[0-9]+_GROUP_PARAMS')
def add_shardg_regex(self):
"""
This function returns the regex to search the ADD_SGROUP_PARAMS key
"""
self.ocommon.log_info_message("Inside add_shardg_regex()",self.file_name)
return re.compile('ADD_SGROUP_PARAMS')
def configure_gsm_shardg(self,group_name,deploy_as,group_region):
"""
This function configures the shard group.
"""
self.ocommon.log_info_message("Inside configure_gsm_shardg()",self.file_name)
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
dtrname=self.get_director_name(group_region)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
gsmcmd='''
connect {1}/{2};
add shardgroup -shardgroup {3} -deploy_as {4} -region {5}
exit;
'''.format("NA",cadmin,cpasswd,group_name,deploy_as,group_region)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
#########################################Shard Function Begins Here ##############################
def setup_gsm_shard(self):
"""
This function sets up and adds the shard in the GSM
"""
self.ocommon.log_info_message("Inside setup_gsm_shard()",self.file_name)
status=False
reg_exp= self.shard_regex()
counter=1
end_counter=60
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db_status=None
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
shard_db_status=self.check_setup_status(shard_host,shard_db,shard_pdb,shard_port)
if shard_db_status == 'completed':
self.configure_gsm_shard(shard_host,shard_db,shard_pdb,shard_port,shard_group)
else:
msg='''Shard db status must return completed but the returned value is {0}'''.format(shard_db_status)
self.ocommon.log_info_message(msg,self.file_name)
status = self.check_shard_status(None)
if status == 'completed':
break
else:
msg='''Shard DB setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
self.ocommon.log_info_message(msg,self.file_name)
time.sleep(60)
counter=counter+1
status = self.check_shard_status(None)
if status == 'completed':
msg='''Shard DB setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 60 minutes to complete the shard db setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def add_gsm_shard(self):
"""
This function adds the shard in the GSM
"""
self.ocommon.log_info_message("Inside add_gsm_shard()",self.file_name)
status=False
reg_exp= self.add_shard_regex()
counter=1
end_counter=3
shard_name="none"
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db_status=None
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
shard_name='''{0}_{1}'''.format(shard_db,shard_pdb)
shard_db_status=self.check_setup_status(shard_host,shard_db,shard_pdb,shard_port)
self.ocommon.log_info_message("Shard Status : " + shard_db_status,self.file_name)
if shard_db_status == 'completed':
self.configure_gsm_shard(shard_host,shard_db,shard_pdb,shard_port,shard_group)
else:
msg='''Shard db status must return completed but the returned value is {0}'''.format(shard_db_status)
self.ocommon.log_info_message(msg,self.file_name)
status = self.check_shard_status(None)
if status == 'completed':
break
else:
msg='''Shard DB setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
self.ocommon.log_info_message(msg,self.file_name)
time.sleep(60)
counter=counter+1
status = self.check_shard_status(shard_name)
if status == 'completed':
msg='''Shard DB setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 3 minutes to complete the shard db setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def remove_gsm_shard(self):
"""
This function removes the shard in the GSM
"""
self.ocommon.log_info_message("Inside remove_gsm_shard()",self.file_name)
status=False
reg_exp= self.remove_shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db_status=None
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
shard_db_status=self.check_setup_status(shard_host,shard_db,shard_pdb,shard_port)
if shard_db_status == 'completed':
self.delete_gsm_shard(shard_host,shard_db,shard_pdb,shard_port,shard_group)
else:
msg='''Shard db status must return completed but the returned value is {0}'''.format(shard_db_status)
self.ocommon.log_info_message(msg,self.file_name)
def move_shard_chunks(self):
"""
This function moves the shard chunks
"""
self.ocommon.log_info_message("Inside move_shard_chunks()",self.file_name)
status=False
reg_exp= self.move_chunks_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
move_chunks_status=None
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
shard_num = self.count_online_shards()
online_shard = self.check_online_shard(shard_name)
if shard_num > 1 and online_shard == 0 :
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
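# Move all chunks off this shard (gdsctl 'MOVE CHUNK -CHUNK ALL -SOURCE'); only attempted when other online shards exist and this shard itself is online.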
gsmcmd='''
connect {1}/{2};
MOVE CHUNK -CHUNK ALL -SOURCE {0}
config shard;
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
def validate_nochunks(self):
"""
This function validates that no chunks remain on the shard
"""
self.ocommon.log_info_message("Inside validate_nochunks()",self.file_name)
status=False
reg_exp= self.move_nochunks_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
move_chunks_status=None
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
shard_num = self.count_online_shards()
online_shard = self.check_online_shard(shard_name)
if shard_num > 1 and online_shard == 0 :
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
config chunks -shard {0}
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
matched_output=re.findall("(?:Chunks\n)(?:.+\n)+",output)
if self.ocommon.check_substr_match(matched_output[0].lower(),shard_name.lower()):
self.ocommon.prog_exit("127")
def move_chunks_regex(self):
"""
This function returns the regex to search the MOVE_CHUNKS key
"""
self.ocommon.log_info_message("Inside move_chunks_regex()",self.file_name)
return re.compile('MOVE_CHUNKS')
def move_nochunks_regex(self):
"""
This function returns the regex to search the VALIDATE_NOCHUNKS key
"""
self.ocommon.log_info_message("Inside move_nochunks_regex()",self.file_name)
return re.compile('VALIDATE_NOCHUNKS')
def check_shard_chunks(self):
"""
This function checks the shard chunks
"""
self.ocommon.log_info_message("Inside check_shard_chunks()",self.file_name)
status=False
reg_exp= self.check_chunks_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
move_chunks_status=None
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
online_shard = self.check_online_shard(shard_name)
if online_shard == 0 :
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
config chunks -shard {0}
config shard;
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
def check_chunks_regex(self):
"""
This function returns the regex to search the CHECK_CHUNKS key
"""
self.ocommon.log_info_message("Inside check_chunks_regex()",self.file_name)
return re.compile('CHECK_CHUNKS')
def cancel_move_chunks(self):
"""
This function cancels the shard chunk movement
"""
self.ocommon.log_info_message("Inside cancel_move_chunks()",self.file_name)
status=False
reg_exp= self.cancel_chunks_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
move_chunks_status=None
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
online_shard = self.check_online_shard(shard_name)
if online_shard == 1:
self.ocommon.log_info_message("Shard is not online. Performing chunk cancellation in GSM to set the shard chunk status.",self.file_name)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
ALTER MOVE -cancel -SHARD {0}
config shard;
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
else:
self.ocommon.log_info_message("Shard " + shard_name + " is online. Unable to perform chunk cancellation.",self.file_name)
def cancel_chunks_regex(self):
"""
This function returns the regex to search the CANCEL_CHUNKS key
"""
self.ocommon.log_info_message("Inside cancel_chunks_regex()",self.file_name)
return re.compile('CANCEL_CHUNKS')
def verify_online_shard(self):
"""
This function verifies that the shard is online
"""
self.ocommon.log_info_message("Inside verify_online_shard()",self.file_name)
status=False
reg_exp= self.online_shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
online_shard = self.check_online_shard(shard_name)
if online_shard == 0:
msg='''Shard {0} is online.'''.format(shard_name)
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Shard {0} is not online.'''.format(shard_name)
self.ocommon.log_info_message(msg,self.file_name)
self.ocommon.prog_exit("157")
def online_shard_regex(self):
"""
This function returns the regex to search the CHECK_ONLINE_SHARD key
"""
self.ocommon.log_info_message("Inside online_shard_regex()",self.file_name)
return re.compile('CHECK_ONLINE_SHARD')
def check_online_shard(self,shard_name):
"""
This function checks whether the shard is online
"""
self.ocommon.log_info_message("Inside check_online_shard()",self.file_name)
name_flag = False
availability_flag = False
state_flag = False
status_flag = False
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
config shard -shard {0};
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
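# Parse the 'config shard' output; the shard is treated as online only when Name matches and Availability=ONLINE, STATUS=OK and STATE=DEPLOYED are all reported.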
lines = output.split("\n")
for line in lines:
list1 = line.split(":")
if list1[0].strip() == 'Name' and list1[1].strip().lower() == shard_name.lower():
name_flag = True
if list1[0].strip().lower() == 'Availability'.lower() and list1[1].strip().lower() == 'ONLINE'.lower():
availability_flag = True
if list1[0].strip().lower() == 'STATUS'.lower() and list1[1].strip().lower() == 'OK'.lower():
status_flag = True
if list1[0].strip().lower() == 'STATE'.lower() and list1[1].strip().lower() == 'DEPLOYED'.lower():
state_flag = True
del list1[:]
if name_flag and availability_flag and state_flag and status_flag:
return 0
else:
return 1
def verify_gsm_shard(self):
"""
This function verifies the shard in GSM
"""
self.ocommon.log_info_message("Inside verify_gsm_shard()",self.file_name)
status=False
reg_exp= self.check_shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb=self.process_chunks_vars(key)
shard_name = '''{0}_{1}'''.format(shard_db,shard_pdb)
gsm_shard = self.check_gsm_shard(shard_name)
if gsm_shard == 0:
msg='''Shard {0} is present in GSM.'''.format(shard_name)
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Shard {0} is not present in GSM.'''.format(shard_name)
self.ocommon.log_info_message(msg,self.file_name)
self.ocommon.prog_exit("157")
def check_shard_regex(self):
"""
This function returns the regex to search the CHECK_GSM_SHARD key
"""
self.ocommon.log_info_message("Inside check_shard_regex()",self.file_name)
return re.compile('CHECK_GSM_SHARD')
def check_gsm_shard(self,shard_name):
"""
This function checks the shard in GSM
"""
self.ocommon.log_info_message("Inside check_gsm_shard()",self.file_name)
name_flag = False
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
config shard -shard {0};
exit;
'''.format(shard_name,cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
lines = output.split("\n")
for line in lines:
list1 = line.split(":")
if list1[0].strip() == 'Name' and list1[1].strip().lower() == shard_name.lower():
name_flag = True
del list1[:]
if name_flag:
return 0
else:
return 1
def count_online_shards(self):
"""
This function returns the count of online shards
"""
self.ocommon.log_info_message("Inside count_online_shards()",self.file_name)
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {0}/{1};
config shard;
exit;
'''.format(cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
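# Count the shards whose 'config shard' output line reports ok, deployed and online.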
online_shard = 0
lines = output.split("\n")
for line in lines:
if re.search('ok', line, re.IGNORECASE):
if re.search('deployed', line, re.IGNORECASE):
if re.search('online', line, re.IGNORECASE):
online_shard = online_shard + 1
return online_shard
def validate_gsm_shard(self):
"""
This function validates the shard in the GSM
"""
self.ocommon.log_info_message("Inside validate_gsm_shard()",self.file_name)
status=False
reg_exp= self.validate_shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
shard_name='''{0}_{1}'''.format(shard_db,shard_pdb)
status = self.check_shard_status(shard_name)
if status == 'completed':
msg='''Shard DB setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Shard {0} info does not exist in GSM.'''.format(shard_name)
self.ocommon.log_info_message(msg,self.file_name)
self.ocommon.prog_exit("157")
def process_shard_vars(self,key):
"""
This function processes the shard vars based on key and returns the values to configure the GSM
"""
shard_db=None
shard_pdb=None
shard_port=None
shard_group=None
shard_host=None
self.ocommon.log_info_message("Inside process_shard_vars()",self.file_name)
# self.ocommon.log_info_message(key,self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_str=cvar_str.replace('"', '')
# self.ocommon.log_info_message(cvar_str,self.file_name)
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
# self.ocommon.log_info_message("key : " + ckey,self.file_name)
# self.ocommon.log_info_message("Value: " + cvar_dict[ckey],self.file_name)
if ckey == 'shard_db':
shard_db = cvar_dict[ckey]
if ckey == 'shard_pdb':
shard_pdb = cvar_dict[ckey]
if ckey == 'shard_port':
shard_port = cvar_dict[ckey]
if ckey == 'shard_group':
shard_group = cvar_dict[ckey]
if ckey == 'shard_host':
shard_host = cvar_dict[ckey]
# # self.ocommon.log_info_message("shard_host: " + shard_host, self.file_name)
## Set the values if not set in above block
if not shard_port:
shard_port=1521
### Check values must be set
if shard_host and shard_db and shard_pdb and shard_port and shard_group:
return shard_db,shard_pdb,shard_port,shard_group,shard_host
else:
msg1='''shard_db={0},shard_pdb={1}'''.format((shard_db or "Missing Value"),(shard_pdb or "Missing Value"))
msg2='''shard_port={0},shard_host={1}'''.format((shard_port or "Missing Value"),(shard_host or "Missing Value"))
msg3='''shard_group={0}'''.format((shard_group or "Missing Value"))
msg='''Shard DB params {0} is not set correctly. One or more value is missing {1} {2} {3}'''.format(key,msg1,msg2,msg3)
self.ocommon.log_info_message(msg,self.file_name)
self.ocommon.prog_exit("Error occurred")
def process_chunks_vars(self,key):
"""
This function processes the chunks vars
"""
shard_db=None
shard_pdb=None
self.ocommon.log_info_message("Inside process_chunks_vars()",self.file_name)
# self.ocommon.log_info_message(key,self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_str=cvar_str.replace('"', '')
# self.ocommon.log_info_message(cvar_str,self.file_name)
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
# self.ocommon.log_info_message("key : " + ckey,self.file_name)
# self.ocommon.log_info_message("Value: " + cvar_dict[ckey],self.file_name)
if ckey == 'shard_db':
shard_db = cvar_dict[ckey]
if ckey == 'shard_pdb':
shard_pdb = cvar_dict[ckey]
# # self.ocommon.log_info_message("shard_host: " + shard_host, self.file_name)
## Set the values if not set in above block
### Check values must be set
if shard_pdb and shard_db:
return shard_db,shard_pdb
else:
msg1='''shard_db={0},shard_pdb={1}'''.format((shard_db or "Missing Value"),(shard_pdb or "Missing Value"))
self.ocommon.log_info_message(msg1,self.file_name)
self.ocommon.prog_exit("Error occurred")
def check_shard_status(self,shard_name):
"""
This function checks the shard status in GSM
"""
self.ocommon.log_info_message("Inside check_shard_status()",self.file_name)
#gsmcmd=self.get_gsm_config_cmd(dname)
gsmcmd='''
config;
exit;
'''
counter=1
end_counter=3
status=False
while counter < end_counter:
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
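# GSM-45034 in the output means the catalog connection is not available yet; wait and retry.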
error_check=re.findall("(?:GSM-45034\n)(?:.+\n)+",output)
try:
if self.ocommon.check_substr_match(error_check[0],"GSM-45034"):
counter = counter + 1
self.ocommon.log_info_message("Issue in catalog connection, retrying to connect to catalog in 30 seconds!",self.file_name)
time.sleep(20)
status=False
continue
except:
status=False
matched_output=re.findall("(?:Databases\n)(?:.+\n)+",output)
if shard_name:
try:
if self.ocommon.check_substr_match(matched_output[0],shard_name.lower()):
status=True
break
else:
status=False
except:
status=False
else:
reg_exp= self.shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb,shard_port,shard_region,shard_host=self.process_shard_vars(key)
shard_name='''{0}_{1}'''.format(shard_db,shard_pdb)
try:
if self.ocommon.check_substr_match(matched_output[0],shard_name.lower()):
status=True
else:
status=False
except:
status=False
if status:
break;
counter = counter + 1
return(self.ocommon.check_status_value(status))
def shard_regex(self):
"""
This function returns the regex to search the SHARD[0-9]+_PARAMS keys
"""
self.ocommon.log_info_message("Inside shard_regex()",self.file_name)
return re.compile('SHARD[0-9]+_PARAMS')
def add_shard_regex(self):
"""
This function returns the regex to search the ADD_SHARD key
"""
self.ocommon.log_info_message("Inside add_shard_regex()",self.file_name)
return re.compile('ADD_SHARD')
def remove_shard_regex(self):
"""
This function returns the regex to search the REMOVE_SHARD key
"""
self.ocommon.log_info_message("Inside remove_shard_regex()",self.file_name)
return re.compile('REMOVE_SHARD')
def validate_shard_regex(self):
"""
This function returns the regex to search the VALIDATE_SHARD key
"""
self.ocommon.log_info_message("Inside validate_shard_regex()",self.file_name)
return re.compile('VALIDATE_SHARD')
def configure_gsm_shard(self,shost,scdb,spdb,sdbport,sgroup):
"""
This function configures the shard db.
"""
spasswd="<PASSWORD>"
admuser= self.ora_env_dict["SHARD_ADMIN_USER"]
#dtrname,dtrport,dtregion=self.process_director_vars()
group_region=self.get_shardg_region_name(sgroup)
dtrname=self.get_director_name(group_region)
shard_name='''{0}_{1}'''.format(scdb,spdb)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
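# Register the shard CDB with GSM and add the shard PDB to the shard group using a full connect descriptor.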
gsmcmd='''
connect {1}/{2};
add cdb -connect {3}:{4}:{5} -pwd {2};
add shard -cdb {5} -connect "(DESCRIPTION = (ADDRESS = (PROTOCOL = tcp)(HOST = {3})(PORT = {4})) (CONNECT_DATA = (SERVICE_NAME = {6}) (SERVER = DEDICATED)))" -shardgroup {7} -pwd {2};
config vncr;
exit;
'''.format("NA",admuser,spasswd,shost,sdbport,scdb,spdb,sgroup,shard_name)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
def delete_gsm_shard(self,shost,scdb,spdb,sdbport,sgroup):
"""
This function deletes the shard db.
"""
spasswd="<PASSWORD>"
admuser= self.ora_env_dict["SHARD_ADMIN_USER"]
#dtrname,dtrport,dtregion=self.process_director_vars()
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
shard_name='''{0}_{1}'''.format(scdb,spdb)
group_region=self.get_shardg_region_name(sgroup)
dtrname=self.get_director_name(group_region)
gsmcmd='''
connect {1}/{2};
remove shard -shard {8};
remove cdb -cdb {5}
config vncr;
exit;
'''.format("NA",admuser,spasswd,shost,sdbport,scdb,spdb,sgroup,shard_name)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
retcode1=self.perform_invited_nodeop(shost,"remove")
def set_hostid_null(self):
"""
This function sets the hostid to NULL
"""
spasswd="<PASSWORD>"
admuser= self.ora_env_dict["SHARD_ADMIN_USER"]
reg_exp= self.catalog_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
catalog_db,catalog_pdb,catalog_port,catalog_region,catalog_host,catalog_name,catalog_chunks=self.process_clog_vars(key)
sqlpluslogin='''{0}/bin/sqlplus "sys/HIDDEN_STRING@{1}:{2}/{3} as sysdba"'''.format(self.ora_env_dict["ORACLE_HOME"],catalog_host,catalog_port,catalog_pdb,admuser)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
msg='''Setting host Id null in catalog as auto vncr is disabled'''
self.ocommon.log_info_message(msg,self.file_name)
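# Auto VNCR is disabled (-autovncr off), so clear hostid in the catalog and rely on explicitly invited nodes.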
sqlcmd='''
set echo on
set termout on
set time on
update gsmadmin_internal.database set hostid=NULL;
'''
output,error,retcode=self.ocommon.run_sqlplus(sqlpluslogin,sqlcmd,None)
self.ocommon.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name)
self.ocommon.check_sql_err(output,error,retcode,None)
self.ocommon.unset_mask_str()
def add_invited_node(self,op_str):
"""
This function adds the invited node in the GSM configuration
"""
self.ocommon.log_info_message("Inside add_invited_node()",self.file_name)
if op_str == "SHARD":
reg_exp = self.shard_regex()
else:
reg_exp = self.add_shard_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
group_region=self.get_shardg_region_name(shard_group)
dtrname=self.get_director_name(group_region)
retcode=self.perform_invited_nodeop(shard_host,"add")
def remove_invited_node(self,op_str):
"""
This function removes the invited node from the GSM configuration
"""
self.ocommon.log_info_message("Inside remove_invited_node()",self.file_name)
if op_str == "SHARD":
reg_exp = self.shard_regex()
else:
reg_exp = self.add_shard_regex()
if self.ocommon.check_key("KUBE_SVC",self.ora_env_dict):
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_db,shard_pdb,shard_port,shard_group,shard_host=self.process_shard_vars(key)
temp_host= shard_host.split('.',1)[0]
group_region=self.get_shardg_region_name(shard_group)
dtrname=self.get_director_name(group_region)
recode=self.perform_invited_nodeop(temp_host,"remove")
else:
self.ocommon.log_info_message("KUBE_SVC is not set. No need to remove invited node!",self.file_name)
def perform_invited_nodeop(self,shard_host,op_type):
"""
Performs invited node addition or removal
"""
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
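# Run 'add invitednode <host>' or 'remove invitednode <host>' depending on op_type.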
gsmcmd='''
connect {1}/{2};
{4} invitednode {3}
exit;
'''.format("NA",cadmin,cpasswd,shard_host,op_type)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
return retcode
def deploy_shard(self):
"""
This function deploys the shards
"""
self.ocommon.log_info_message("Inside deploy_shard()",self.file_name)
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
#dtrname,dtrport,dtregion=self.process_director_vars()
#if op_str == "SHARD":
# reg_exp = self.shard_regex()
#else:
# reg_exp = self.add_shard_regex()
#for key in self.ora_env_dict.keys():
# if(reg_exp.match(key)):
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
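# 'deploy' asks GSM to deploy all registered shards; the config commands before and after print the current configuration for the logs.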
gsmcmd='''
connect {1}/{2};
config shardspace;
config shardgroup;
config vncr;
deploy;
config shard;
exit;
'''.format("test",cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
def check_setup_status(self,host,ccdb,svc,port):
"""
This function checks the database setup status by querying the shardsetup table.
"""
systemStr='''{0}/bin/sqlplus "system/HIDDEN_STRING@{1}:{2}/{3}"'''.format(self.ora_env_dict["ORACLE_HOME"],host,port,ccdb)
fname='''/tmp/{0}'''.format("shard_setup.txt")
self.ocommon.remove_file(fname)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
msg='''Checking shardsetup table in CDB'''
self.ocommon.log_info_message(msg,self.file_name)
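# Query the shardsetup table on the CDB; a row containing 'completed' indicates the database setup has finished.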
sqlcmd='''
set heading off
set feedback off
set term off
SET NEWPAGE NONE
spool {0}
select * from shardsetup WHERE ROWNUM = 1;
spool off
exit;
'''.format(fname)
output,error,retcode=self.ocommon.run_sqlplus(systemStr,sqlcmd,None)
self.ocommon.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name)
self.ocommon.check_sql_err(output,error,retcode,None)
if os.path.isfile(fname):
fdata=self.ocommon.read_file(fname)
else:
fdata='nosetup'
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
if re.search('completed',fdata):
status = self.catalog_pdb_setup_check(host,ccdb,svc,port)
if status == 'completed':
return 'completed'
else:
return 'notcompleted'
else:
return 'notcompleted'
def catalog_pdb_setup_check(self,host,ccdb,svc,port):
"""
This function checks the PDB setup status.
"""
systemStr='''{0}/bin/sqlplus "pdbadmin/HIDDEN_STRING@{1}:{2}/{3}"'''.format(self.ora_env_dict["ORACLE_HOME"],host,port,svc)
fname='''/tmp/{0}'''.format("pdb_setup_check.txt")
self.ocommon.remove_file(fname)
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
msg='''Checking setup status in PDB'''
self.ocommon.log_info_message(msg,self.file_name)
sqlcmd='''
set heading off
set feedback off
set term off
SET NEWPAGE NONE
spool {0}
select count(*) from dual;
spool off
exit;
'''.format(fname)
output,error,retcode=self.ocommon.run_sqlplus(systemStr,sqlcmd,None)
self.ocommon.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name)
self.ocommon.check_sql_err(output,error,retcode,None)
if os.path.isfile(fname):
fdata=self.ocommon.read_file(fname)
else:
fdata='nosetup'
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
if re.search('1',fdata):
return 'completed'
else:
return 'notcompleted'
############################# Setup GSM Service ###############################################
def setup_gsm_service(self):
"""
This function sets up the shard service.
"""
self.ocommon.log_info_message("Inside setup_gsm_service()",self.file_name)
status=False
service_value="service_name=oltp_rw_svc;service_role=primary"
# self.ora_env_dict=self.ocommon.add_key("SERVICE1_PARAMS",service_value,self.ora_env_dict)
reg_exp= self.service_regex()
counter=1
end_counter=3
while counter < end_counter:
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
shard_service_status=None
service_name,service_role=self.process_service_vars(key)
shard_service_status=self.check_service_status(service_name)
if shard_service_status != 'completed':
self.configure_gsm_service(service_name,service_role)
status = self.check_service_status(None)
if status == 'completed':
break
else:
msg='''GSM service setup is still not completed in GSM. Sleeping for 60 seconds and sleeping count is {0}'''.format(counter)
time.sleep(60)
counter=counter+1
status = self.check_service_status(None)
if status == 'completed':
msg='''Shard service setup completed in GSM'''
self.ocommon.log_info_message(msg,self.file_name)
else:
msg='''Waited 2 minutes to complete the service setup in GSM but the setup did not complete or failed. Exiting...'''
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("127")
def process_service_vars(self,key):
"""
This function process shardG vars based on key and return values to configure the GSM
"""
service_name=None
service_role=None
self.ocommon.log_info_message("Inside process_service_vars()",self.file_name)
cvar_str=self.ora_env_dict[key]
cvar_dict=dict(item.split("=") for item in cvar_str.split(";"))
for ckey in cvar_dict.keys():
if ckey == 'service_name':
service_name = cvar_dict[ckey]
if ckey == 'service_role':
service_role = cvar_dict[ckey]
### Check values must be set
if service_name and service_role:
return service_name,service_role
else:
msg1='''service_name={0},service_role={1}'''.format((service_name or "Missing Value"),(service_role or "Missing Value"))
msg='''Shard service params {0} is not set correctly. One or more value is missing {1} {2}'''.format(key,msg1)
self.ocommon.log_error_message(msg,self.file_name)
self.ocommon.prog_exit("Error occurred")
def check_service_status(self,service_name):
"""
This function checks the service status in GSM
"""
self.ocommon.log_info_message("Inside check_service_status()",self.file_name)
#dtrname,dtrport,dtregion=self.process_director_vars()
gsmcmd='''
config;
exit;
'''.format("test")
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
matched_output=re.findall("(?:Services\n)(?:.+\n)+",output)
status=False
if service_name:
try:
if self.ocommon.check_substr_match(matched_output[0],service_name):
status=True
else:
status=False
except:
status=False
else:
reg_exp= self.service_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
service_name,service_role=self.process_service_vars(key)
# match=re.search("(?i)(?m)"+service_name,matched_output)
try:
if self.ocommon.check_substr_match(matched_output[0],service_name):
status=True
else:
status=False
except:
status=False
return(self.ocommon.check_status_value(status))
def service_regex(self):
"""
      This function returns the regex used to search for SERVICE[0-9]+_PARAMS keys
"""
self.ocommon.log_info_message("Inside service_regex()",self.file_name)
return re.compile('SERVICE[0-9]+_PARAMS')
def configure_gsm_service(self,service_name,service_role):
"""
      This function configures and starts the GSM service.
"""
self.ocommon.log_info_message("Inside configure_gsm_service()",self.file_name)
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
#dtrname,dtrport,dtregion=self.process_director_vars()
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmlogin='''{0}/bin/gdsctl'''.format(self.ora_env_dict["ORACLE_HOME"])
gsmcmd='''
connect {1}/{2};
add service -service {3} -role {4};
start service -service {3};
exit;
'''.format("test",cadmin,cpasswd,service_name,service_role)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
############################## GSM backup fIle function Begins Here #############################
def gsm_backup_file(self):
"""
      This function backs up the GSM network/admin files to the gsmdata location
"""
self.ocommon.log_info_message("Inside gsm_backup_file()",self.file_name)
gsmdata_loc='/opt/oracle/gsmdata'
gsmfile_loc='''{0}/network/admin'''.format(self.ora_env_dict["ORACLE_HOME"])
if os.path.isdir(gsmdata_loc):
         msg='''Directory {0} exists'''.format(gsmdata_loc)
         self.ocommon.log_info_message(msg,self.file_name)
         cmd='''cp -r -v {0}/* {1}/'''.format(gsmfile_loc,gsmdata_loc)
         output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
         self.ocommon.check_os_err(output,error,retcode,True)
############### Deploy Sample Function Begins Here ##########################
def setup_sample_schema(self):
"""
      This function deploys the sample schema and demo data
"""
s = "abcdefghijklmnopqrstuvwxyz01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()?"
passlen = 8
passwd = "".join(random.sample(s,passlen ))
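        # random.sample() draws without replacement, so the generated password contains 8 distinct characters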
self.ocommon.log_info_message("Inside deploy_sample_schema()",self.file_name)
reg_exp= self.catalog_regex()
for key in self.ora_env_dict.keys():
if(reg_exp.match(key)):
catalog_db,catalog_pdb,catalog_port,catalog_region,catalog_host,catalog_name,catalog_chunks=self.process_clog_vars(key)
sqlpluslogin='''{0}/bin/sqlplus "sys/HIDDEN_STRING@{1}:{2}/{3} as sysdba"'''.format(self.ora_env_dict["ORACLE_HOME"],catalog_host,catalog_port,catalog_db)
if self.ocommon.check_key("SAMPLE_SCHEMA",self.ora_env_dict):
if self.ora_env_dict["SAMPLE_SCHEMA"] == 'DEPLOY':
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
msg='''Deploying sample schema'''
self.ocommon.log_info_message(msg,self.file_name)
sqlcmd='''
set echo on
set termout on
set time on
spool /tmp/create_app_schema.lst
REM
REM Connect to the Shard Catalog and Create Schema
REM
alter session enable shard ddl;
alter session set container={2};
alter session enable shard ddl;
create user app_schema identified by {3};
grant connect, resource, alter session to app_schema;
grant execute on dbms_crypto to app_schema;
grant create table, create procedure, create tablespace, create materialized view to app_schema;
grant unlimited tablespace to app_schema;
grant select_catalog_role to app_schema;
grant all privileges to app_schema;
grant gsmadmin_role to app_schema;
grant dba to app_schema;
CREATE TABLESPACE SET tbsset1 IN SHARDSPACE shd1;
CREATE TABLESPACE SET tbsset2 IN SHARDSPACE shd2;
connect app_schema/{3}@{0}:{1}/{2}
alter session enable shard ddl;
/* Customer shard table */
CREATE SHARDED TABLE customer
( cust_id NUMBER NOT NULL,
cust_passwd VARCHAR2(20) NOT NULL,
cust_name VARCHAR2(60) NOT NULL,
cust_type VARCHAR2(10) NOT NULL,
cust_email VARCHAR2(100) NOT NULL)
partitionset by list (cust_type)
partition by consistent hash (cust_id) partitions auto
(partitionset individual values ('individual') tablespace set tbsset1,
partitionset business values ('business') tablespace set tbsset2
);
/* Invoice shard table */
CREATE SHARDED TABLE invoice
( invoice_id NUMBER NOT NULL,
cust_id NUMBER NOT NULL,
cust_type VARCHAR2(10) NOT NULL,
vendor_name VARCHAR2(60) NOT NULL,
balance FLOAT(10) NOT NULL,
total FLOAT(10) NOT NULL,
status VARCHAR2(20),
CONSTRAINT InvoicePK PRIMARY KEY (cust_id, invoice_id))
PARENT customer
partitionset by list (cust_type)
partition by consistent hash (cust_id) partitions auto
(partitionset individual values ('individual') tablespace set tbsset1,
partitionset business values ('business') tablespace set tbsset2
);
/* Data */
insert into customer values (999, 'pass', 'Customer 999', 'individual', '<EMAIL>');
insert into customer values (250251, 'pass', 'Customer 250251', 'individual', '<EMAIL>');
insert into customer values (350351, 'pass', '<PASSWORD>', 'individual', '<EMAIL>');
insert into customer values (550551, 'pass', 'Customer 550551', 'business', '<EMAIL>');
insert into customer values (650651, 'pass', 'Customer 650651', 'business', '<EMAIL>');
insert into invoice values (1001, 999, 'individual', 'VendorA', 10000, 20000, 'Due');
insert into invoice values (1002, 999, 'individual', 'VendorB', 10000, 20000, 'Due');
insert into invoice values (1001, 250251, 'individual', 'VendorA', 10000, 20000, 'Due');
insert into invoice values (1002, 250251, 'individual', 'VendorB', 0, 10000, 'Paid');
insert into invoice values (1003, 250251, 'individual', 'VendorC', 14000, 15000, 'Due');
insert into invoice values (1001, 350351, 'individual', 'VendorD', 10000, 20000, 'Due');
insert into invoice values (1002, 350351, 'individual', 'VendorE', 0, 10000, 'Paid');
insert into invoice values (1003, 350351, 'individual', 'VendorF', 14000, 15000, 'Due');
insert into invoice values (1004, 350351, 'individual', 'VendorG', 12000, 15000, 'Due');
insert into invoice values (1001, 550551, 'business', 'VendorH', 10000, 20000, 'Due');
insert into invoice values (1002, 550551, 'business', 'VendorI', 0, 10000, 'Paid');
insert into invoice values (1003, 550551, 'business', 'VendorJ', 14000, 15000, 'Due');
insert into invoice values (1004, 550551, 'business', 'VendorK', 10000, 20000, 'Due');
insert into invoice values (1005, 550551, 'business', 'VendorL', 10000, 20000, 'Due');
insert into invoice values (1006, 550551, 'business', 'VendorM', 0, 10000, 'Paid');
insert into invoice values (1007, 550551, 'business', 'VendorN', 14000, 15000, 'Due');
insert into invoice values (1008, 550551, 'business', 'VendorO', 10000, 20000, 'Due');
insert into invoice values (1001, 650651, 'business', 'VendorT', 10000, 20000, 'Due');
insert into invoice values (1002, 650651, 'business', 'VendorU', 0, 10000, 'Paid');
insert into invoice values (1003, 650651, 'business', 'VendorV', 14000, 15000, 'Due');
insert into invoice values (1004, 650651, 'business', 'VendorW', 10000, 20000, 'Due');
insert into invoice values (1005, 650651, 'business', 'VendorX', 0, 20000, 'Paid');
insert into invoice values (1006, 650651, 'business', 'VendorY', 0, 30000, 'Paid');
insert into invoice values (1007, 650651, 'business', 'VendorZ', 0, 10000, 'Paid');
commit;
select table_name from user_tables;
spool off
'''.format(catalog_host,catalog_port,catalog_pdb,passwd)
output,error,retcode=self.ocommon.run_sqlplus(sqlpluslogin,sqlcmd,None)
self.ocommon.log_info_message("Calling check_sql_err() to validate the sql command return status",self.file_name)
self.ocommon.check_sql_err(output,error,retcode,None)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
gsmhost=self.ora_env_dict["ORACLE_HOSTNAME"]
cadmin=self.ora_env_dict["SHARD_ADMIN_USER"]
cpasswd="<PASSWORD>"
#dtrname,dtrport,dtregion=self.process_director_vars()
self.ocommon.set_mask_str(self.ora_env_dict["ORACLE_PWD"])
gsmcmd='''
connect {1}/{2};
show ddl;
exit;
'''.format("test",cadmin,cpasswd)
output,error,retcode=self.ocommon.exec_gsm_cmd(gsmcmd,None,self.ora_env_dict)
### Unsetting the encrypt value to None
self.ocommon.unset_mask_str()
###################################### Run custom scripts ##################################################
def run_custom_scripts(self):
"""
      Custom script to be executed on every restart of the environment
"""
self.ocommon.log_info_message("Inside run_custom_scripts()",self.file_name)
if self.ocommon.check_key("CUSTOM_SHARD_SCRIPT_DIR",self.ora_env_dict):
shard_dir=self.ora_env_dict["CUSTOM_SHARD_SCRIPT_DIR"]
if self.ocommon.check_key("CUSTOM_SHARD_SCRIPT_FILE",self.ora_env_dict):
shard_file=self.ora_env_dict["CUSTOM_SHARD_SCRIPT_FILE"]
script_file = '''{0}/{1}'''.format(shard_dir,shard_file)
if os.path.isfile(script_file):
               msg='''Custom shard script exists: {0}'''.format(script_file)
               self.ocommon.log_info_message(msg,self.file_name)
               cmd='''sh {0}'''.format(script_file)
               output,error,retcode=self.ocommon.execute_cmd(cmd,None,None)
               self.ocommon.check_os_err(output,error,retcode,True)
############################### GSM Completion Message #######################################################
def gsm_completion_message(self):
"""
      Function that prints the completion message
"""
self.ocommon.log_info_message("Inside gsm_completion_message()",self.file_name)
msg=[]
msg.append('==============================================')
msg.append(' GSM Setup Completed ')
msg.append('==============================================')
for text in msg:
self.ocommon.log_info_message(text,self.file_name)
``` |
{
"source": "jpviguerasguillen/deepcaps",
"score": 2
} |
#### File: jpviguerasguillen/deepcaps/capslayers.py
```python
from keras import backend as K
import tensorflow as tf
import numpy as np
from keras import layers, initializers, regularizers, constraints
from keras.utils import conv_utils
from keras.layers import InputSpec
from keras.utils.conv_utils import conv_output_length
cf = K.image_data_format() == 'channels_first'
useGPU = True
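# squeeze() is the capsule "squash" non-linearity: v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||),
# which keeps a vector's direction while mapping its length into [0, 1).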
def squeeze(s):
sq = K.sum(K.square(s), axis=-1, keepdims=True)
return (sq / (1 + sq)) * (s / K.sqrt(sq + K.epsilon()))
class ConvertToCaps(layers.Layer):
def __init__(self, **kwargs):
super(ConvertToCaps, self).__init__(**kwargs)
# self.input_spec = InputSpec(min_ndim=2)
def compute_output_shape(self, input_shape):
output_shape = list(input_shape)
output_shape.insert(1 if cf else len(output_shape), 1)
return tuple(output_shape)
def call(self, inputs):
return K.expand_dims(inputs, 1 if cf else -1)
def get_config(self):
config = {
'input_spec': 5
}
base_config = super(ConvertToCaps, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FlattenCaps(layers.Layer):
def __init__(self, **kwargs):
super(FlattenCaps, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=4)
def compute_output_shape(self, input_shape):
if not all(input_shape[1:]):
raise ValueError('The shape of the input to "FlattenCaps" '
'is not fully defined '
'(got ' + str(input_shape[1:]) + '. '
'Make sure to pass a complete "input_shape" '
'or "batch_input_shape" argument to the first '
'layer in your model.')
return (input_shape[0], np.prod(input_shape[1:-1]), input_shape[-1])
def call(self, inputs):
shape = K.int_shape(inputs)
return K.reshape(inputs, (-1, np.prod(shape[1:-1]), shape[-1]))
class CapsToScalars(layers.Layer):
def __init__(self, **kwargs):
super(CapsToScalars, self).__init__(**kwargs)
self.input_spec = InputSpec(min_ndim=3)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1])
def call(self, inputs):
return K.sqrt(K.sum(K.square(inputs + K.epsilon()), axis=-1))
class Conv2DCaps(layers.Layer):
def __init__(self, ch_j, n_j,
kernel_size=(3, 3),
strides=(1, 1),
r_num=1,
b_alphas=[8, 8, 8],
padding='same',
data_format='channels_last',
dilation_rate=(1, 1),
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
**kwargs):
super(Conv2DCaps, self).__init__(**kwargs)
rank = 2
self.ch_j = ch_j # Number of capsules in layer J
self.n_j = n_j # Number of neurons in a capsule in J
self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.r_num = r_num
self.b_alphas = b_alphas
self.padding = conv_utils.normalize_padding(padding)
self.data_format = K.normalize_data_format(data_format)
self.dilation_rate = (1, 1)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.input_spec = InputSpec(ndim=rank + 3)
def build(self, input_shape):
self.h_i, self.w_i, self.ch_i, self.n_i = input_shape[1:5]
self.h_j, self.w_j = [conv_utils.conv_output_length(input_shape[i + 1],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i]) for i in (0, 1)]
self.ah_j, self.aw_j = [conv_utils.conv_output_length(input_shape[i + 1],
self.kernel_size[i],
padding=self.padding,
stride=1,
dilation=self.dilation_rate[i]) for i in (0, 1)]
self.w_shape = self.kernel_size + (self.ch_i, self.n_i,
self.ch_j, self.n_j)
self.w = self.add_weight(shape=self.w_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.built = True
def call(self, inputs):
if self.r_num == 1:
# if there is no routing (and this is so when r_num is 1 and all c are equal)
# then this is a common convolution
outputs = K.conv2d(K.reshape(inputs, (-1, self.h_i, self.w_i,
self.ch_i * self.n_i)),
K.reshape(self.w, self.kernel_size +
(self.ch_i * self.n_i, self.ch_j * self.n_j)),
data_format='channels_last',
strides=self.strides,
padding=self.padding,
dilation_rate=self.dilation_rate)
outputs = squeeze(K.reshape(outputs, ((-1, self.h_j, self.w_j,
self.ch_j, self.n_j))))
return outputs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.h_j, self.w_j, self.ch_j, self.n_j)
def get_config(self):
config = {
'ch_j': self.ch_j,
'n_j': self.n_j,
'kernel_size': self.kernel_size,
'strides': self.strides,
'b_alphas': self.b_alphas,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint)
}
base_config = super(Conv2DCaps, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Mask(layers.Layer):
def call(self, inputs, **kwargs):
if isinstance(inputs, list): # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, mask = inputs
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = K.sqrt(K.sum(K.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
return masked
def compute_output_shape(self, input_shape):
if isinstance(input_shape[0], tuple): # true label provided
return tuple([None, input_shape[0][1] * input_shape[0][2]])
else: # no true label provided
return tuple([None, input_shape[1] * input_shape[2]])
class Mask_CID(layers.Layer):
def call(self, inputs, **kwargs):
if isinstance(inputs, list): # true label is provided with shape = [None, n_classes], i.e. one-hot code.
assert len(inputs) == 2
inputs, a = inputs
mask = K.argmax(a, 1)
else: # if no true label, mask by the max length of capsules. Mainly used for prediction
# compute lengths of capsules
x = K.sqrt(K.sum(K.square(inputs), -1))
# generate the mask which is a one-hot code.
# mask.shape=[None, n_classes]=[None, num_capsule]
mask = K.argmax(x, 1)
increasing = tf.range(start=0, limit=tf.shape(inputs)[0], delta=1)
m = tf.stack([increasing, tf.cast(mask, tf.int32)], axis=1)
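        # m pairs each batch index with the selected capsule index, so gather_nd below
        # extracts a single capsule vector per example.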
# inputs.shape=[None, num_capsule, dim_capsule]
# mask.shape=[None, num_capsule]
# masked.shape=[None, num_capsule * dim_capsule]
# x1 = tf.transpose(inputs, (0))
masked = tf.gather_nd(inputs, m)
return masked
def compute_output_shape(self, input_shape):
if isinstance(input_shape[0], tuple): # true label provided
return tuple([None, input_shape[0][2]])
else: # no true label provided
return tuple([None, input_shape[2]])
class ConvCapsuleLayer3D(layers.Layer):
def __init__(self, kernel_size, num_capsule, num_atoms, strides=1, padding='valid', routings=3,
kernel_initializer='he_normal', **kwargs):
super(ConvCapsuleLayer3D, self).__init__(**kwargs)
self.kernel_size = kernel_size
self.num_capsule = num_capsule
self.num_atoms = num_atoms
self.strides = strides
self.padding = padding
self.routings = routings
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
assert len(input_shape) == 5, "The input Tensor should have shape=[None, input_height, input_width," \
" input_num_capsule, input_num_atoms]"
self.input_height = input_shape[1]
self.input_width = input_shape[2]
self.input_num_capsule = input_shape[3]
self.input_num_atoms = input_shape[4]
# Transform matrix
self.W = self.add_weight(shape=[self.input_num_atoms, self.kernel_size, self.kernel_size, 1, self.num_capsule * self.num_atoms],
initializer=self.kernel_initializer,
name='W')
self.b = self.add_weight(shape=[self.num_capsule, self.num_atoms, 1, 1],
initializer=initializers.constant(0.1),
name='b')
self.built = True
def call(self, input_tensor, training=None):
input_transposed = tf.transpose(input_tensor, [0, 3, 4, 1, 2])
input_shape = K.shape(input_transposed)
input_tensor_reshaped = K.reshape(input_tensor, [input_shape[0], 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width])
input_tensor_reshaped.set_shape((None, 1, self.input_num_capsule * self.input_num_atoms, self.input_height, self.input_width))
# conv = Conv3D(input_tensor_reshaped, self.W, (self.strides, self.strides),
# padding=self.padding, data_format='channels_first')
conv = K.conv3d(input_tensor_reshaped, self.W, strides=(self.input_num_atoms, self.strides, self.strides), padding=self.padding, data_format='channels_first')
votes_shape = K.shape(conv)
_, _, _, conv_height, conv_width = conv.get_shape()
conv = tf.transpose(conv, [0, 2, 1, 3, 4])
votes = K.reshape(conv, [input_shape[0], self.input_num_capsule, self.num_capsule, self.num_atoms, votes_shape[3], votes_shape[4]])
votes.set_shape((None, self.input_num_capsule, self.num_capsule, self.num_atoms, conv_height.value, conv_width.value))
logit_shape = K.stack([input_shape[0], self.input_num_capsule, self.num_capsule, votes_shape[3], votes_shape[4]])
biases_replicated = K.tile(self.b, [1, 1, conv_height.value, conv_width.value])
activations = update_routing(
votes=votes,
biases=biases_replicated,
logit_shape=logit_shape,
num_dims=6,
input_dim=self.input_num_capsule,
output_dim=self.num_capsule,
num_routing=self.routings)
a2 = tf.transpose(activations, [0, 3, 4, 1, 2])
return a2
def compute_output_shape(self, input_shape):
space = input_shape[1:-2]
new_space = []
for i in range(len(space)):
new_dim = conv_output_length(space[i], self.kernel_size, padding=self.padding, stride=self.strides, dilation=1)
new_space.append(new_dim)
return (input_shape[0],) + tuple(new_space) + (self.num_capsule, self.num_atoms)
def get_config(self):
config = {
'kernel_size': self.kernel_size,
'num_capsule': self.num_capsule,
'num_atoms': self.num_atoms,
'strides': self.strides,
'padding': self.padding,
'routings': self.routings,
'kernel_initializer': initializers.serialize(self.kernel_initializer)
}
base_config = super(ConvCapsuleLayer3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def update_routing(votes, biases, logit_shape, num_dims, input_dim, output_dim,
num_routing):
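    # Dynamic routing-by-agreement: each iteration softmax-normalises the routing logits
    # into coupling coefficients, forms the weighted sum of votes (plus biases), squashes
    # it into an output activation, and then raises the logits by the agreement (dot
    # product) between each vote and that activation.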
if num_dims == 6:
votes_t_shape = [3, 0, 1, 2, 4, 5]
r_t_shape = [1, 2, 3, 0, 4, 5]
elif num_dims == 4:
votes_t_shape = [3, 0, 1, 2]
r_t_shape = [1, 2, 3, 0]
else:
raise NotImplementedError('Not implemented')
votes_trans = tf.transpose(votes, votes_t_shape)
_, _, _, height, width, caps = votes_trans.get_shape()
def _body(i, logits, activations):
"""Routing while loop."""
# route: [batch, input_dim, output_dim, ...]
a,b,c,d,e = logits.get_shape()
a = logit_shape[0]
b = logit_shape[1]
c = logit_shape[2]
d = logit_shape[3]
e = logit_shape[4]
print(logit_shape)
logit_temp = tf.reshape(logits, [a,b,-1])
route_temp = tf.nn.softmax(logit_temp, dim=-1)
route = tf.reshape(route_temp, [a, b, c, d, e])
preactivate_unrolled = route * votes_trans
preact_trans = tf.transpose(preactivate_unrolled, r_t_shape)
preactivate = tf.reduce_sum(preact_trans, axis=1) + biases
activation = _squash(preactivate)
activations = activations.write(i, activation)
act_3d = K.expand_dims(activation, 1)
tile_shape = np.ones(num_dims, dtype=np.int32).tolist()
tile_shape[1] = input_dim
act_replicated = tf.tile(act_3d, tile_shape)
distances = tf.reduce_sum(votes * act_replicated, axis=3)
logits += distances
return (i + 1, logits, activations)
activations = tf.TensorArray(
dtype=tf.float32, size=num_routing, clear_after_read=False)
logits = tf.fill(logit_shape, 0.0)
i = tf.constant(0, dtype=tf.int32)
_, logits, activations = tf.while_loop(
lambda i, logits, activations: i < num_routing,
_body,
loop_vars=[i, logits, activations],
swap_memory=True)
a = K.cast(activations.read(num_routing - 1), dtype='float32')
return K.cast(activations.read(num_routing - 1), dtype='float32')
class DenseCaps(layers.Layer):
def __init__(self, ch_j, n_j,
r_num=1,
b_alphas=[8, 8, 8],
kernel_initializer='glorot_uniform',
kernel_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
**kwargs):
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super(DenseCaps, self).__init__(**kwargs)
self.ch_j = ch_j # number of capsules in layer J
self.n_j = n_j # number of neurons in a capsule in J
self.r_num = r_num
self.b_alphas = b_alphas
self.kernel_initializer = initializers.get(kernel_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.input_spec = InputSpec(min_ndim=3)
self.supports_masking = True
def build(self, input_shape):
self.ch_i, self.n_i = input_shape[1:]
self.w_shape = (self.ch_i, self.n_i, self.ch_j, self.n_j)
self.w = self.add_weight(shape=self.w_shape,
initializer=self.kernel_initializer,
name='kernel',
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint)
self.built = True
def call(self, inputs):
if self.r_num == 1:
outputs = K.dot(K.reshape(inputs, (-1, self.ch_i * self.n_i)),
K.reshape(self.w, (self.ch_i * self.n_i,
self.ch_j * self.n_j)))
outputs = squeeze(K.reshape(outputs, (-1, self.ch_j, self.n_j)))
else:
wr = K.reshape(self.w, (self.ch_i, self.n_i, self.ch_j * self.n_j))
u = tf.transpose(tf.matmul(tf.transpose(inputs, [1, 0, 2]), wr), [1, 0, 2])
u = K.reshape(u, (-1, self.ch_i, self.ch_j, self.n_j))
def rt(ub):
ub = K.reshape(ub, (-1, self.ch_i, self.ch_j, self.n_j))
ub_wo_g = K.stop_gradient(ub)
b = 0.0
for r in range(self.r_num):
if r > 0:
                        c = K.expand_dims(K.softmax(b * self.b_alphas[r])) * self.ch_j  # distribution of weights of capsules in I across capsules in J
                        c = K.stop_gradient(c)
else:
c = 1.0
if r == self.r_num - 1:
cub = c * ub
else:
cub = c * ub_wo_g
s = K.sum(cub, axis=-3) # vectors of capsules in J
v = squeeze(s) # squeezed vectors of capsules in J
if r == self.r_num - 1:
break
v = K.stop_gradient(v)
a = tf.einsum('bjk,bijk->bij', v, ub) # a = v dot u
# a = K.matmul(K.reshape(v, (-1, 1, J, 1, n_j)),
# K.reshape(u, (-1, I, J, n_j, 1))).reshape((-1, I, J))
b = b + a # increase those b[i,j] where v[j] dot b[i,j] is larger
return v
u = K.reshape(u, (-1, self.ch_i * self.ch_j * self.n_j))
global useGPU
if useGPU:
outputs = rt(u)
else:
outputs = tf.map_fn(rt, u,
parallel_iterations=100, back_prop=True,
infer_shape=False)
outputs = K.reshape(outputs, (-1, self.ch_j, self.n_j))
return outputs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.ch_j, self.n_j)
def get_config(self):
config = {
'ch_j': self.ch_j,
'n_j': self.n_j,
'r_num': self.r_num,
'b_alphas': self.b_alphas,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'activity_regularizer': regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
}
base_config = super(DenseCaps, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class CapsuleLayer(layers.Layer):
def __init__(self, num_capsule, dim_capsule, channels, routings=3,
kernel_initializer='glorot_uniform',
**kwargs):
super(CapsuleLayer, self).__init__(**kwargs)
self.num_capsule = num_capsule
self.dim_capsule = dim_capsule
self.routings = routings
self.channels = channels
self.kernel_initializer = initializers.get(kernel_initializer)
def build(self, input_shape):
assert len(input_shape) >= 3, "The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]"
self.input_num_capsule = input_shape[1]
self.input_dim_capsule = input_shape[2]
if(self.channels != 0):
            assert int(self.input_num_capsule / self.channels) / (self.input_num_capsule / self.channels) == 1, "input_num_capsule must be evenly divisible by channels"
self.W = self.add_weight(shape=[self.num_capsule, self.channels,
self.dim_capsule, self.input_dim_capsule],
initializer=self.kernel_initializer,
name='W')
self.B = self.add_weight(shape=[self.num_capsule, self.dim_capsule],
initializer=self.kernel_initializer,
name='B')
else:
self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
self.dim_capsule, self.input_dim_capsule],
initializer=self.kernel_initializer,
name='W')
self.B = self.add_weight(shape=[self.num_capsule, self.dim_capsule],
initializer=self.kernel_initializer,
name='B')
self.built = True
def call(self, inputs, training=None):
# inputs.shape=[None, input_num_capsule, input_dim_capsule]
# inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
inputs_expand = K.expand_dims(inputs, 1)
# Replicate num_capsule dimension to prepare being multiplied by W
# inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
if(self.channels != 0):
W2 = K.repeat_elements(self.W, int(self.input_num_capsule / self.channels), 1)
else:
W2 = self.W
# Compute `inputs * W` by scanning inputs_tiled on dimension 0.
# x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
# W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
# Regard the first two dimensions as `batch` dimension,
# then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
# inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]), elems=inputs_tiled)
# Begin: Routing algorithm ---------------------------------------------------------------------#
# The prior for coupling coefficient, initialized as zeros.
# b.shape = [None, self.num_capsule, self.input_num_capsule].
b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])
assert self.routings > 0, 'The routings should be > 0.'
for i in range(self.routings):
# c.shape=[batch_size, num_capsule, input_num_capsule]
c = tf.nn.softmax(b, dim=1)
# c.shape = [batch_size, num_capsule, input_num_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
            # then matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
# outputs.shape=[None, num_capsule, dim_capsule]
outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]) + self.B) # [None, 10, 16]
if i < self.routings - 1:
# outputs.shape = [None, num_capsule, dim_capsule]
# inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
# The first two dimensions as `batch` dimension,
                # then matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].
# b.shape=[batch_size, num_capsule, input_num_capsule]
b += K.batch_dot(outputs, inputs_hat, [2, 3])
# End: Routing algorithm -----------------------------------------------------------------------#
return outputs
def compute_output_shape(self, input_shape):
return tuple([None, self.num_capsule, self.dim_capsule])
def _squash(input_tensor):
norm = tf.norm(input_tensor, axis=-1, keep_dims=True)
norm_squared = norm * norm
return (input_tensor / norm) * (norm_squared / (1 + norm_squared))
def squash(vectors, axis=-1):
s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm)
return scale * vectors
``` |
{
"source": "jpvillazon/raspberry-pi",
"score": 3
} |
#### File: firebase/python/stream_firebase_realtime_db.py
```python
import pyrebase
config = {
"apiKey": "",
"authDomain": "",
"databaseURL": "",
"projectId": "",
"storageBucket": "",
"messagingSenderId": "",
"appId": ""
}
firebase = pyrebase.initialize_app(config)
db = firebase.database()
def stream_handler(message):
    if message["data"] == True and message["path"] == "/quit":
        print("Quitting")
quit()
print(message["event"])
print(message["path"])
print(message["data"])
my_stream = db.child("database").stream(stream_handler)
#my_stream.close()
``` |
{
"source": "jpvlsmv/didactic-spork",
"score": 3
} |
#### File: didactic-spork/misc/dedup2.py
```python
from pathlib import Path
from collections import namedtuple
def _readable_dir(p):
return Path(p).is_dir()
def dirwalker(p):
  for entry in p.iterdir():
if entry.is_dir():
yield from dirwalker(entry)
else:
yield entry
def main(args):
  # haveseen, fhash, sizematches, storehash and marksize are dedup helpers assumed to
  # be provided elsewhere; they are not defined in this sketch.
  for path in args.apaths:
    for candidate in dirwalker(path):
      if args.dev:
        print(candidate)
        return
      if haveseen(candidate):
        fh = fhash(candidate)
        for seen in sizematches:
          if fh == seen.hash:
            if len(seen.name) < len(candidate.name):
              pass
        storehash(candidate, fh)
      else:
        marksize(candidate)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--verbose','-v',action='count')
parser.add_argument('--noopt', '-n', action='store_true')
parser.add_argument('--dev', '-d', action='store_true')
parser.add_argument('paths', nargs='*', type=_readable_dir)
args = parser.parse_args()
if args.paths:
args.apaths = map(Path, args.paths)
else:
args.apaths = [ Path().resolve() ]
if args.dev:
print(args)
else:
main(args)
# vim: tabstop=8 expandtab shiftwidth=2 softtabstop=2
``` |
{
"source": "jpvlsmv/micropython-gy521",
"score": 2
} |
#### File: micropython-gy521/devscripts/fetch_register_constants.py
```python
DS_url='https://www.invensense.com/wp-content/uploads/2015/02/MPU-6000-Register-Map1.pdf'
import logging
logging.basicConfig( level = logging.ERROR )
log = logging.getLogger(__name__)
loglvls = [ logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG ]
def main(args):
log.debug("Debug logging set")
log.info("Info logging set")
log.warning("Warn logging set")
log.error("Errors logging set (default)")
import requests
r = requests.get(args.url)
log.info(f'HTTP response status {r.status_code}')
if __name__ == "__main__":
import argparse
cli = argparse.ArgumentParser(
description = 'Extract tables from Invensys datasheet',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
cli.add_argument('-v', '--verbose', dest="ll", help='increase verbose',
action='count', default=0)
cli.add_argument('-d', '--debug', dest="ll", help='debug',
action='store_const', const=3)
cli.add_argument('-u', '--url', dest="url", help='Datasheet URL',
action='store', default=DS_url)
args = cli.parse_args()
print(f'args: {args}')
log.setLevel( loglvls[min(args.ll,len(loglvls)-1)] )
main(args)
```
#### File: micropython-gy521/micropython_gy521/gy521.py
```python
from struct import pack
"""Main module."""
# SDA=Pin(5)
# SCL=Pin(16)
# Int = None
DEFAULT_ADDR = 0x68
REG_WHO_AM_I = 0x75
class gy521:
def __init__(self):
from machine import Pin, I2C
self.bus = I2C(scl=Pin(16), sda=Pin(5), freq=400000)
self.addr = DEFAULT_ADDR
# def init(self, SCL=None, SDA=None, INT=None, addr=0x68):
# from machine import Pin,I2C
# (self.SCL, self.SDA, self.INT, self.addr) = (SCL, SDA, INT, addr)
# self.bus = I2C(scl=Pin(SCL), sda=Pin(SDA), freq=400000)
def ping(self):
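        # The WHO_AM_I register (0x75) of the MPU-6050 on a GY-521 board reads back 0x68
        # when the device is present and responding on the I2C bus.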
iam = self.bus.readfrom_mem( self.addr, REG_WHO_AM_I, 1 )
return (iam == pack('B', 0x68))
#
# def deinit(self):
# self.bus.deinit()
``` |
{
"source": "jpVm5jYYRE1VIKL/djangocms-bootstrap4",
"score": 2
} |
#### File: tests/bootstrap4_tabs/test_models.py
```python
from django.test import TestCase
from djangocms_bootstrap4.contrib.bootstrap4_tabs.models import (
Bootstrap4Tab, Bootstrap4TabItem,
)
class B4TabsModelTestCase(TestCase):
def test_tab_instance(self):
instance = Bootstrap4Tab.objects.create()
self.assertEqual(str(instance), "1")
self.assertEqual(instance.get_short_description(), "(nav-tabs)")
instance.tab_alignment = "nav-fill"
self.assertEqual(instance.get_short_description(), "(nav-tabs) .nav-fill")
def test_tab_item_instance(self):
instance = Bootstrap4TabItem.objects.create()
self.assertEqual(str(instance), "1")
self.assertEqual(instance.get_short_description(), "")
```
#### File: tests/bootstrap4_utilities/test_plugins.py
```python
from cms.api import add_plugin
from cms.test_utils.testcases import CMSTestCase
from djangocms_bootstrap4.contrib.bootstrap4_utilities.cms_plugins import (
Bootstrap4SpacingPlugin,
)
from ..fixtures import B4TestFixture
class B4UtilitiesPluginTestCase(B4TestFixture, CMSTestCase):
def test_plugin(self):
plugin = add_plugin(
placeholder=self.placeholder,
plugin_type=Bootstrap4SpacingPlugin.__name__,
language=self.language,
)
plugin.full_clean()
self.page.publish(self.language)
with self.login_user_context(self.superuser):
response = self.client.get(self.request_url)
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<div class="m-0">')
```
#### File: djangocms-bootstrap4/tests/test_fields.py
```python
from django.test import TestCase
from djangocms_bootstrap4.fields import (
AttributesField, IntegerRangeField, TagTypeField,
)
class B4FieldsTestCase(TestCase):
def test_attributes_field(self):
field = AttributesField()
self.assertEqual(field.verbose_name, "Attributes")
self.assertEqual(field.blank, True)
def test_tag_type_field(self):
field = TagTypeField()
self.assertEqual(field.verbose_name, "Tag type")
self.assertEqual(field.choices, (
('div', 'div'),
('section', 'section'),
('article', 'article'),
('header', 'header'),
('footer', 'footer'),
('aside', 'aside')
))
self.assertEqual(field.default, "div")
self.assertEqual(field.max_length, 255)
self.assertEqual(
field.help_text,
"Select the HTML tag to be used.",
)
def test_integer_range_field(self):
field = IntegerRangeField()
self.assertEqual(field.min_value, None)
self.assertEqual(field.max_value, None)
field.min_value = 255
field.max_value = 255
field = field.formfield()
self.assertEqual(field.min_value, 255)
self.assertEqual(field.max_value, 255)
``` |
{
"source": "jpvolt/CameraCalibration",
"score": 3
} |
#### File: camcalibpython/camcalib/camcalib.py
```python
import cv2
class Calibrate:
def __init__(self, path, w, h):
self.config = cv2.FileStorage(path, cv2.FileStorage_READ)
self.CamMat = self.config.getNode("cameraMatrix").mat()
self.distCoeffs = self.config.getNode("distCoeffs").mat()
print(self.distCoeffs.shape)
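        # alpha=1 in getOptimalNewCameraMatrix keeps every source pixel after undistortion
        # (possibly leaving black borders); alpha=0 would instead crop to only valid pixels.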
self.newCamMat, _ = cv2.getOptimalNewCameraMatrix(self.CamMat, self.distCoeffs,(w,h),1,(w,h))
def fix(self, frame):
return cv2.undistort(frame, self.CamMat, self.distCoeffs, self.newCamMat)
``` |
{
"source": "jpvt/Digital_Image_Processing",
"score": 3
} |
#### File: app/opendip/dct.py
```python
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
class DCT:
def __init__(self):
self.input_dct2d = None
self.output_dct2d = None
self.input_idct2d = None
self.output_idct2d = None
def get_2d_dct(self, image, n_coef = 0):
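        # Direct 2D DCT-II (orthonormal form), cost O(R^2 * C^2):
        #   X[k,l] = (2/sqrt(R*C)) * c_k * c_l * sum_m sum_n x[m,n]
        #            * cos((2m+1)*k*pi/(2R)) * cos((2n+1)*l*pi/(2C))
        # with c_0 = sqrt(0.5) and c_k = 1 otherwise.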
self.input_dct2d = image
output = np.zeros(image.shape)
R = output.shape[0]
C = output.shape[1]
for k in range(R):
ck = np.sqrt(0.5) if k == 0 else 1
for l in range(C):
cl = np.sqrt(0.5) if l == 0 else 1
for m in range(R):
for n in range(C):
output[k][l] += image[m][n] * np.cos(((2*m + 1) * k*np.pi)/(2*R)) * np.cos(((2*n + 1) * l*np.pi)/(2*C))
output[k][l] *= ck * cl
output *= 2.0/np.sqrt(R*C)
if n_coef > 0:
output.sort()
            for i in range(n_coef, output.shape[0]):
output[i] = 0
self.output_dct2d = output
return output
def get_2d_dct_sep(self, image, n_coef = 0):
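        # The 2D DCT is separable: a 1D DCT applied to every row and then to every column
        # gives the same result as the direct 2D transform at much lower cost.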
self.input_dct2d = image
output = np.zeros(image.shape)
for row in range(image.shape[0]):
output[row, :] = self.get_1d_dct(image[row, :], n_coef)
for column in range(image.shape[1]):
output[:, column] = self.get_1d_dct(output[:, column], n_coef)
self.output_dct2d = output
return output
def get_1d_dct(self, image, n_coef = 0):
output = np.zeros(image.shape)
n = len(image)
for k in range(n):
ck = np.sqrt(0.5) if k == 0 else 1
for i in range(n):
output[k] += image[i] * np.cos(2 * np.pi * k / (2.0 * n) * i + (k * np.pi) / (2.0 * n))
output[k] *= ck
output *= np.sqrt(2.0/n)
if type(n_coef) == float or type(n_coef) == np.float32:
n_coef = int(n_coef * len(output))
if n_coef > 0:
output.sort()
for i in range(n_coef, n):
output[i] = 0
return output
def get_inv_2d_dct(self, dct = None):
if type(dct) == type(None):
dct = self.output_dct2d
self.input_idct2d = dct
output = np.zeros(dct.shape)
R = output.shape[0]
C = output.shape[1]
for m in range(R):
for n in range(C):
for k in range(R):
ck = np.sqrt(0.5) if k == 0 else 1
for l in range(C):
cl = np.sqrt(0.5) if l == 0 else 1
output[m][n] += ck * cl * dct[k][l] * np.cos(((2*m + 1) * k*np.pi)/(2*R)) * np.cos(((2*n + 1) * l*np.pi)/(2*C))
output *= (2 / np.sqrt(R*C))
self.output_idct2d = np.round(output)
self.output_idct2d[self.output_idct2d < 0] = 0
self.output_idct2d[self.output_idct2d > 255] = 255
return output
def get_inv_2d_dct_sep(self, dct = None):
if type(dct) == type(None):
dct = self.output_dct2d
self.input_idct2d = dct
output = np.zeros(dct.shape)
for row in range(dct.shape[0]):
output[row, :] = self.get_inv_1d_dct(dct[row, :])
for column in range(dct.shape[1]):
output[:, column] = self.get_inv_1d_dct(output[:, column])
self.output_idct2d = np.round(output)
self.output_idct2d[self.output_idct2d < 0] = 0
self.output_idct2d[self.output_idct2d > 255] = 255
return output
def get_inv_1d_dct(self, dct):
output = np.zeros(dct.shape)
n = len(dct)
for i in range(n):
for k in range(n):
ck = np.sqrt(0.5) if k == 0 else 1
output[i] += ck * dct[k] * np.cos(2 * np.pi * k / (2.0 * n) * i + (k * np.pi) / (2.0 * n))
output[i] *= np.sqrt(2.0 / n)
return output
def show_process(self):
return self.input_dct2d, self.output_dct2d, self.output_idct2d
``` |
{
"source": "jpw1991/perlin-noise-2d-terrain-generation",
"score": 3
} |
#### File: perlin-noise-2d-terrain-generation/perlin-noise-2d-terrain-generation/noisemap.py
```python
import json
import enum
from PIL import Image, ImageFont, ImageDraw
from noise import pnoise2, snoise2
from noisemaptile import NoiseMapTile
from noiserange import NoiseRange
class NoiseMapBiome(enum.Enum):
OCEAN = 1
SHALLOWS = 2
BEACH = 3
SCORCHED = 4
BARE = 5
TUNDRA = 6
TEMPERATE_DESERT = 7
SHRUBLAND = 8
GRASSLAND = 9
TEMPERATE_DECIDUOUS_FOREST = 10
TEMPERATE_RAIN_FOREST = 11
SUBTROPICAL_DESERT = 12
TROPICAL_SEASONAL_FOREST = 13
TROPICAL_RAIN_FOREST = 14
SNOW = 15
TAIGA = 16
SWAMP = 17
class NoiseMap:
"""
A map of NoiseMapTiles.
"""
def __init__(self, width, height, noise_ranges=[], tiles=[], moisture_map=None):
"""
        noise_ranges = the NoiseRange thresholds for the different kinds of tiles,
        eg. water 0.1, sand 0.25, etc.
"""
self.width = width
self.height = height
self.noise_ranges = noise_ranges
self.tiles = tiles
self.moisture_map = moisture_map
self.algorithm = None
self.scale = None
self.octaves = None
self.image = None
# create a dictionary from the noise ranges list for quick lookups later
self.noise_range_dict = {}
for noise_range in noise_ranges:
self.noise_range_dict[noise_range.name] = noise_range
def generate(self, algorithm, scale, octaves, persistence=0.5, lacunarity=2.0, sink_edges=False):
"""
Generates the noise map.
:param algorithm: use simplex or perlin algorithms
:param scale: it's the scale of the map. Higher = zoomed in, lower = zoomed out.
:param octaves: the level of detail. Lower = more peaks and valleys, higher = less peaks and valleys.
:param persistence: how much an octave contributes to overall shape (adjusts amplitude).
:param lacunarity: the level of detail on each octave (adjusts frequency).
:param sink_edges: Sinks the edges and corners of the map into the ocean to create islands.
:return: None
"""
self.algorithm = algorithm
self.scale = scale
self.octaves = octaves
self.tiles = []
for y in range(self.height):
row = []
for x in range(self.width):
noise_value = None
if algorithm == 'perlin':
noise_value = pnoise2(x=x/scale, y=y/scale, octaves=octaves,
persistence=persistence, lacunarity=lacunarity)
elif algorithm == 'simplex':
noise_value = snoise2(x=x/scale, y=y/scale, octaves=octaves,
persistence=persistence, lacunarity=lacunarity)
row += [NoiseMapTile(x, y, noise_value)]
self.tiles += row
# If sink edges is true, we need to sink the corners & sides of the map into the ocean
#
# 1. Generate a box that fits inside the map with a small degree of margin.
# 2. Generate a circle in its center
# 3. Use this circle to cull all tiles that fall outside of it.
# _ _ _ _ _ _
# |ooooKKKoooo|
# |ooKKKKKKKoo|
# |oKKKKKKKKKo|
# |ooKKKKKKKoo|
# |ooooKKKoooo|
#
# Something like above, where K is keep and O is ocean. Ok, awful ASCII art. I admit.
#
# http://mathcentral.uregina.ca/QQ/database/QQ.09.06/s/lori1.html
# 1. Find the center tile
# C=PI*d
# circumference = 3.14 * diameter
def biome(self, elevation, moisture):
""" Determine the biome from the elevation & moisture of the tile """
""" Water/Shore"""
if elevation <= self.noise_range_dict['water'].threshold:
return NoiseMapBiome.OCEAN
if elevation <= self.noise_range_dict['sand'].threshold and moisture >= 0.2:
return NoiseMapBiome.SWAMP
if elevation <= self.noise_range_dict['shallowwater'].threshold:
return NoiseMapBiome.SHALLOWS
if elevation <= self.noise_range_dict['sand'].threshold:
return NoiseMapBiome.BEACH
""" High mountain """
if elevation > self.noise_range_dict['hugemountain'].threshold:
if moisture < 0.1:
return NoiseMapBiome.SCORCHED
elif moisture < 0.2:
return NoiseMapBiome.BARE
elif moisture < 0.5:
return NoiseMapBiome.TUNDRA
return NoiseMapBiome.SNOW
""" Mountain """
if elevation > self.noise_range_dict['mountain'].threshold:
if moisture < 0.33:
return NoiseMapBiome.TEMPERATE_DESERT
elif moisture < 0.66:
return NoiseMapBiome.SHRUBLAND
return NoiseMapBiome.TAIGA
""" Land """
if moisture < 0.16:
return NoiseMapBiome.SUBTROPICAL_DESERT
if moisture < 0.33:
return NoiseMapBiome.GRASSLAND
if moisture < 0.66:
return NoiseMapBiome.TROPICAL_SEASONAL_FOREST
return NoiseMapBiome.TROPICAL_RAIN_FOREST
def __iter__(self):
""" Yields a dictionary when dict() is called for serializing to JSON """
yield 'width', self.width
yield 'height', self.height
yield 'algorithm', self.algorithm
yield 'scale', self.scale
yield 'octaves', self.octaves
yield 'noise_ranges', [dict(noise_range) for noise_range in self.noise_ranges]
yield 'tiles', [dict(tile) for tile in self.tiles]
if self.moisture_map is not None:
yield 'moisture_map', dict(self.moisture_map)
def display_as_image(self, tile_size):
"""
Display the map as an image.
:param tile_size: The size of each tile.
:return: None
"""
def chunks(target_list, chunk_size):
"""
Break a big list into smaller lists.
"""
for i in range(0, len(target_list), chunk_size):
yield target_list[i:i + chunk_size]
def get_biome_color(value):
if value == NoiseMapBiome.OCEAN:
return (54, 62, 150) # dark blue
elif value == NoiseMapBiome.SHALLOWS:
return (88, 205, 237) # cyan
elif value == NoiseMapBiome.BEACH:
return (247, 247, 119) # yellow
elif value == NoiseMapBiome.SCORCHED:
return (247, 149, 119) # peach
elif value == NoiseMapBiome.BARE:
return (168, 166, 165) # grey
elif value == NoiseMapBiome.TUNDRA:
return (132, 173, 158) # grey green
elif value == NoiseMapBiome.TEMPERATE_DESERT:
return (227, 155, 0) # orange
elif value == NoiseMapBiome.SHRUBLAND:
return (62, 110, 58) # olive
elif value == NoiseMapBiome.GRASSLAND:
return (55, 181, 43) # green
elif value == NoiseMapBiome.TEMPERATE_DECIDUOUS_FOREST:
return (62, 138, 55) # darker green
elif value == NoiseMapBiome.TEMPERATE_RAIN_FOREST:
return (161, 38, 255) # violet
elif value == NoiseMapBiome.SUBTROPICAL_DESERT:
return (255, 214, 153) # fleuro yellow
elif value == NoiseMapBiome.TROPICAL_SEASONAL_FOREST:
return (102, 153, 0) # some kind of green
elif value == NoiseMapBiome.TROPICAL_RAIN_FOREST:
return (255, 0, 119) # rose
elif value == NoiseMapBiome.SNOW:
return (255, 255, 255) # white
elif value == NoiseMapBiome.TAIGA:
return (62, 87, 71) # dark olive
elif value == NoiseMapBiome.SWAMP:
return (92, 112, 104) # grey green
else:
return (0, 0, 0) # black
# add some extra height to the image for the legend
legend_height = 200
legend_width = 1500
image_width = self.width*tile_size
if image_width < legend_width:
image_width = legend_width
self.image = Image.new('RGBA', size=(image_width, (self.height*tile_size)+legend_height), color=(0, 0, 0))
d = ImageDraw.Draw(self.image)
for tile_index in range(len(self.tiles)):
tile = self.tiles[tile_index]
moisture_tile = self.moisture_map.tiles[tile_index]
biome_color = get_biome_color(self.biome(tile.noise_value, moisture_tile.noise_value))
d.rectangle([tile.x*tile_size, tile.y*tile_size, tile.x*tile_size+tile_size, tile.y*tile_size+tile_size], fill=biome_color)
# print the map legend so we know what we're looking at
font_size = 14
font = ImageFont.truetype('resources/fonts/JoshuaFont3.pfb', 14)
keys = [str(key)[14:] for key in NoiseMapBiome]
key_rows = chunks(keys, 5)
text_x = 10
text_y = (self.height*tile_size) + 10
for key_row in key_rows:
for key in key_row:
# draw color key block
d.rectangle([text_x, text_y, text_x + font_size, text_y + font_size], fill=get_biome_color(getattr(NoiseMapBiome, key)))
# offset it by 2 char widths due to the color key and equals sign etc
d.text((text_x+(font_size*2), text_y), ' = ' + key, font=font, fill=(255, 255, 255,))
text_x += font_size * 20
text_y += 50
text_x = 10
self.image.show()
def save(self, file_name):
""" Save the map as JSON to a file. """
with open(file_name, 'w', encoding='utf8') as file:
json.dump(dict(self), file, indent=4)
file.close()
def save_image(self, file_name):
""" Save the map image file. """
if self.image is not None:
self.image.save(file_name)
@classmethod
def load(cls, data) -> 'NoiseMap':
if data is not None:
# parse map info
width = data['width']
height = data['height']
# parse tiles
tiles = [NoiseMapTile(tile['x'], tile['y'], tile['noise_value']) for tile in data['tiles']]
# parse noise ranges
noise_ranges = [
NoiseRange(noise_range['name'], noise_range['threshold'])
for noise_range in data['noise_ranges']]
# parse moisture map
moisture_map = None
if 'moisture_map' in data:
moisture_map = NoiseMap.load(data['moisture_map'])
return cls(width, height, noise_ranges, tiles, moisture_map)
```
#### File: perlin-noise-2d-terrain-generation/perlin-noise-2d-terrain-generation/noiserange.py
```python
class NoiseRange:
"""
Defines where a range begins and ends.
Also contains a helpful name tag eg. water, mountain, etc.
"""
def __init__(self, name, threshold):
self.name = name
self.threshold = threshold
def __iter__(self):
""" Yields a dictionary when dict() is called for serializing to JSON """
yield 'name', self.name
yield 'threshold', self.threshold
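# Usage sketch (illustrative, not part of the original file):
#   dict(NoiseRange("water", 0.1)) == {'name': 'water', 'threshold': 0.1}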
``` |
{
"source": "jpw447/N-body-gravity",
"score": 3
} |
#### File: project/Investigation/fourier_transforms.py
```python
import numpy as np
def fourier_func(signal_amplitude:np.ndarray, t_max:float):
'''Calculates the power and equivalent frequency for the first half of the k-modes for the supplied signal amplitude.
Args:
signal_amplitude (np.ndarray): The supplied signal amplitudes, uniformly sampled from t = 0 to t = t_max
t_max (float): The maximum time
Returns:
(np.ndarray, np.ndarray): A tuple of numpy arrays, the first being the power values and the second being the freqencies corresponding to the power.
'''
global FT, frequencies # Necessary for the inverse Fourier Transform
FT = np.fft.fft(signal_amplitude)
N = len(signal_amplitude)
half = int(N/2)
FT = FT[:half] # Second half of array is irrelevant
step = 1/t_max
nyq = half*step # Nyquist frequency
frequencies = np.arange(0, nyq, step)
FT_conj = FT.conj()
power = ((FT*FT_conj)/N).real # Power spectrum from FT×FT*
return power, frequencies
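# Illustrative usage sketch (not part of the original module); the 5 Hz sine, duration and
# sample count below are arbitrary demo values.
if __name__ == "__main__":
    t_max = 2.0
    t = np.linspace(0.0, t_max, 1000, endpoint=False)   # uniform sampling, as fourier_func expects
    demo_signal = np.sin(2.0 * np.pi * 5.0 * t)          # 5 Hz sine wave
    demo_power, demo_freqs = fourier_func(demo_signal, t_max)
    print("Peak frequency:", demo_freqs[np.argmax(demo_power)], "Hz")  # expect ~5 Hz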
```
#### File: project/Investigation/TRAPPIST - Jupiter Study.py
```python
import numpy as np
import matplotlib.pyplot as plt
from N_body_simulator import N_body_solver
from energy_calculator import system_energy
'''
Investigating the TRAPPIST system by adding a Jupiter-sized interloper between TRAPPIST-1e and 1f, and varying its mass.
TRAPPIST-1e's eccentricity is measured and trajectories plotted in time to illustrate escape trajectories.
Used to produce figure 9 in the report.
All of the planets initially have very low eccentricity (e<0.02) so can be treated as simple Keplerian orbits.
Initial condition data retrieved from: https://exoplanetarchive.ipac.caltech.edu/overview/TRAPPIST-1
'''
# Constants and establishing initial conditions for each body
G = 6.67e-11
M_sol = 1.989e30
M_earth = 5.972e24
AU = 1.496e11
days_to_seconds = (60**2) * 24
M_star = 0.898*M_sol
ic_star = [0,0,0, 0,0,0]
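# For each planet below: semi-major axis from Kepler's third law, a = (G*M*T^2/(4*pi^2))^(1/3),
# and circular orbital speed v = sqrt(G*M/a) used as the initial tangential velocity.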
M_b = 1.374*M_earth
period_b = 1.510826*days_to_seconds
SMA_b = ((period_b**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_b = np.sqrt(G*M_star/SMA_b)
ic_b = [SMA_b,0,0, 0,v_b,0]
M_c = 1.308*M_earth
period_c = 2.421937*days_to_seconds
SMA_c = ((period_c**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_c = np.sqrt(G*M_star/SMA_c)
ic_c = [SMA_c,0,0, 0,v_c,0]
M_d = 0.388*M_earth
period_d = 4.049219*days_to_seconds
SMA_d = ((period_d**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_d = np.sqrt(G*M_star/SMA_d)
ic_d = [SMA_d,0,0, 0,v_d,0]
M_e = 0.692*M_earth
period_e = 6.101013*days_to_seconds
SMA_e = ((period_e**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_e = np.sqrt(G*M_star/SMA_e)
ic_e = [SMA_e,0,0, 0,v_e,0]
M_f = 1.039*M_earth
period_f = 9.20754*days_to_seconds
SMA_f = ((period_f**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_f = np.sqrt(G*M_star/SMA_f)
ic_f = [SMA_f,0,0, 0,v_f,0]
M_g = 1.321*M_earth
period_g = 12.352446*days_to_seconds
SMA_g = ((period_g**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_g = np.sqrt(G*M_star/SMA_g)
ic_g = [SMA_g,0,0, 0,v_g,0]
M_h = 0.326*M_earth
period_h = 18.772866*days_to_seconds
SMA_h = ((period_h**2 * G* M_star)/(4*np.pi**2))**(1/3)
v_h = np.sqrt(G*M_star/SMA_h)
ic_h = [SMA_h,0,0, 0,v_h,0]
# Adding a Jupiter-sized planet between e and f, but on the other side of the star and travelling in -y direction
M_jupiter = 317.83*M_earth
SMA_jupiter = -np.mean([SMA_e,SMA_f])
v_jupiter = -np.sqrt(G*M_star/abs(SMA_jupiter))
ic_jupiter = [SMA_jupiter,0,0, 0,v_jupiter,0]
# Initial conditions, masses and names for the undisturbed TRAPPIST system
ic = ic_star + ic_b + ic_c + ic_d + ic_e + ic_f + ic_g + ic_h
masses = [M_star, M_b, M_c, M_d, M_e, M_f, M_g, M_h]
names = ["TRAPPIST-1", "TRAPPIST-1b", "TRAPPIST-1c", "TRAPPIST-1d", "TRAPPIST-1e", "TRAPPIST-1f", "TRAPPIST-1g", "TRAPPIST-1h"]
# Creating time array and calling the function
N = int(len(masses))
T = 4*period_g
number_of_points = 50000
t_array = np.linspace(0, T, number_of_points) # In seconds#
# Creating plots
# 2d system plot
fig_trappist_1, ax_trappist_1 = plt.subplots(1, 2, figsize=(12,8))
plt.subplots_adjust(wspace=0.3, hspace=0.5, top=0.85)
for axis in ax_trappist_1:
axis.set_xlabel("$x$ (AU)", fontsize=16)
axis.set_ylabel("$y$ (AU)", fontsize=16)
axis.set_aspect("equal")
axis.set_xlim(-0.18, 0.18)
axis.set_ylim(-0.18, 0.18)
# Plots of separation between TRAPPIST-1 and planets
fig_separation = plt.figure(figsize=(8,6))
ax_separation = fig_separation.gca()
ax_separation.set_xlabel("$t$ (days)", fontsize=16)
ax_separation.set_ylabel("$r$ (AU)", fontsize=16)
# Phase space plots
fig_poincare, ax_poincare = plt.subplots(1, 2, figsize=(12,6))
plt.subplots_adjust(wspace=0.3, hspace=0.5, top=0.85)
for axis in ax_poincare:
axis.set_xlabel("$x$ (AU)", fontsize=16)
axis.set_ylabel("$v_{x}$ (ms$^{-1}$)", fontsize=16)
axis.set_xlim(-0.18, 0.18)
axis.set_ylim(-140000, 140000)
plt.tight_layout()
# Energy plots. Not used in report
fig_energy = plt.figure(figsize=(8,6))
ax_energy = fig_energy.gca()
ax_energy.set_xlabel("Mass of Disturbing Body ($M_{Jupiter}$)", fontsize=16)
ax_energy.set_ylabel("Energy (J)", fontsize=16)
# Eccentricities
fig_eccentricity = plt.figure(figsize=(8,6))
ax_eccentricity = fig_eccentricity.gca()
ax_eccentricity.set_xlabel("Mass of Disturbing Body ($M_{Jupiter}$)", fontsize=16)
ax_eccentricity.set_ylabel("Eccentricity", fontsize=16)
# Converting to days for the plots
t_days = t_array/days_to_seconds
def system_solver(N, ic, masses, t_array, axis_system, axis_separation, axis_poincare, axis_eccentricity, disturbed=False):
'''
Simple function that calls N_body_solver to solve the system, and proceeds to systematically plot each relevant body on the relevant axis.
This is used to prevent repetition later in the code. Although a very long function, is is written this way so that the logic of plotting is easier to follow.
This function is also not general, but built specifically to the investigation being conducted in the report.
Args:
N: integer number of bodies.
ic: initial conditions array for the system. Passed to N_body_solver, so must be in the correct format.
masses: array containing masses in order of their initial conditions given to N_body_solver.
t_array: array of time steps in seconds.
axis_system: pyplot axis on which to plot the y versus x positions of each body.
axis_separation: pyplot axis on which separation between a body and the star is plotted against t_array (time).
axis_poinare: pyplot axis on which the phase space plot of vx against x is plotted.
axis_eccentricity: pyplot axis on which the eccentricity against interloper masse is calculated. Only used if disturbed=True
disturbed: Boolean default set to False. Set to true when an interloper is added to the system.
Returns:
Nothing.
'''
solution, initial_energy = N_body_solver(N, ic, masses, t_array)
# Looping through solution to plot all bodies
x_star = solution[:,0]/AU
y_star = solution[:,1]/AU
z_star = solution[:,2]/AU
for i in range(N):
name = names[i]
x_start = 6*i
y_start = x_start+1
z_start = x_start+2
x = solution[:,x_start]/AU - x_star
y = solution[:,y_start]/AU - y_star
z = solution[:,z_start]/AU - z_star
# When system is not disturbed by Jupiter
if disturbed == False:
# Special Jupiter plot
if name == "Jupiter":
axis_system.plot(x, y, color="cyan", linestyle="dashed", label=name)
axis_system.plot(x[0], y[0], color="cyan", marker="o")
            # Phase space and separation plots for 1f and 1e
elif (name == "TRAPPIST-1f") or (name == "TRAPPIST-1e"):
# Velocities needed for phase space for only these bodies
vx_start = x_start+3
vx = solution[:,vx_start]
separation = np.sqrt( x**2 + y**2 + z**2 )
if name == "TRAPPIST-1f":
axis_system.plot(x, y, 'black', label=name)
axis_system.plot(x[0], y[0], 'ko')
axis_separation.plot(t_days, separation, 'k--', label=name)
axis_poincare.plot(x, vx, 'black', label=name)
axis_poincare.plot(x[0], vx[0], 'ko')
else:
axis_system.plot(x, y, 'red', label=name)
axis_system.plot(x[0], y[0], 'ro')
axis_separation.plot(t_days, separation, 'r--', label=name)
axis_poincare.plot(x, vx, 'red', label=name)
axis_poincare.plot(x[0], vx[0], 'ro')
# Plots the planet normally if not Jupiter, 1f or 1g
else:
axis_system.plot(x, y, label=name)
# When system is disturbed by Jupiter, only plot Jupiter, 1f and 1e
else:
if name == "TRAPPIST-1e":
vx_start = x_start+3
vx = solution[:,vx_start]
separation = np.sqrt( x**2 + y**2 + z**2 )
                # Calculating maximum kinetic energy achieved by 1e and plotting against Jupiter mass
# Only require Potential Energy from calculator
null, null, PE = system_energy(solution, masses, N, number_of_points)
vy_start = x_start+4
vz_start = x_start+5
vy = solution[:,vy_start]
vz = solution[:,vz_start]
v_squared = vx*vx + vy*vy + vz*vz
KE = 0.5*masses[i]*v_squared
# Finding where maximum occurs. Redundant in final report
max_index = np.argmax(KE)
max_KE_list.append(KE[max_index])
max_PE_list.append(abs(PE[max_index]))
# Calculating eccentricity
ratio = np.min(separation)/np.max(separation)
if ratio > 1:
eccentricity = np.sqrt(1+ratio**2)
else:
eccentricity = np.sqrt(1-ratio**2)
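# Note: ratio = r_min/r_max is at most 1 by construction, so the first branch is only a safeguard.
# The estimate e = sqrt(1 - ratio**2) treats the minimum and maximum star separations as the
# semi-minor and semi-major axes of the disturbed orbit.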
# Plotting everything
axis_system.plot(x, y, 'black', label=name)
axis_system.plot(x[0], y[0], 'ko')
axis_separation.plot(t_days, separation, label="M="+str(np.round(masses[-1]/M_jupiter, 1))+"M$_{J}$")
axis_poincare.plot(x, vx, 'black', label=name)
axis_poincare.plot(x[0], vx[0], 'ko')
axis_eccentricity.plot(masses[-1]/M_jupiter, eccentricity, 'rx')
# Special Jupiter plot again
elif name == "Jupiter":
axis_system.plot(x, y, color="cyan", linestyle="dashed", label=name)
axis_system.plot(x[0], y[0], color="cyan", marker="o")
else:
pass
# Updating system initial conditions, masses and names to include Jupiter of varying mass
ic += ic_jupiter
names.append("Jupiter")
N = int(len(masses)) + 1
# Mass ranges to investigate, in Jupiter masses
min_mass = 0.1
max_mass = 15
mass_range = np.arange(min_mass*M_jupiter, max_mass*M_jupiter, 1*M_jupiter)
max_KE_list = []
max_PE_list = []
print("There are "+str(len(mass_range+1))+" masses in the range to observe.")
i = 1
# Cycling through the masses and solving the system in each case, with plots
for jupiter_mass in mass_range:
print("Calculating system "+str(i)+"...")
new_masses = masses + [jupiter_mass]
system_solver(N, ic, new_masses, t_array, ax_trappist_1[1], ax_separation, ax_poincare[1], ax_eccentricity, disturbed=True)
i += 1
# Plotting the maximum kinetic energy achieved and potential energy at that time. Redundant in final report.
ax_energy.plot(mass_range/M_jupiter, max_KE_list, 'rx', label="Maximum KE achieved (TRAPPIST-1e)")
ax_energy.plot(mass_range/M_jupiter, max_PE_list, 'bo', label="System PE at time of max KE")
ax_energy.hlines(0, 0, mass_range[-1]/M_jupiter + 1, color="black", linestyle="dashed")
ax_energy.legend()
# Adding legends and moving them in the figure to not obscure data
ax_trappist_1[1].legend(bbox_to_anchor=(0, 0.05))
ax_separation.legend()
ax_poincare[1].legend(bbox_to_anchor=(0, 0.05))
plt.show()
```
#### File: Redundant Files/2D Systems/sun-jupiter-system.py
```python
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
'''
Hand-crafted solution to the 2- and 3-body problem in 2D for the Sun-Jupiter(-Saturn) system. This is a redundant file and may not produce the correct solution.
It was not used in the final report, but existed as a study into the 2 body problem.
'''
def two_body_field_function(parameters, t, constants):
'''
Function that takes input parameters (initial conditions) and constants, as well as a time array.
Returns a list containing the field of differential equations for each derivative.
Args:
parameters: list with initial conditions, containing positions and velocities of 2 bodies
t: time array used by ode_int
constants: list containing constants such as Gravitational Constant and masses
Returns:
field: list containing the derivatives for the system
'''
x1, y1, vx1, vy1, x2, y2, vx2, vy2 = parameters
G, M1, M2 = constants
# Separation of bodies
r_12 = ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))**0.5
# r_13 = ((x1-x3)*(x1-x3) + (y1-y3)*(y1-y3))**0.5
# r_23 = ((x2-x3)*(x2-x3) + (y2-y3)*(y2-y3))**0.5
# ODEs for star
x1_dot = vx1
vx1_dot = -(G*M2/r_12**3) * (x1-x2)
y1_dot = vy1
vy1_dot = -(G*M2/r_12**3) * (y1-y2)
# ODEs for Earth
x2_dot = vx2
vx2_dot = -(G*M1/r_12**3) * (x2-x1)
y2_dot = vy2
vy2_dot = -(G*M1/r_12**3) * (y2-y1)
# Returning ODEs as a list
field = [x1_dot, y1_dot, vx1_dot, vy1_dot,
x2_dot, y2_dot, vx2_dot, vy2_dot]
return field
def three_body_field_function(parameters, t, constants):
'''
Function that takes input parameters (initial conditions) and constants, as well as a time array.
Returns a list containing the field of differential equations for each derivative.
Args:
parameters: list with initial conditions, containing positions and velocities of 3 bodies
t: time array used by ode_int
constants: list containing constants such as Gravitational Constant and masses
Returns:
field: list containing the derivatives for the system
'''
x1, y1, vx1, vy1, x2, y2, vx2, vy2, x3, y3, vx3, vy3 = parameters
G, M1, M2, M3 = constants
# Close encounters could be causing bugs, by creating huge numbers.
# Needs some checking.
# Separation of bodies
r_12 = ((x1-x2)*(x1-x2) + (y1-y2)*(y1-y2))**0.5
r_13 = ((x1-x3)*(x1-x3) + (y1-y3)*(y1-y3))**0.5
r_23 = ((x2-x3)*(x2-x3) + (y2-y3)*(y2-y3))**0.5
# ODEs for M1
x1_dot = vx1
vx1_dot = -(G*M2/r_12**3) * (x1-x2) - (G*M3/r_13**3) * (x1-x3)
y1_dot = vy1
vy1_dot = -(G*M2/r_12**3) * (y1-y2) - (G*M3/r_13**3) * (y1-y3)
# ODEs for M2
x2_dot = vx2
vx2_dot = -(G*M1/r_12**3) * (x2-x1) - (G*M3/r_23**3) * (x2-x3)
y2_dot = vy2
vy2_dot = -(G*M1/r_12**3) * (y2-y1) - (G*M3/r_23**3) * (y2-y3)
# ODEs for M3
x3_dot = vx3
vx3_dot = -(G*M1/r_13**3) * (x3-x1) - (G*M2/r_23**3) * (x3-x2)
y3_dot = vy3
vy3_dot = -(G*M1/r_13**3) * (y3-y1) - (G*M2/r_23**3) * (y3-y2)
# Returning ODEs as a list
field =[x1_dot, y1_dot, vx1_dot, vy1_dot,
x2_dot, y2_dot, vx2_dot, vy2_dot,
x3_dot, y3_dot, vx3_dot, vy3_dot]
return field
if __name__ == "__main__":
# Constants
G = 6.67408e-11
M1 = 1.989e30
M2 = 1.898e27
M3 = 5.683e26
AU = 1.496e11
MU = 384400e3
# Get division by zero otherwise
zero = 1e-6
# Position intitial conditions
x1_init = zero
y1_init = zero
x2_init = 778.5e9
y2_init = zero
x3_init = 1.434e12
y3_init = zero
# Separations
M1_M2_separation = np.sqrt( (x1_init - x2_init)**2 + (y1_init - y2_init)**2 )
M1_M3_separation = np.sqrt( (x1_init - x3_init)**2 + (y1_init - y3_init)**2 )
M2_M3_separation = np.sqrt( (x2_init - x3_init)**2 + (y2_init - y3_init)**2 )
# Velocities
vx1_init = zero
vy1_init = zero
vx2_init = zero
vy2_init = np.sqrt(G*M1/M1_M2_separation)
vx3_init = zero
vy3_init = np.sqrt(G*M1/M1_M3_separation)
v1_squared = vx1_init*vx1_init + vy1_init*vy1_init
v2_squared = vx2_init*vx2_init + vy2_init*vy2_init
v3_squared = vx3_init*vx3_init + vy3_init*vy3_init
# Energies
M1_KE_init = 0.5 * M1 * v1_squared
M2_KE_init = 0.5 * M2 * v2_squared
M3_KE_init = 0.5 * M3 * v3_squared
PE_init = -G*M1*M2/M1_M2_separation - G*M1*M3/M1_M3_separation - G*M2*M3/M2_M3_separation
KE_init = M1_KE_init + M2_KE_init + M3_KE_init
initial_energy = KE_init + PE_init
# Angular momentum
L1_init = M1 * np.sqrt(v1_squared) * np.sqrt( x1_init*x1_init + y1_init*y1_init )
L2_init = M2 * np.sqrt(v2_squared) * np.sqrt( x2_init*x2_init + y2_init*y2_init )
L3_init = M3 * np.sqrt(v3_squared) * np.sqrt( x3_init*x3_init + y3_init*y3_init )
L_init = L1_init + L2_init + L3_init
# Time array
year_constant = 60**2 * 24 * 365.25
points_per_year = 1000
number_of_years = 26*5
number_of_points = number_of_years*points_per_year
t_max = number_of_years * year_constant
t_array = np.linspace(0, t_max, number_of_points)
# Lists containing initial conditions (parameters) and important constants.
# These appear in a certain order here, and the order must be adhered to
# everywhere else you create a list like this - in the function passed to
# odeint in both input and output, and what odeint outputs.
initial_parameters_two_body = [x1_init, y1_init, vx1_init, vy1_init,
x2_init, y2_init, vx2_init, vy2_init]
initial_parameters_three_body = [x1_init, y1_init, vx1_init, vy1_init,
x2_init, y2_init, vx2_init, vy2_init,
x3_init, y3_init, vx3_init, vy3_init]
constants_two_body = [G, M1, M2]
constants_three_body = [G, M1, M2, M3]
# Passing function to odeint and retrieving planet and star positions
# Tighter tolerances results in smaller energy deviations
solution_jupiter = odeint(two_body_field_function, initial_parameters_two_body, t_array, args=(constants_two_body,), rtol=1e-10)
solution_jupiter_saturn = odeint(three_body_field_function, initial_parameters_three_body, t_array, args=(constants_three_body,), rtol=1e-10)
# solution = array containing 8 columns for x_star, y_star etc. in order they appear in field_function. Each row is t value
# Columns:
# 0: x1
# 1: y1
# 2: vx1
# 3: vy1
# 4: x2
# 5: y2
# 6: vx2
# 7: vy2
# Velocities
vx1_jupiter, vy1_jupiter = solution_jupiter[:,2], solution_jupiter[:,3]
v1_squared_jupiter = vx1_jupiter*vx1_jupiter + vy1_jupiter*vy1_jupiter
vx2_jupiter, vy2_jupiter = solution_jupiter[:,6], solution_jupiter[:,7]
v2_squared_jupiter = vx2_jupiter*vx2_jupiter + vy2_jupiter*vy2_jupiter
# Positions
x1_jupiter, y1_jupiter = solution_jupiter[:,0], solution_jupiter[:,1]
x2_jupiter, y2_jupiter = solution_jupiter[:,4], solution_jupiter[:,5]
r_jupiter = np.sqrt ( (x1_jupiter - x2_jupiter)**2 + (y1_jupiter - y2_jupiter)**2 )
x1_AU_jupiter, y1_AU_jupiter = x1_jupiter/AU, y1_jupiter/AU
x2_AU_jupiter, y2_AU_jupiter = x2_jupiter/AU, y2_jupiter/AU # Converting to AU for plot
# Energies
KE = 0.5*M1*v1_squared_jupiter + 0.5*M2*v2_squared_jupiter
PE = -G*M1*M2/r_jupiter
energy_difference_jupiter = (KE + PE - initial_energy)/initial_energy
# Creating figures and axes
fig_planet = plt.figure(figsize=(8,6))
ax_planet = fig_planet.gca()
fig_sinusoid = plt.figure(figsize=(8,6))
ax_sinusoid = fig_sinusoid.gca()
fig_resonance = plt.figure(figsize=(8,6))
ax_resonance = fig_resonance.gca()
t_array = t_array / year_constant # Converting time back to years
fig_phase = plt.figure(figsize=(8,6))
ax_phase = fig_phase.gca()
x1, y1 = solution_jupiter_saturn[:,0], solution_jupiter_saturn[:,1]
x2, y2 = solution_jupiter_saturn[:,4], solution_jupiter_saturn[:,5]
x3, y3 = solution_jupiter_saturn[:,8], solution_jupiter_saturn[:,9]
r_12 = np.sqrt( (x1 - x2)**2 + (y1 - y2)**2 )
r_13 = np.sqrt( (x1 - x3)**2 + (y1 - y3)**2 )
r_23 = np.sqrt( (x2 - x3)**2 + (y2 - y3)**2 )
x1_AU, y1_AU = x1/AU, y1/AU
x2_AU, y2_AU = x2/AU, y2/AU # Converting to AU for plot
x3_AU, y3_AU = x3/AU, y3/AU
ax_planet.set_title("Jupiter, Saturn and Sun System")
ax_planet.plot(x1_AU, y1_AU, 'b', label="Sun")
ax_planet.plot(x2_AU, y2_AU, 'r', label="Jupiter")
ax_planet.plot(x3_AU, y3_AU, 'g', label="Saturn")
ax_planet.legend()
ax_sinusoid.set_title("Orbit Cycle for Jupiter and Jupiter-Saturn Systems")
ax_sinusoid.plot(t_array, x2_AU_jupiter, 'k--', label="Jupiter System")
ax_sinusoid.plot(t_array, x2_AU, 'r', label="Jupiter-Saturn System")
ax_sinusoid.legend()
ax_resonance.set_title("Saturn and Jupiter Cycles")
ax_resonance.plot(t_array, x2_AU, 'r', label="Jupiter")
ax_resonance.plot(t_array, x3_AU, 'g', label="Saturn")
ax_resonance.legend()
ax_phase.set_title("Comparing Orbital Positions for Solo and Coupled Systems")
ax_phase.plot(t_array, r_jupiter, 'r', label="Jupiter System")
ax_phase.plot(t_array, r_12, 'g', label="Jupiter-Saturn System")
ax_phase.legend()
plt.show()
#%%
plt.close()
fig = plt.figure(figsize=(8,6))
ax = fig.gca()
for i in range(len(x2_AU)):
x1 = x1_AU[i]
x2 = x2_AU[i]
x3 = x3_AU[i]
y1 = y1_AU[i]
y2 = y2_AU[i]
y3 = y3_AU[i]
ax.plot(x1, y1, 'bx')
ax.plot(x2, y2, 'rx')
ax.plot(x3, y3, 'gx')
plt.pause(0.0001)
``` |
{
"source": "jp-wagner/vccrosshair",
"score": 3
} |
#### File: vccrosshair/vccrosshair/__main__.py
```python
import argparse
from .Svm import Svm
from .Commit import Commit
from .BagOfWords import BagOfWords
parser = argparse.ArgumentParser(description='Linear Support Vector Machine that classifies a commit as VCC or non-VCC.')
parser.add_argument('-c', '--commit', type=str, default="HEAD", help='Hash of the commit that should be classified')
parser.add_argument('-r', '--repo', type=str, default=".", help='Path to the git repository containing the commit')
args = parser.parse_args()
def main():
svm = Svm()
bag_of_words = BagOfWords()
commit = Commit(args.repo, args.commit, bag_of_words)
commit.extract_features()
svm.vcc_or_unclassified(commit.get_feature_vector(), bag_of_words)
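# Illustrative invocation (assuming the package is installed and this module is its entry point):
#   python -m vccrosshair --repo /path/to/repo --commit <sha>
# which extracts features for the given commit and asks the SVM whether it looks like a VCC.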
``` |
{
"source": "jpwarren/holideck",
"score": 3
} |
#### File: holideck/api/base.py
```python
__author__ = "<NAME>"
__version__ = '0.02-dev'
__license__ = "MIT"
import os
import logging
# Set up logging
log = logging.getLogger('base')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s: %(name)s [%(levelname)s]: %(message)s"))
log.addHandler(handler)
log.setLevel(logging.DEBUG)
class HolidayBase(object):
"""
The base Holiday class for the main API
"""
NUM_GLOBES = 50
# Local representation of globe state
globes = [ (0,0,0), ] * NUM_GLOBES
def setglobe(self, globenum, r, g, b):
# FIXME: This should be (self, globenum, color) where color is
# a tuple of (r g, b).
"""Set a globe"""
if (globenum < 0) or (globenum >= self.NUM_GLOBES):
return
self.globes[globenum] = (r, g, b)
def fill(self, r, g, b):
"""Sets the whole string to a particular colour"""
self.globes = [ (int(r), int(g), int(b)), ] * self.NUM_GLOBES
#for e in self.globes:
# e[0] = int(r)
# e[1] = int(g)
# e[2] = int(b)
def getglobe(self, globenum):
"""Return a tuple representing a globe's RGB color value"""
if (globenum < 0) or (globenum >= self.NUM_GLOBES):
# Fail hard, don't ignore errors
raise IndexError("globenum %d does not exist", globenum)
return self.globes[globenum]
def set_pattern(self, pattern):
"""
Set the entire string in one go
"""
if len(pattern) != self.NUM_GLOBES:
raise ValueError("pattern length incorrect: %d != %d" % ( len(pattern), self.NUM_GLOBES) )
self.globes = pattern[:]
def chase(self, direction=True):
"""
Rotate all globes around one step.
@param direction: Direction to rotate: up if True, down if False
"""
if direction:
# Rotate all globes around by one place
oldglobes = self.globes[:]
self.globes = oldglobes[1:]
self.globes.append(oldglobes[0])
pass
else:
oldglobes = self.globes[:]
self.globes = oldglobes[:-1]
self.globes.insert(0, oldglobes[-1])
pass
return
def rotate(self, newr, newg, newb, direction=True):
"""
Rotate all globes, just like chase, but replace the 'first'
globe with new color.
"""
self.chase(direction)
if direction:
self.globes[-1] = (newr, newg, newb)
pass
else:
self.globes[0] = (newr, newg, newb)
pass
pass
def render(self):
raise NotImplementedError
class ButtonHoliday(HolidayBase):
"""
Used when running on a physical Holiday.
"""
def __init__(self):
super(ButtonHoliday, self).__init__()
self.pid = os.getpid()
self.pipename = '/run/compose.fifo'
try:
self.pipe = open(self.pipename, "wb")
except:
print "Couldn't open the pipe! Oh no!"
raise
self.pipe = None
pass
def render(self):
"""
Render globe colours to local pipe
"""
rend = []
rend.append("0x000010\n")
rend.append("0x%06x\n" % self.pid)
for g in self.globes:
tripval = (g[0] * 65536) + (g[1] * 256) + g[2]
rend.append("0x%06X\n" % tripval)
pass
self.pipe.write(''.join(rend))
self.pipe.flush()
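# Each frame written to the compose FIFO is therefore: a "0x000010" header line, the writer's
# PID as a hex line, then one "0xRRGGBB" line per globe (NUM_GLOBES lines in total).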
class ButtonApp(object):
"""
A ButtonApp runs on a physical Holiday using the button interface.
"""
def start(self):
"""
Do whatever is required to start up the app
"""
return
def stop(self):
"""
Do whatever is required to stop the app
"""
return
def up(self):
"""
Called when the Up button is pressed
"""
return
def down(self):
"""
Called when the Down button is pressed
"""
return
```
#### File: holideck/examples/holivid.py
```python
import numpy as np
import cv2
import math
import optparse
import time
from api.udpholiday import UDPHoliday
from holiscreen import render_to_hols
NUM_GLOBES = UDPHoliday.NUM_GLOBES
class HolividOptions(optparse.OptionParser):
"""
Command-line options parser
"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, **kwargs)
self.addOptions()
def addOptions(self):
self.add_option('-n', '--numstrings', dest='numstrings',
help="Number of Holiday strings to simulate [%default]",
type="int", default=25)
self.add_option('-f', '--file', dest='filename',
help="Video file to display.",
type="string" )
# Listen on multiple TCP/UDP ports, one for each Holiday we simulate
self.add_option('-p', '--portstart', dest='portstart',
help="Port number to start at for UDP listeners [%default]",
type="int", default=9988)
self.add_option('-o', '--orientation', dest='orientation',
help="Orientation of the strings [%default]",
type="choice", choices=['vertical', 'horizontal'], default='vertical')
self.add_option('', '--switchback', dest='switchback',
help="'Switchback' strings, make a single string display like its "
"more than one every m globes",
type="int")
self.add_option('', '--fps', dest='fps',
help="Set video playback frames-per-second. [%default]",
type="int", default=25)
pass
def parseOptions(self):
"""
Emulate twistedmatrix options parser API
"""
options, args = self.parse_args()
self.options = options
self.args = args
self.postOptions()
return self.options, self.args
def postOptions(self):
if len(self.args) < 1:
self.error("Specify address and port of remote Holiday(s)")
pass
if not self.options.filename:
self.error("Video filename not given.")
pass
pass
if __name__ == '__main__':
usage = "Usage: %prog [options] <hol_addr:hol_port> [<hol_addr:hol_port> ... ]"
optparse = HolividOptions(usage=usage)
options, args = optparse.parseOptions()
hols = []
if len(args) > 1:
for arg in args:
hol_addr, hol_port = arg.split(':')
hols.append(UDPHoliday(ipaddr=hol_addr, port=int(hol_port)))
else:
hol_addr, hol_port = args[0].split(':')
for i in range(options.numstrings):
hols.append(UDPHoliday(ipaddr=hol_addr, port=int(hol_port)+i))
pass
pass
if options.switchback:
if options.orientation == 'vertical':
height = options.switchback
pieces = int(math.floor(float(NUM_GLOBES) / height))
width = options.numstrings * pieces
else:
width = options.switchback
pieces = int(math.floor(float(NUM_GLOBES) / width))
height = options.numstrings * pieces
else:
if options.orientation == 'vertical':
height = NUM_GLOBES
pieces = options.numstrings
width = options.numstrings
else:
width = NUM_GLOBES
pieces = options.numstrings
height = options.numstrings
pass
pass
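# Worked example (assuming the default 50-globe strings): with --numstrings 25, --switchback 10
# and vertical orientation, height = 10, pieces = floor(50/10) = 5 and width = 25 * 5 = 125.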
cap = cv2.VideoCapture(options.filename)
newsize = (width, height)
skipframe = False
while True:
loopstart = time.time()
ret, frame = cap.read()
if ret != True:
print "No valid frame."
break
# Resize the frame into the resolution of our Holiday array
holframe = cv2.resize(frame, newsize, interpolation=cv2.INTER_CUBIC)
# The colours are in the wrong format, so convert them
holframe = cv2.cvtColor(holframe, cv2.COLOR_BGR2RGB)
# Display the original frame, for the demo
cv2.imshow('holivid monitor display', frame)
# A frame is just a Numpy array of pixel values, i.e. globelists. We need to take
# these values and map them onto our holiday array.
render_to_hols(holframe, hols, width, height,
options.orientation, options.switchback)
# Wait period between keycapture (in milliseconds)
# This gives us approximately the right number of frames per second
wait_time = 1000/options.fps
# Figure out how long the wait_time would be without the
# processing time
loopend = time.time()
# Adjust waiting based on how long it takes us to process
process_time = (loopend - loopstart) * 1000
wait_time = int(wait_time - process_time)
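# Illustrative numbers: at the default 25 fps the frame budget is 1000/25 = 40 ms, so a frame
# that took 15 ms to process leaves roughly 25 ms of waiting; wait_time can reach zero or go
# negative when processing exceeds the budget.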
if cv2.waitKey(wait_time) & 0xFF == ord('q'):
break
pass
cap.release()
cv2.destroyAllWindows()
```
#### File: holideck/examples/twinkle.py
```python
import optparse
import time
import sys
import random
import colorsys
import webcolors
import json
from simplexnoise import raw_noise_2d
from secretapi.holidaysecretapi import HolidaySecretAPI
import logging
log = logging.getLogger(sys.argv[0])
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s: %(name)s [%(levelname)s]: %(message)s"))
log.addHandler(handler)
log.setLevel(logging.DEBUG)
DEFAULT_HOL_PORT = 9988
class TwinkleOptions(optparse.OptionParser):
"""
Command-line options parser
"""
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, **kwargs)
self.addOptions()
def addOptions(self):
self.add_option('-n', '--numstrings', dest='numstrings',
help="Number of Holiday strings to use [%default]",
type="int", default=1)
self.add_option('-b', '--basecolor', dest='basecolor',
help="Color to initialize string to, as #nnnnnn",
type="string")
self.add_option('-c', '--change_chance', dest='change_chance',
help="% chance a given globe will change each round [%default]",
type="float", default=1.0 )
self.add_option('-a', '--animsleep', dest='anim_sleep',
help="Sleep between animation frames, in seconds [%default]",
type="float", default=0.1 )
self.add_option('-f', '--patternfile', dest='patternfile',
help="Initalise string with a pattern from a JSON format file",
type="string")
self.add_option('-t', '--twinkle-algo', dest='twinkle_algo',
help="Algorithm to use for twinkling [%default]",
type="choice", choices=['random', 'simplex', 'throb', 'chase'], default='simplex')
self.add_option('-i', '--init-only', dest='initonly',
help="Initialize string(s) and exit",
action="store_true")
self.add_option('-H', '--HUESTEP', dest='huestep_max',
help="Maximum step between hues [%default]",
type="float", default=0.1 )
self.add_option('-S', '--SATSTEP', dest='satstep_max',
help="Maximum step between saturations [%default]",
type="float", default=0.01 )
self.add_option('-V', '--VALSTEP', dest='valstep_max',
help="Maximum step between values [%default]",
type="float", default=0.2 )
self.add_option('', '--huediff-max', dest='huediff_max',
help="Maximum hue difference from basecolor [%default]",
type="float", default=0.0 )
self.add_option('', '--satdiff-max', dest='satdiff_max',
help="Maximum saturation difference from basecolor [%default]",
type="float", default=0.0 )
self.add_option('', '--valdiff-max', dest='valdiff_max',
help="Maximum value difference from basecolor [%default]",
type="float", default=0.0 )
self.add_option('', '--chase-forwards', dest='chase',
help="Lights chase around the string",
action="store_true")
self.add_option('', '--chase-backwards', dest='chase',
help="Lights chase around the string",
action="store_false")
self.add_option('', '--simplex-damper', dest='simplex_damper',
help="Amount of simplex noise dampening [%default]",
type="float", default=2.0)
self.add_option('', '--throb-speed', dest='throb_speed',
help="Speed of throb animation [%default]",
type="float", default=2.0)
self.add_option('', '--throb-up', dest='throb_dir',
help="Start throbbing up",
action="store_true")
self.add_option('', '--throb-down', dest='throb_dir',
help="Start throbbing down",
action="store_false")
def parseOptions(self):
"""
Emulate twistedmatrix options parser API
"""
options, args = self.parse_args()
self.options = options
self.args = args
self.postOptions()
return self.options, self.args
def postOptions(self):
if len(self.args) < 1:
self.error("Specify address and port of remote Holiday(s)")
pass
self.options.initpattern = None
if self.options.patternfile:
with open(self.options.patternfile) as fp:
jdata = json.load(fp)
self.options.initpattern = jdata['lights']
pass
pass
def init_hol(hol, basecolor=None, pattern=None):
"""
Initialize a Holiday to some random-ish colors
"""
if basecolor is not None:
(r, g, b) = webcolors.hex_to_rgb(basecolor)
hol.fill(r, g, b)
elif pattern is not None:
for globeidx, vals in enumerate(pattern):
(r, g, b) = webcolors.hex_to_rgb(vals)
hol.setglobe(globeidx, r, g, b)
pass
else:
for globeidx in range(0, HolidaySecretAPI.NUM_GLOBES):
color = []
# red
color.append(random.randint(0, 130))
#color.append(0)
# green
color.append(random.randint(0, 130))
# blue
color.append(random.randint(0, 130))
#color.append(0)
r, g, b = color
hol.setglobe(globeidx, r, g, b)
pass
hol.render()
pattern = hol.globes[:]
return pattern
def twinkle_holiday(hol, options, init_pattern, noise_array=None):
"""
Make a Holiday twinkle like the stars
"""
# For each globe, mostly have a random drift of brightness
# and hue, but occasionally jump in brightness up or down
if options.basecolor:
(base_r, base_g, base_b) = webcolors.hex_to_rgb(options.basecolor)
base_hsv = colorsys.rgb_to_hsv(base_r/255.0, base_g/255.0, base_b/255.0)
pass
if noise_array is None:
noise_array = [ 0, ] * HolidaySecretAPI.NUM_GLOBES
pass
if options.twinkle_algo == 'throb':
# Increase brightness by throb_speed / anim_sleep
# until max brightness, and then decrease
r, g, b = hol.getglobe(0)
(h, s, v) = colorsys.rgb_to_hsv(r/255.0, g/255.0, b/255.0)
# Check direction for throb. The limits are set to be visually
# interesting, because value above about 0.6 doesn't look much
# different.
if v > 0.6:
options.throb_dir = False
elif v < 0.02:
options.throb_dir = True
throb_amount = (1.0 / options.throb_speed * options.anim_sleep)
for idx in range(0, HolidaySecretAPI.NUM_GLOBES):
r, g, b = hol.getglobe(idx)
(h, s, v) = colorsys.rgb_to_hsv(r/255.0, g/255.0, b/255.0)
if options.throb_dir:
v += throb_amount
if v > 0.91:
v = 0.91
else:
v -= throb_amount
if v < 0.02:
v = 0.02
pass
(r, g, b) = colorsys.hsv_to_rgb(h, s, v)
hol.setglobe(idx, int(255*r), int(255*g), int(255*b))
pass
pass
elif options.twinkle_algo in ['simplex', 'random']:
for idx in range(0, HolidaySecretAPI.NUM_GLOBES):
# Choose globe update algorithm
if options.twinkle_algo == 'simplex':
nv = raw_noise_2d(noise_array[idx], random.random()) / options.simplex_damper
noise_array[idx] += nv
if noise_array[idx] > 1.0:
noise_array[idx] = 1.0
pass
elif noise_array[idx] < -1.0:
noise_array[idx] = -1.0
pass
ranger = (noise_array[idx] + 1.0) / 1.0
#log.debug("ranger: %f", ranger)
# Adjust colour. If basecolor, adjust from basecolor
if options.basecolor:
(base_r, base_g, base_b) = webcolors.hex_to_rgb(options.basecolor)
#log.debug("adjusting from base: %d %d %d", base_r, base_g, base_b)
r = int(base_r * ranger)
g = int(base_g * ranger)
b = int(base_b * ranger)
pass
else:
# adjust from original color
(base_r, base_g, base_b) = init_pattern[idx]
#log.debug("init pattern: %s", init_pattern[idx])
#log.debug("adjusting from orig: %d %d %d", base_r, base_g, base_b)
r = int(base_r * ranger)
#log.debug("adjusted red from orig: %d %d %d", base_r, base_g, base_b)
g = int(base_g * ranger)
b = int(base_b * ranger)
pass
if r > 255:
r = 255
if g > 255:
g = 255
if b > 255:
b = 255
hol.setglobe(idx, r, g, b)
#log.debug("init pattern: %s", init_pattern[idx])
else:
# % chance of updating a given globe
if random.random() < options.change_chance:
r, g, b = hol.getglobe(idx)
(h, s, v) = colorsys.rgb_to_hsv(r/255.0, g/255.0, b/255.0)
#log.debug("start h s v: %f %f %f", h, s, v)
# Adjust hue by a random amount
huestep = random.random() * options.huestep_max
# 50% chance of positive or negative step
if random.randint(0, 1):
h += huestep
if options.basecolor and abs(base_hsv[0] - h) > options.huediff_max:
h = base_hsv[0] + options.huediff_max
if h > 1.0:
h = h - 1.0
else:
h -= huestep
if options.basecolor and abs(h - base_hsv[0]) > options.huediff_max:
h = base_hsv[0] - options.huediff_max
if h < 0.0:
h = 1.0 + h
pass
satstep = random.random() * options.satstep_max
if random.randint(0, 1):
s += satstep
if options.basecolor and abs(base_hsv[1] - s) > options.satdiff_max:
s = base_hsv[1] + options.satdiff_max
if s > 1.0:
s = 1.0
else:
s -= satstep
if options.basecolor and abs(s - base_hsv[1]) > options.satdiff_max:
s = base_hsv[1] - options.satdiff_max
# Make sure things stay bright and colorful!
if s < 0.0:
s = 0.0
# Adjust value by a random amount
valstep = random.random() * options.valstep_max
# 50% chance of positive or negative step
if random.randint(0, 1):
v += valstep
if options.basecolor and abs(base_hsv[2] - v) > options.valdiff_max:
v = base_hsv[2] + options.valdiff_max
if v > 1.0:
v = 1.0
else:
v -= valstep
if options.basecolor and abs(v - base_hsv[2]) > options.valdiff_max:
v = base_hsv[2] - options.valdiff_max
if v < 0.2:
v = 0.2
pass
#log.debug("end h s v: %f %f %f", h, s, v)
(r, g, b) = colorsys.hsv_to_rgb(h, s, v)
#log.debug("r g b: %f %f %f", r, g, b)
hol.setglobe(idx, int(255*r), int(255*g), int(255*b))
pass
pass
pass
pass
elif options.twinkle_algo in ['chase']:
# Chase mode?
if options.chase:
# Rotate all globes around by one place
oldglobes = hol.globes[:]
hol.globes = oldglobes[1:]
hol.globes.append(oldglobes[0])
pass
else:
#log.debug("old: %s", hol.globes)
oldglobes = hol.globes[:]
hol.globes = oldglobes[:-1]
hol.globes.insert(0, oldglobes[-1])
#log.debug("new: %s", hol.globes)
pass
hol.render()
# Return the (possibly updated) simplex noise state so the caller can persist it between frames
return noise_array
if __name__ == '__main__':
usage = "Usage: %prog [options] <hol_addr:hol_port> [<hol_addr:hol_port> ... ]"
optparse = TwinkleOptions(usage=usage)
options, args = optparse.parseOptions()
hols = []
# List of holiday initial patterns
hol_inits = []
# List of holiday noise patterns
hol_noise = []
def split_addr_args(arg):
split_args = arg.split(':')
if len(split_args) == 1:
hol_addr = split_args[0]
hol_port = DEFAULT_HOL_PORT
else:
hol_addr = split_args[0]
hol_port = split_args[1]
return hol_addr, hol_port
if len(args) > 1:
for arg in args:
hol_addr, hol_port = split_addr_args(arg)
hols.append(HolidaySecretAPI(addr=hol_addr, port=int(hol_port)))
else:
hol_addr, hol_port = split_addr_args(args[0])
for i in range(options.numstrings):
hols.append(HolidaySecretAPI(addr=hol_addr, port=int(hol_port)+i))
pass
# Initialise holidays
for hol in hols:
hol_inits.append(init_hol(hol, options.basecolor, options.initpattern))
hol_noise.append(None)
pass
if options.initonly:
sys.exit(0)
while True:
for i, hol in enumerate(hols):
hol_noise[i] = twinkle_holiday(hol, options, hol_inits[i], hol_noise[i])
pass
time.sleep(options.anim_sleep)
``` |
{
"source": "jpwarren/libsnmp",
"score": 2
} |
#### File: lib/libsnmp/debug.py
```python
import logging
import os
class snmpLogger(logging.Logger):
def __init__(self, name):
pid = os.getpid()
FORMAT = "%(asctime)s [" + str(pid) + "] %(name)s: %(levelname)s - %(message)s"
level = logging.DEBUG
logging.Logger.__init__(self, name, level)
handler = logging.StreamHandler()
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
self.addHandler(handler)
return
logging.setLoggerClass(snmpLogger)
```
#### File: lib/libsnmp/rfc1902.py
```python
import util
import debug
import logging
import types
from rfc1155 import *
log = logging.getLogger('rfc1902')
## change logging level.. options of:
##
## logging.CRITICAL
## logging.ERROR
## logging.WARN
## logging.INFO
## logging.DEBUG
log.setLevel(logging.INFO)
# Add a new TagNumber for encoding purposes
asnTagNumbers['Counter64'] = 0x06
class Integer32(Integer):
""" A 32 bit integer
"""
MINVAL = -2147483648L
MAXVAL = 2147483647L
class Counter32(Counter):
""" A 32 bit counter
"""
pass
class Guage32(Guage):
""" A 32 bit Guage
"""
pass
class Counter64(Counter):
""" A 64 bit counter
"""
MINVAL = 0L
MAXVAL = 18446744073709551615L
asnTagClass = asnTagNumbers['Counter64']
class OctetString(OctetString):
""" An SNMP v2 OctetString must be between
0 and 65535 bytes in length
"""
def __init__(self, value=''):
if len(value) > 65535:
raise ValueError('OctetString must be no longer than 65535 bytes')
OctetString.__init__(self, value)
## Modify tag decode lookup table to use SNMPv2 classes
## instead of the old SNMPv1 classes. Little actual difference
## apart from the class names.
tagDecodeDict[0x02] = Integer32
tagDecodeDict[0x41] = Counter32
tagDecodeDict[0x42] = Guage32
tagDecodeDict[0x46] = Counter64
```
#### File: libsnmp/test/test_encoder.py
```python
import unittest
import logging
import string
import sys
sys.path.append('../lib')
from libsnmp import util
from libsnmp import debug
from libsnmp import rfc1155
# Some integer encodings to check
test_integers = {
0: '\000',
5: '\005',
15: '\017',
73: '\111',
128: '\000\200',
-127: '\201',
-128: '\200',
124787: '\001\347\163',
-1: '\377',
-267: '\376\365',
-5848548: '\246\302\034'
}
# octetstrings are simple, since they stay as they are
test_octetstrings = [
'fred',
'the small frog sat in the well',
'43 403i 594 5908kjljdfj weljf',
u'This is a unicode string',
u'This is another unicode string',
]
test_objectids = {
'.1.2.4.5.6': '\052\004\005\006',
'1.2.4.5.6': '\052\004\005\006',
'.2.3.3': '\123\003',
'.0.2.8.5': '\002\010\005',
'0.2.8.5': '\002\010\005',
'.1.2.65.7.3394.23.5.115.46': '\052\101\007\232\102\027\005\163\056'
}
test_sequences = {
'\002\001\016': [ rfc1155.Integer(14), ],
'\002\002\006\321': [ rfc1155.Integer(1745), ],
'\002\001\077\005\000': [ rfc1155.Integer(63), rfc1155.Null() ],
'\006\006\051\006\005\054\003\005\004\004\142\154\141\150\002\003\001\202\037': [ rfc1155.ObjectID('.1.1.6.5.44.3.5'), rfc1155.OctetString('blah'), rfc1155.Integer(98847) ]
}
test_sequenceOf = {
'blah': [ rfc1155.Integer, [ rfc1155.Integer(7), rfc1155.Integer(5567), rfc1155.Integer(84743) ] ],
'fred': [ rfc1155.ObjectID, [ rfc1155.ObjectID('.1.2.4.3'), rfc1155.ObjectID('.1.0.4.6.44') ] ]
}
test_ipaddresses = {
'blah': '10.232.8.4',
'fred': '255.255.255.0',
'albert': '192.168.127.12',
}
test_octets = {
# A fully encoded integer
'\002\001\005': [5, ],
# Another fully encoded integer
'\002\003\001\347\163': [124787, ],
# three integers
'\002\003\246\302\034\002\003\001\347\163\002\001\377': [-5848548, 124787, -1],
# a simple octet string
'\004\036the small frog sat in the well': ['the small frog sat in the well'],
# some object IDs
'\006\002\123\003': [ [2, 3, 3], ],
'\006\004\052\004\005\006': [ [1, 2, 4, 5, 6], ],
'\006\011\052\101\007\232\102\027\005\163\056': [ [1, 2, 65, 7, 3394, 23, 5, 115, 46], ],
# A Null
'\005\000': [ None, ],
}
class EncoderTest(unittest.TestCase):
def setUp(self):
self.log = logging.getLogger('EncoderTest')
self.log.setLevel(logging.DEBUG)
def tearDown(self):
logging.shutdown()
def test_integerEncode(self):
""" Test encoding of Integer type
"""
for item in test_integers.keys():
myobj = rfc1155.Integer(item)
# self.log.debug('Encoding int: %s' % myobj() )
octets = myobj.encodeContents()
# self.log.debug('Got value [length %s]: %s, oct: %s' % ( len(octets), util.octetsToHex(octets), util.octetsToOct(octets)) )
# self.log.debug('check against handcode: %s [%d] %s' % ( util.octetsToHex(test_integers[item]), len(octets), util.octetsToOct(test_integers[item]) ) )
self.assertEquals(test_integers[item], octets)
def test_integerEncodeDecode(self):
""" Test encode/decode of Integer type
"""
for item in test_integers.keys():
myobj = rfc1155.Integer(item)
# self.log.debug('Encoding int: %s' % myobj() )
octets = myobj.encodeContents()
# self.log.debug('Got value [length %s]: %s, oct: %s' % ( len(octets), util.octetsToHex(octets), util.octetsToOct(octets)) )
object = myobj.decodeContents(octets)
# self.log.debug('Got value [%s]: %s' % ( object, object.value) )
self.assertEquals(item, object.value)
def test_octetStringEncode(self):
""" Test encode of OctetString type
"""
# self.log.debug('testing octet string in octal and hex')
for item in test_octetstrings:
myobj = rfc1155.OctetString(item)
# self.log.debug('as hex: %s' % hex(myobj) )
# self.log.debug('as octal: %s' % oct(myobj) )
octets = myobj.encodeContents()
self.assertEquals(item, octets)
def test_octetStringEncodeDecode(self):
""" Test encode/decode of OctetString type
"""
for item in test_octetstrings:
myobj = rfc1155.OctetString(item)
octets = myobj.encodeContents()
object = myobj.decodeContents(octets)
self.assertEquals(item, object.value)
def test_objectidEncode(self):
"""Test encode of ObjectID type"""
for input, output in test_objectids.items():
myobj = rfc1155.ObjectID(input)
octets = myobj.encodeContents()
self.assertEquals(octets, output)
pass
return
def test_objectidEncodeDecode(self):
"""Test encode/decode of ObjectID type"""
for input, output in test_objectids.items():
myobj = rfc1155.ObjectID(input)
octets = myobj.encodeContents()
object = myobj.decodeContents(octets)
result = []
input_check = input.lstrip('.')
output_check = '.'.join( [ str(x) for x in object.value ] )
self.assertEquals(input_check, output_check)
pass
return
def test_nullEncode(self):
"""Test encode of Null type"""
myobj = rfc1155.Null()
octets = myobj.encodeContents()
self.assertEquals(octets, '')
return
def test_nullEncodeDecode(self):
"""Test encode/decode of Null type"""
myobj = rfc1155.Null()
octets = myobj.encodeContents()
object = myobj.decodeContents(octets)
self.assertEquals(object.value, None)
def test_sequenceEncode(self):
""" Test encode of Sequence type
"""
for item in test_sequences.keys():
myobj = rfc1155.Sequence(test_sequences[item])
octets = myobj.encodeContents()
#self.log.debug('Got value [length %s]: %s, oct: %s' % ( len(octets), util.octetsToHex(octets), util.octetsToOct(octets)) )
self.assertEquals(item, octets)
def test_sequenceEncodeDecode(self):
""" Test encode/decode of Sequence type
"""
for item in test_sequences.keys():
myobj = rfc1155.Sequence(test_sequences[item])
octets = myobj.encodeContents()
object = myobj.decodeContents(octets)
for x, y in zip(myobj.value, object.value):
self.assertEquals(x.__class__, y.__class__)
self.assertEquals(x.value, y.value)
def test_sequenceofEncode(self):
""" Test encode of SequenceOf type
"""
for item in test_sequenceOf.keys():
myobj = rfc1155.SequenceOf(test_sequenceOf[item][0], test_sequenceOf[item][1])
# self.log.debug('SequenceOf: %s' % myobj)
def test_sequenceofEncodeDecode(self):
""" Test encode/decode of SequenceOf type
"""
for item in test_sequenceOf.keys():
myobj = rfc1155.SequenceOf(test_sequenceOf[item][0], test_sequenceOf[item][1])
# self.log.debug('SequenceOf: %s' % myobj)
octets = myobj.encodeContents()
object = myobj.decodeContents(octets)
for x, y in zip(myobj.value, object.value):
self.assertEquals(x.__class__, y.__class__)
self.assertEquals(x.value, y.value)
def test_sequenceofNegativeTest_Type(self):
""" Test mismatching SequenceOf types
"""
self.assertRaises(ValueError, rfc1155.SequenceOf, rfc1155.Integer, [rfc1155.OctetString('fhdhd')])
def test_ipAddressEncode(self):
""" Test encode of IPAddress type
"""
for item in test_ipaddresses.keys():
myobj = rfc1155.IPAddress(test_ipaddresses[item])
# self.log.debug('IPAddress: %s' % myobj)
def test_octetDecode(self):
""" Test decoding of multiple object types
"""
decoder = rfc1155.Asn1Object()
for item in test_octets.keys():
# self.log.debug('decoding octets: %s [%s]' % ( item, util.octetsToHex(item) ))
objectList = decoder.decode( item )
# self.log.debug('objectList: %s' % objectList)
# for object in objectList:
# self.log.debug('object: %s, value: %s' % ( object.__class__.__name__, object) )
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jpwarren/twitreach",
"score": 4
} |
#### File: jpwarren/twitreach/twitreach.py
```python
import sys
import os.path
import argparse
import ConfigParser
from itertools import izip_longest
import twitter
import logging
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('twitreach')
def get_reach(tw, args):
"""
Get the marketing 'reach' of a Twitter user.
The reach is the total count of follower's followers.
In this simple version, we over-count because we don't exclude
duplicates.
"""
username = args.userids[0]
followers = tw.followers.ids(screen_name=username)
reach = get_follower_count(tw, followers['ids'])
print("Reach for @%s: %s" % (username, '{0:,}'.format(reach)))
def get_follower_count(tw, userids):
"""
Get the count of how many followers a twitter userid has.
"""
# We need to chunk this into multiple requests,
# because twitter has a limit of 100 userids per-request.
reach = 0
for chunk in chunker(100, userids):
userid_str = ','.join(['%s' % x for x in chunk if x is not None])
users = tw.users.lookup(user_id=userid_str)
for user in users:
reach += int(user['followers_count'])
pass
pass
return reach
def chunker(n, iterable):
"""
Return chunks of items of length n from iterable.
chunker(3, 'abcdef') -> ('a','b','c'), ('d', 'e', 'f')
"""
return izip_longest(*[iter(iterable)]*n)
def authenticate(args):
"""
Authenticate with Twitter and return an authenticated
Twitter() object to use for API calls
"""
# import the config file
cp = ConfigParser.SafeConfigParser()
cp.read(os.path.expanduser(args.config))
token = cp.get('twitter', 'token')
token_key = cp.get('twitter', 'token_key')
con_secret = cp.get('twitter', 'con_secret')
con_secret_key = cp.get('twitter', 'con_secret_key')
tw = twitter.Twitter(auth=twitter.OAuth(token,
token_key,
con_secret,
con_secret_key))
return tw
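# The config file (default ~/.twitreach) is expected to be an INI file along these lines,
# with the keys inferred from the cp.get() calls above:
#   [twitter]
#   token = ...
#   token_key = ...
#   con_secret = ...
#   con_secret_key = ...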
if __name__ == '__main__':
ap = argparse.ArgumentParser(description="Get Twitter Reach",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
ap.add_argument('userids', nargs="+", help="Users to find reach for")
ap.add_argument('-c', '--config', default='~/.twitreach', help="Config file")
args = ap.parse_args()
tw = authenticate(args)
get_reach(tw, args)
``` |
{
"source": "jpwatt/AllyInvest.py",
"score": 3
} |
#### File: ally/responses/response.py
```python
import json
class Response():
def __init__(self, response_format, data):
self.response_format = response_format
self.raw_data = data
def parse_xml(self, data):
pass
def parse_json(self, data):
self.json = json.loads(json.dumps(data["response"]))
def get_raw_data(self):
return self.raw_data
``` |
{
"source": "jpwatt/PyAlly",
"score": 2
} |
#### File: PyAlly/ally/Info.py
```python
from .Api import Endpoint, RequestType
class Clock ( Endpoint ):
_type = RequestType.Info
_resource = 'market/clock.json'
class Status ( Endpoint ):
_type = RequestType.Info
_resource = 'utility/status.json'
def clock ( block: bool = True):
"""Return the current market clock.
Gets a simple dict with timestamp and the status of the market (pre-market, post-market, etc.),
including the next time that the market clock changes status.
Args:
block:
Specify whether to block thread if request exceeds rate limit
Returns:
A dictionary with timestamp, current market status, and any error information.
Raises:
RateLimitException: If block=False, rate limit problems will be raised
Example:
.. code-block:: python
# Equivalent to the static function
# ally.Info.clock()
a.clock()
# => {
'date': '2020-06-14 18:03:58.0-04:00',
'unixtime': '1592172240.069',
'status': {
'current': 'close',
'next': 'pre',
'change_at': '08:00:00'
},
'error': 'Success',
}
"""
return Clock().request(block=block)
def status ( block: bool = True ):
"""Return the status of the API service.
Gets a simple dict with timestamp and the current status (up, down, etc.) of the service.
Args:
block:
Specify whether to block thread if request exceeds rate limit
Returns:
A dictionary with current time, and the status of the API service.
Raises:
RateLimitException: If block=False, rate limit problems will be raised
Example:
.. code-block:: python
# Equivalent to the static function
# ally.Info.status()
a.status()
# => {
'time': 'Sun, 14, Jun 2020 18:17:06 GMT',
'error': 'Success'
}
"""
return Status().request(block=block)
```
#### File: ally/News/Search.py
```python
from ..Api import AuthenticatedEndpoint, RequestType
class SearchNews ( AuthenticatedEndpoint ):
_type = RequestType.Info
_resource = 'market/news/search.json'
def req_body ( self, **kwargs ):
"""Return get params together with post body data
"""
params = {
'symbols':kwargs.get('symbols'),
'maxhits':kwargs.get('limit',10)
}
return params, None
def extract ( self, response ):
"""Extract certain fields from response
"""
k = response.json().get('response')['articles']['article']
return k
@staticmethod
def DataFrame ( raw ):
import pandas as pd
# Create dataframe from our dataset
df = pd.DataFrame( raw ).replace({'na':None}).set_index('id')
return df
def searchNews ( self, symbols, limit=None, dataframe = True, block: bool = True ):
"""Searches for news on a set of symbols.
Calls the 'market/news/search.json' endpoint to search for
news articles related to some set of symbols.
Args:
symbols: Specify the stock symbols for which to search
limit: (int) maximum number of hits (10 default)
dataframe: whether to return results as dataframe
block: Specify whether to block thread if request exceeds rate limit
Returns:
Dataframe, or list
Raises:
RateLimitException: If block=False, rate limit problems will be raised
Example:
.. code-block:: python
df = a.searchNews('spy')
df.columns
# Index(['date', 'headline', 'story'], dtype='object')
df.index
# Index([...], dtype='object', name='id')
"""
result = SearchNews (
auth = self.auth,
account_nbr = self.account_nbr,
block = block,
limit = limit,
symbols = symbols
).request()
if dataframe:
try:
result = SearchNews.DataFrame ( result )
except:
raise
return result
```
#### File: ally/Option/expirations.py
```python
from ..Api import AuthenticatedEndpoint, RequestType
class Expirations ( AuthenticatedEndpoint ):
_type = RequestType.Info
_resource = 'market/options/expirations.json'
def req_body ( self, **kwargs ):
"""Return get params together with post body data
"""
params = {
"symbol":kwargs.get('symbol')
}
return params, None
def extract ( self, response ):
"""Extract certain fields from response
"""
k = response.json().get('response')['expirationdates']['date']
# Make sure we have a valid object, not None
if k is None:
k = []
if self.useDatetime:
from datetime import datetime
f = lambda x: datetime.strptime( x, '%Y-%m-%d' )
else:
f = str
return list(map( f, k ))
def expirations ( self, symbol, useDatetime = True, block: bool = True ):
"""Gets list of available expiration dates for a symbol.
Calls the 'market/options/expirations.json' endpoint to get list of all
exp_dates available for some given equity.
Args:
symbol: Specify the stock symbol against which to query
useDatetime: Specify whether to return datetime objects, or strings
block: Specify whether to block thread if request exceeds rate limit
Returns:
List of dates (datetime obj, or string)
Raises:
RateLimitException: If block=False, rate limit problems will be raised
Example:
.. code-block:: python
a.expirations('spy')
# [ datetime.datetime(2022, 3, 18, 0, 0), ... ]
a.expirations('spy', useDatetime = False)
# [ '2022-03-18', ... ]
"""
# Create request
req = Expirations(
auth = self.auth,
account_nbr = self.account_nbr,
block = block,
symbol = symbol
)
# Add in the extra information
req.useDatetime = useDatetime
# result
result = req.request()
return result
```
#### File: ally/Order/Outstanding.py
```python
from ..Api import AccountEndpoint, RequestType
from .order import Order
class OutstandingOrders ( AccountEndpoint ):
"""Send an order off
"""
_type = RequestType.Order
_resource = 'accounts/{0}/orders.json'
_method = 'GET'
def extract ( self, response ):
"""Extract certain fields from response
"""
response = response.json()['response']
raworders = response['orderstatus']['order']
if not isinstance(raworders, list):
raworders = [raworders]
orders = [ Order(fixml=x['fixmlmessage']) for x in raworders]
return orders
def orders ( self, block: bool = True ):
"""View all recent orders in the last 24 hours.
Calls accounts/./orders.json from the Ally API.
Args:
block: Specify whether to block thread if request exceeds rate limit
Returns:
A list of Order objects. Attributes can be viewed in the
same way as orders created by the user.
Raises:
RateLimitException: If block=False, rate limit problems will be raised
"""
result = OutstandingOrders(
auth = self.auth,
account_nbr = self.account_nbr,
block = block
).request()
return result
``` |
{
"source": "jpwatts/django-positions",
"score": 3
} |
#### File: examples/photos/tests.py
```python
from django.test import TestCase
import doctest
import unittest
import pprint
from examples.photos.forms import PhotoForm
from examples.photos.models import Album, Photo
class PhotosTestCase(TestCase):
def setUp(self):
self.album = Album.objects.create(name="Vacation")
self.bahamas = self.album.photos.create(name="Bahamas")
self.bahamas_id = self.bahamas.id
self.assertEqual(self.bahamas.position, 0)
self.jamaica = self.album.photos.create(name="Jamaica")
self.jamaica_id = self.jamaica.id
self.assertEqual(self.jamaica.position, 0)
self.grand_cayman = self.album.photos.create(name="Grand Cayman")
self.grand_cayman_id = self.grand_cayman.id
self.assertEqual(self.grand_cayman.position, 0)
self.cozumel = self.album.photos.create(name="Cozumel")
self.cozumel_id = self.cozumel.id
self.assertEqual(self.cozumel.position, 0)
def refresh(self):
self.bahamas = Photo.objects.get(id=self.bahamas_id)
self.jamaica = Photo.objects.get(id=self.jamaica_id)
self.grand_cayman = Photo.objects.get(id=self.grand_cayman_id)
self.cozumel = Photo.objects.get(id=self.cozumel_id)
def tearDown(self):
Album.objects.all().delete()
def test_reordered_positions(self):
ordered_by_position = list(self.album.photos.order_by('position').values_list('name', 'position'))
expected_order = [(u'Cozumel', 0), (u'Grand Cayman', 1), (u'Jamaica', 2), (u'Bahamas', 3)]
self.assertEqual(
ordered_by_position,
expected_order
)
def test_renamed_positions(self):
self.refresh()
new_name = '<NAME>'
self.cozumel.name = new_name
self.cozumel.save(update_fields=['name'])
self.refresh()
self.assertEqual(self.cozumel.name, new_name)
self.assertEqual(self.cozumel.position, 0)
self.jamaica.name = "<NAME>"
self.jamaica.save(update_fields=['name', 'position'])
self.refresh()
self.assertEqual(self.jamaica.position, 2)
self.jamaica.position = -1
self.jamaica.save(update_fields=['name', 'position'])
self.refresh()
self.assertEqual(self.jamaica.position, 3)
def test_form_renamed_position(self):
self.refresh()
grand_cayman_form = PhotoForm(dict(name="<NAME>"), instance=self.grand_cayman)
grand_cayman_form.save()
self.refresh()
self.assertEqual(self.grand_cayman.position, 1)
``` |
{
"source": "jpweber/wardroom",
"score": 2
} |
#### File: kubernetes-master/filter_plugins/kube_master.py
```python
import re
import socket
class FilterModule(object):
def filters(self):
return {
'kube_lookup_hostname': self.kube_lookup_hostname,
}
def kube_lookup_hostname(self, ip, hostname, many=False):
ips = set()
ip = ip.split(':')[0]
if ip and ip != "":
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
ips.add(ip)
try:
(_, _, iplist) = socket.gethostbyname_ex(hostname)
ips |= set(iplist)
except socket.error as e:
pass
if many:
ips.add(hostname)
return sorted(list(ips))
else:
return sorted(list(ips))[0]
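# Illustrative template usage (hypothetical variable names):
#   {{ ansible_default_ipv4.address | kube_lookup_hostname(inventory_hostname) }}
# returns the lowest sorted IP; with many=True it returns the sorted list including the hostname.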
```
#### File: wardroom/swizzle/provision.py
```python
import argparse
import jinja2
import os
import pprint
import re
import subprocess
import tempfile
import yaml
WARDROOM_BOXES = {
'xenial': 'generic/ubuntu1804',
'centos7': 'generic/centos7',
}
def vagrant_status():
""" Run `vagrant status` and parse the current vm state """
node_state = {}
output = subprocess.check_output(['vagrant', 'status'])
for i, line in enumerate(output.splitlines()):
if i < 2:
continue
parts = re.split(r'\s+', line)
if len(parts) == 3:
node_state[parts[0]] = parts[1]
elif len(parts) == 4:
node_state[parts[0]] = " ".join(parts[1:3])
return node_state
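# Illustrative `vagrant status` lines being parsed (format assumed from the splits above):
#   "master1   running (virtualbox)"      -> node_state["master1"] = "running"
#   "node1     not created (virtualbox)"  -> node_state["node1"] = "not created"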
def vagrant_up():
""" Bring up the vm's with a `vagrant up`"""
subprocess.call(['vagrant', 'up', '--parallel'])
def vagrant_ssh_config(tempfile):
""" Get the current ssh config via `vagrant ssh-config` """
output = subprocess.check_output(['vagrant', 'ssh-config'])
with open(tempfile, 'w') as fh:
fh.write(output)
def run_ansible(playbook, inventory_file, extra_args=[]):
""" Run ansible playbook via subprocess.
We do not want to link ansible as it is GPL """
ssh_tempfile = tempfile.mkstemp()
vagrant_ssh_config(ssh_tempfile[1])
run_env = os.environ.copy()
ansible_env = {}
ansible_env['ANSIBLE_CONFIG'] = "ansible.cfg"
ansible_env['ANSIBLE_SSH_ARGS'] = os.getenv('ANSIBLE_SSH_ARGS', '')
ansible_env['ANSIBLE_SSH_ARGS'] += " -F %s" % (ssh_tempfile[1])
run_env.update(ansible_env)
cmd = [
"ansible-playbook",
"-i",
inventory_file,
playbook,
]
cmd += extra_args
print "Wardroom ansible environment:\n %s\n" % pprint.pformat(ansible_env)
print "Wardroom ansbile command:\n %s\n" % " ".join(cmd)
subprocess.call(cmd, env=run_env)
def get_vagrant_provider():
return os.environ.get('VAGRANT_DEFAULT_PROVIDER', 'virtualbox')
def get_loadbalancer_ip():
provider = get_vagrant_provider()
if provider == 'virtualbox':
return "10.10.10.3"
output = subprocess.check_output(['vagrant', 'ssh-config', 'loadbalancer'])
for line in output.split('\n'):
match = re.match(r'\s*HostName\s+(.*)', line)
if match:
return match.groups()[0]
raise Exception("Could not determine loadbalancer IP")
def merge_dict(source, destination):
for key, value in source.items():
if isinstance(value, dict):
node = destination.setdefault(key, {})
merge_dict(value, node)
else:
destination[key] = value
return destination
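# Example: merge_dict({"a": {"b": 1}}, {"a": {"c": 2}, "d": 3}) returns
# {"a": {"c": 2, "b": 1}, "d": 3}; nested keys from `source` are merged into
# `destination`, overwriting scalar values that collide.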
def generate_inventory(config, node_state={}):
""" from node_state generate a dynamic ansible inventory.
return temporary inventory file path """
inventory = {
"loadbalancer": {"hosts": {}},
"etcd": {"hosts": {}},
"primary_master": {"hosts": {}},
"masters": {"hosts": {}},
"nodes": {"hosts": {}},
}
for node, state in node_state.items():
if state == "running":
if node.startswith('master'):
inventory["masters"]["hosts"][node] = {}
inventory["etcd"]["hosts"][node] = {}
elif node.startswith("node"):
inventory["nodes"]["hosts"][node] = {}
elif node.startswith("loadbalancer"):
inventory["loadbalancer"]["hosts"][node] = {}
inventory['primary_master']["hosts"]["master1"] = {}
data = None
with open(config, 'rb') as fh:
render_args = {
'loadbalancer_ip': get_loadbalancer_ip(),
'vagrant_provider': get_vagrant_provider(),
}
config = jinja2.Template(fh.read()).render(**render_args)
data = yaml.load(config)
inventory = merge_dict(data, inventory)
temp_file = tempfile.mkstemp()[1]
with open(temp_file, 'w') as fh:
yaml.dump(inventory, fh)
print "Running with inventory:\n"
print yaml.dump(inventory)
print
return temp_file
def state_purpose():
print "############################################################"
print " provision.py is a tool to help test wardroom playbooks "
print " against Vagrant provisioned infrastructure. It is simply "
print " a wrapper around Vagrant and ansible. All of the Ansible "
print " playbooks may be run against any ssh-enabled hosts. "
print " provision.py in intended for refereence purposes. "
print "############################################################"
print
print
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--action', default='install',
choices=['install', "upgrade"])
parser.add_argument('-o', '--os', default='xenial',
choices=WARDROOM_BOXES.keys())
parser.add_argument('config')
args, extra_args = parser.parse_known_args()
os.environ["WARDROOM_BOX"] = WARDROOM_BOXES[args.os]
state_purpose()
node_state = vagrant_status()
start_vms = False
for node, state in node_state.items():
if state != 'running':
start_vms = True
break
if start_vms:
vagrant_up()
node_state = vagrant_status()
inventory_file = generate_inventory(args.config, node_state)
playbook = "%s.yml" % args.action
run_ansible(playbook, inventory_file, extra_args)
if __name__ == '__main__':
main()
```
#### File: wardroom/wardroom/aws.py
```python
import logging
import sys
import time
import boto3
import click
logger = logging.getLogger(name=__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stderr)
logger.addHandler(handler)
yaml_template ='''
{}:
'64': {}
'''.strip('\r\n')
def copy_to_region(image, src_region, dest_region):
session = boto3.session.Session(region_name=dest_region)
local_client = session.client('ec2')
logger.info("creating image in region {}".format(dest_region))
resp = local_client.copy_image(
Name=image.name,
SourceImageId=image.image_id,
SourceRegion=src_region,
)
local_ec2 = session.resource('ec2')
new_image = local_ec2.Image(resp['ImageId'])
return (new_image, dest_region)
def make_public_and_tag(image, region, desc):
while True:
image.load()
if image.state == 'available':
image.modify_attribute(
LaunchPermission={
'Add': [{'Group': 'all'}]
}
)
# Can only modify one attribute at a time
image.modify_attribute(Description={'Value': desc})
logger.info("region {} ami {} is available".format(region, image.id))
break
time.sleep(5)
def encode_desc(dict_):
return " ".join("{0}={1}".format(*item) for item in dict_.items())
@click.group()
def aws():
pass
@aws.command(name='copy-ami')
@click.option('-r', '--src-region', default='us-east-1', help='AWS Region')
@click.option('-q', '--quiet', is_flag=True)
@click.argument('src_ami')
def copy_ami(src_region, src_ami, quiet):
if quiet:
logger.setLevel(logging.WARN)
session = boto3.session.Session(region_name=src_region)
client = session.client('ec2')
dest_regions = [region['RegionName'] for region in client.describe_regions()['Regions']
if region['RegionName'] != src_region
]
dest_regions.sort()
logger.info("detected {} regions".format(len(dest_regions)))
image = session.resource('ec2').Image(src_ami)
description = encode_desc({i['Key']: i['Value'] for i in image.tags or []})
# copy to all regions
images = [copy_to_region(image, src_region, region) for region in dest_regions]
# Add the original
images.append((image, src_region))
# print out the YAML
for (image, region) in images:
print(yaml_template.format(region, image.id))
logger.info("waiting for all images to be available. In the mean time,"
"that YAML can be pasted into the quickstart template.")
# wait for all images to be available
for (image, region) in images:
make_public_and_tag(image, region, description)
``` |
{
"source": "jpweldon/Module_1_Challenge",
"score": 4
} |
#### File: jpweldon/Module_1_Challenge/loan_analyzer.py
```python
import csv # I want to import the csv library.
from pathlib import Path # I want the Path function from the pathlib library.
"""Part 1: Automate the Calculations.
Automate the calculations for the loan portfolio summaries.
First, let's start with some calculations on a list of prices for 5 loans.
1. Use the `len` function to calculate the total number of loans in the list.
2. Use the `sum` function to calculate the total of all loans in the list.
3. Using the sum of all loans and the total number of loans, calculate the average loan price.
4. Print all calculations with descriptive messages.
"""
loan_costs = [500, 600, 200, 1000, 450]
# How many loans are in the list?
# @TODO: Use the `len` function to calculate the total number of loans in the list.
# Print the number of loans from the list
def loan_count(loan_info):
number_of_loans = len(loan_info)
return number_of_loans
def loan_count_print(loan_info_1):
number_of_loans_1 = loan_count(loan_info_1)
print(f"There is a total of {number_of_loans_1} loans.")
loan_count_print(loan_costs)
# What is the total of all loans?
# @TODO: Use the `sum` function to calculate the total of all loans in the list.
# Print the total value of the loans
def loan_total(loan_data):
total_of_loans = sum(loan_data)
return total_of_loans
def loan_total_print(loan_data_1):
total_of_loans_1 = loan_total(loan_data_1)
print(f"The loans sum to a total of ${total_of_loans_1: .2f}.")
loan_total_print(loan_costs)
# What is the average loan amount from the list?
# @TODO: Using the sum of all loans and the total number of loans, calculate the average loan price.
# Print the average loan amount
def loan_average (loan_material):
average_of_loans = loan_total(loan_material)/loan_count(loan_material)
return average_of_loans
def loan_average_print(loan_material_1):
average_of_loans_1 = loan_average(loan_material_1)
print(f"The average loan price is ${average_of_loans_1: .2f}.")
loan_average_print(loan_costs)
"""Part 2: Analyze Loan Data.
Analyze the loan to determine the investment evaluation.
Using more detailed data on one of these loans, follow these steps to calculate a Present Value, or a "fair price" for what this loan would be worth.
1. Use get() on the dictionary of additional information to extract the **Future Value** and **Remaining Months** on the loan.
a. Save these values as variables called `future_value` and `remaining_months`.
b. Print each variable.
@NOTE:
**Future Value**: The amount of money the borrower has to pay back upon maturity of the loan (a.k.a. "Face Value")
**Remaining Months**: The remaining maturity (in months) before the loan needs to be fully repaid.
2. Use the formula for Present Value to calculate a "fair value" of the loan. Use a minimum required return of 20% as the discount rate.
3. Write a conditional statement (an if-else statement) to decide if the present value represents the loan's fair value.
a. If the present value of the loan is greater than or equal to the cost, then print a message that says the loan is worth at least the cost to buy it.
b. Else, the present value of the loan is less than the loan cost, then print a message that says that the loan is too expensive and not worth the price.
@NOTE:
If Present Value represents the loan's fair value (given the required minimum return of 20%), does it make sense to buy the loan at its current cost?
"""
# Given the following loan data, you will need to calculate the present value for the loan
loan = {
"loan_price": 500,
"remaining_months": 9,
"repayment_interval": "bullet",
"future_value": 1000,
}
# @TODO: Use get() on the dictionary of additional information to extract the Future Value and Remaining Months on the loan.
# Print each variable.
def future_value(loan_1):
fv = loan_1.get("future_value")
return fv
def future_value_print(loan_2):
fv_1 = future_value(loan_2)
print(f"The future value of the loan is ${fv_1: .2f}.")
future_value_print(loan)
def remaining_months(loan_3):
rm = loan_3.get("remaining_months")
return rm
def remaining_months_print(loan_4):
rm_1 = remaining_months(loan_4)
print(f"The months remaining on the loan is {rm_1} months.")
remaining_months_print(loan)
# @TODO: Use the formula for Present Value to calculate a "fair value" of the loan.
# Use a minimum required return of 20% as the discount rate.
# You'll want to use the **monthly** version of the present value formula.
# HINT: Present Value = Future Value / (1 + Discount_Rate/12) ** remaining_months
discount_rate = 0.20
def present_value(loan_5):
pv = future_value(loan_5) / ((1 + discount_rate/12) ** remaining_months(loan_5))
return pv
def present_value_print(loan_6):
pv_1 = present_value(loan_6)
print(f"The present value of the loan is ${pv_1: .2f} given a future value of ${future_value(loan_6): .2f}, a discount rate of {discount_rate * 100: .2f}%, and {remaining_months(loan_6)} months remaining.")
present_value_print(loan)
# If Present Value represents what the loan is really worth, does it make sense to buy the loan at its cost?
# @TODO: Write a conditional statement (an if-else statement) to decide if the present value represents the loan's fair value.
# If the present value of the loan is greater than or equal to the cost, then print a message that says the loan is worth at least the cost to buy it.
# Else, the present value of the loan is less than the loan cost, then print a message that says that the loan is too expensive and not worth the price.
def loan_cost(loan_7):
lc = loan_7.get("loan_price")
return lc
def loan_cost_print(loan_8):
lc_1 = loan_cost(loan_8)
print(f"The cost of the loan is ${lc_1: .2f}.")
loan_cost_print(loan)
def buy_nobuy_loan(loan_9):
if present_value(loan_9) >= loan_cost(loan_9):
print(f"The loan is worth at least the cost to buy it.")
#loan_cost_print(loan_9)
#present_value_print(loan_9)
else:
print(f"The loan is too expensive and not worth the price.")
#loan_cost_print(loan_9)
#present_value_print(loan_9)
buy_nobuy_loan(loan)
"""Part 3: Perform Financial Calculations.
Perform financial calculations using functions.
1. Define a new function that will be used to calculate present value.
a. This function should include parameters for `future_value`, `remaining_months`, and the `annual_discount_rate`
b. The function should return the `present_value` for the loan.
2. Use the function to calculate the present value of the new loan given below.
a. Use an `annual_discount_rate` of 0.2 for this new loan calculation.
"""
# Given the following loan data, you will need to calculate the present value for the loan
new_loan = {
"loan_price": 800,
"remaining_months": 12,
"repayment_interval": "bullet",
"future_value": 1000,
}
# I already created the present_value and present_value_print functions.
# I am creating the next function to satisfy the above specifications for the assignment.
# @TODO: Define a new function that will be used to calculate present value.
# This function should include parameters for `future_value`, `remaining_months`, and the `annual_discount_rate`
# The function should return the `present_value` for the loan.
def calculate_present_value(fv_2, rm_2, discount_rate_1):
pv_2 = fv_2 / ((1 + discount_rate_1/12) ** rm_2)
return pv_2 # I already created the present_value function. I am creating this function to satisfy the above specifications for the assignment.
# present_value(new_loan) # accomplishes the same
# print(f"${present_value(new_loan): .2f}") # prints the returned value with a dollar sign and two decimal places
# @TODO: Use the function to calculate the present value of the new loan given below.
# Use an `annual_discount_rate` of 0.2 for this new loan calculation.
annual_discount_rate = 0.20
present_value_1 = calculate_present_value(new_loan["future_value"], new_loan["remaining_months"], annual_discount_rate)
print(f"The present value of the loan is: ${present_value_1: .2f}") # I already created the present_value_print function. I am creating this function to satisfy the above specifications for the assignment.
# present_value_print(new_loan) # performs a similar print statement with additional details
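# Worked check of the formula above using the new_loan values:
#   1000 / (1 + 0.20/12) ** 12 = 1000 / 1.2194... ≈ 820.08
# so the $800 asking price sits below the loan's present value.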
"""Part 4: Conditionally filter lists of loans.
In this section, you will use a loop to iterate through a series of loans and select only the inexpensive loans.
1. Create a new, empty list called `inexpensive_loans`.
2. Use a for loop to select each loan from a list of loans.
a. Inside the for loop, write an if-statement to determine if the loan_price is less than 500
b. If the loan_price is less than 500 then append that loan to the `inexpensive_loans` list.
3. Print the list of inexpensive_loans.
"""
loans = [
{
"loan_price": 700,
"remaining_months": 9,
"repayment_interval": "monthly",
"future_value": 1000,
},
{
"loan_price": 500,
"remaining_months": 13,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 200,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
{
"loan_price": 900,
"remaining_months": 16,
"repayment_interval": "bullet",
"future_value": 1000,
},
]
# @TODO: Create an empty list called `inexpensive_loans`
inexpensive_loans = []
# @TODO: Loop through all the loans and append any that cost $500 or less to the `inexpensive_loans` list
for loan in loans:
loan_price = loan.get("loan_price")
if loan_price <= 500:
inexpensive_loans.append(loan)
# @TODO: Print the `inexpensive_loans` list
print(inexpensive_loans)
"""Part 5: Save the results.
Output this list of inexpensive loans to a csv file
1. Use `with open` to open a new CSV file.
a. Create a `csvwriter` using the `csv` library.
b. Use the new csvwriter to write the header variable as the first row.
c. Use a for loop to iterate through each loan in `inexpensive_loans`.
i. Use the csvwriter to write the `loan.values()` to a row in the CSV file.
Hint: Refer to the official documentation for the csv library.
https://docs.python.org/3/library/csv.html#writer-objects
"""
# Set the output header
header = ["loan_price", "remaining_months", "repayment_interval", "future_value"]
# Set the output file path
output_path = Path("inexpensive_loans.csv")
# @TODO: Use the csv library and `csv.writer` to write the header row
# and each row of `loan.values()` from the `inexpensive_loans` list.
with open(output_path, 'w', newline='') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(header) # I am writing the header first.
for loan in inexpensive_loans:
csvwriter.writerow(loan.values()) # I am writing the inexpensive loans rows.
``` |
{
"source": "jpweldon/Module_2_Challenge",
"score": 3
} |
#### File: qualifier/tests/test_qualifier.py
```python
from pathlib import Path
#Import fileio
from qualifier.utils import fileio
# Import Calculators
from qualifier.utils import calculators
# Import Filters
from qualifier.filters import credit_score
from qualifier.filters import debt_to_income
from qualifier.filters import loan_to_value
from qualifier.filters import max_loan_size
# Test the save_csv Function
def test_save_csv():
csvpath = Path('./data/output/qualifying_loans.csv')
loans_data = fileio.load_csv(Path('./data/daily_rate_sheet.csv'))
fileio.save_csv(csvpath, loans_data)
assert Path('./data/output/qualifying_loans.csv').exists()
# Test the calculate_monthly_debt_ratio Function
def test_calculate_monthly_debt_ratio():
assert calculators.calculate_monthly_debt_ratio(1500, 4000) == 0.375
# Test the calculate_loan_to_value_ratio Function
def test_calculate_loan_to_value_ratio():
assert calculators.calculate_loan_to_value_ratio(210000, 250000) == 0.84
# Test the filters
def test_filters():
bank_data = fileio.load_csv(Path('./data/daily_rate_sheet.csv'))
current_credit_score = 750
debt = 1500
income = 4000
loan = 210000
home_value = 250000
monthly_debt_ratio = 0.375
loan_to_value_ratio = 0.84
# Run qualification filters
bank_data_filtered = max_loan_size.filter_max_loan_size(loan, bank_data)
bank_data_filtered = credit_score.filter_credit_score(current_credit_score, bank_data_filtered)
bank_data_filtered = debt_to_income.filter_debt_to_income(monthly_debt_ratio, bank_data_filtered)
bank_data_filtered = loan_to_value.filter_loan_to_value(loan_to_value_ratio, bank_data_filtered)
assert len(bank_data_filtered) == 6
# Test the save_csv Function with the Filtered Bank Data
csvpath = Path('./data/output/qualifying_loans_filtered.csv')
fileio.save_csv(csvpath, bank_data_filtered)
assert Path('./data/output/qualifying_loans_filtered.csv').exists()
``` |
{
"source": "jpweldon/Module_2_Practice",
"score": 4
} |
#### File: 01-Creating_the_Loan_Qualifier_README_File/Completed/calculators.py
```python
def calculate_monthly_debt_ratio(monthly_debt_payment, monthly_income):
"""
Calculates the monthly debt ratio.
Converts the monthly debt payment and monthly income
parameters to int values and divides the monthly debt
payment by the monthly income to produce the monthly
debt ratio.
Parameters:
monthly_debt_payment (float): The monthly debt payment.
monthly_income (float): The monthly income.
Returns:
monthly_debt_ratio (int): The monthly debt ratio.
"""
monthly_debt_ratio = int(monthly_debt_payment) / int(monthly_income)
return monthly_debt_ratio
def calculate_loan_to_value_ratio(loan_amount, home_value):
"""
Calculates the loan to value ratio.
Converts the loan amount and home value parameters to
int values and divides the loan amount by the home value
to produce the loan to value ratio.
Parameters:
loan_amount (float): The loan amount.
home_value (float): The value of the home.
Returns:
loan_to_value_ratio (int): The loan to value ratio.
"""
loan_to_value_ratio = int(loan_amount) / int(home_value)
return loan_to_value_ratio
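# Quick sanity check with representative values:
#   calculate_monthly_debt_ratio(1500, 4000) == 0.375
#   calculate_loan_to_value_ratio(210000, 250000) == 0.84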
```
#### File: Calculator_Application/calculations/mul.py
```python
def mul(num1, num2):
return num1 * num2
```
#### File: Calculator_Application/calculations/sub.py
```python
def sub(num1, num2):
return num1 - num2
```
#### File: qualifier/utils/calculators.py
```python
def calculate_monthly_debt_ratio(monthly_debt_payment, monthly_income):
monthly_debt_ratio = int(monthly_debt_payment) / int(monthly_income)
return monthly_debt_ratio
# Calculates user's loan-to-value
def calculate_loan_to_value_ratio(loan_amount, home_value):
loan_to_value_ratio = int(loan_amount) / int(home_value)
return loan_to_value_ratio
``` |
{
"source": "jpweytjens/ET-Jekyll",
"score": 3
} |
#### File: _includes/code_snippets/read_parquet_fast.py
```python
from functools import partial
import pandas as pd
import pyarrow as pa
from tqdm.auto import tqdm
from tqdm.contrib.concurrent import process_map
def _read_parquet(filename, columns=None):
"""
Wrapper to pass to a ProcessPoolExecutor to read parquet files as fast as possible. The PyArrow engine (v4.0.0) is faster than the fastparquet engine (v0.7.0) as it can read columns in parallel. Explicitly enable multithreaded column reading with `use_threads=True`.
Parameters
----------
filename : str
Path of the parquet file to read.
columns : list, default=None
List of columns to read from the parquet file. If None, reads all columns.
Returns
-------
pandas Dataframe
"""
return pd.read_parquet(
filename, columns=columns, engine="pyarrow", use_threads=True
)
def read_parquet(
files,
columns=None,
parallel=True,
n_concurrent_files=8,
n_concurrent_columns=4,
show_progress=True,
ignore_index=True,
chunksize=None,
):
"""
Read a single parquet file or a list of parquet files and return a pandas DataFrame.
If `parallel==True`, it's on average 50% faster than `pd.read_parquet(..., engine="fastparquet")`. Limited benchmarks indicate that the default values for `n_concurrent_files` and `n_concurrent_columns` are the fastest combination on a 32 core CPU. `n_concurrent_files` * `n_concurrent_columns` <= the number of available cores.
Parameters
----------
files : list or str
String with a path or list of strings with paths of the parquet file(s) to be read.
columns : list, default=None
List of columns to read from the parquet file(s). If None, reads all columns.
parallel : bool, default=True
If True, reads both files and columns in parallel. If False, read the files serially while still reading the columns in parallel.
n_concurrent_files : int, default=8
Number of files to read in parallel.
n_concurrent_columns : int, default=4
Number of columns to read in parallel.
show_progress : bool, default=True
If True, shows a tqdm progress bar with the number of files that have already been read.
ignore_index : bool, default=True
If True, do not use the index values along the concatenation axis. The resulting axis will be labeled 0, ..., n-1. This is useful if you are concatenating objects where the concatenation axis does not have meaningful indexing information.
chunksize : int, default=None
Number of files to pass as a single task to a single process. Values greater than 1 can improve performance if each task is expected to take a similar amount of time to complete and `len(files) > n_concurrent_files`. If None, chunksize is set to `len(files) / n_concurrent_files` if `len(files) > n_concurrent_files` else it's set to 1.
Returns
------
pandas DataFrame
"""
# ensure files is a list when reading a single file
if isinstance(files, str):
files = [files]
# no need for more CPUs than files
if len(files) < n_concurrent_files:
n_concurrent_files = len(files)
# no need for more workers than columns
if columns:
if len(columns) < n_concurrent_columns:
n_concurrent_columns = len(columns)
# set number of threads used for reading the columns of each parquet files
pa.set_cpu_count(n_concurrent_columns)
# try to optimize the chunksize based on
# https://stackoverflow.com/questions/53751050/python-multiprocessing-understanding-logic-behind-chunksize
# this assumes each task takes roughly the same amount of time to complete,
# i.e. each dataset is roughly the same size. If there are only a few files
# to be read, i.e. `len(files) < n_concurrent_files`, give each CPU a single
# file to read. When there are more files than CPUs, give chunks of multiple
# files to each CPU to minimize the overhead of assigning files after every
# completed file read
if (chunksize is None) and (len(files) > n_concurrent_files):
chunksize, remainder = divmod(len(files), n_concurrent_files)
if remainder:
chunksize += 1
else:
chunksize = 1
if parallel is True:
_read_parquet_map = partial(_read_parquet, columns=columns)
dfs = process_map(
_read_parquet_map,
files,
max_workers=n_concurrent_files,
chunksize=chunksize,
disable=not show_progress,
)
else:
dfs = [_read_parquet(file, columns=columns) for file in tqdm(files, disable=not show_progress)]
# reduce the list of dataframes to a single dataframe
df = pd.concat(dfs, ignore_index=ignore_index)
return df
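# Example usage (illustrative; the glob pattern and column names are hypothetical):
#   from glob import glob
#   df = read_parquet(glob("data/part-*.parquet"), columns=["id", "value"],
#                     n_concurrent_files=8, n_concurrent_columns=4)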
``` |
{
"source": "jpwhalley/literaryclock",
"score": 4
} |
#### File: jpwhalley/literaryclock/convert_books_wrapper.py
```python
def convert(type='books/*.mobi'):
"""Convert books in the current directory using Calibre command line function.
INPUT: The format of books to convert in the current directory.
FUNCTION: convert(type)
OUTPUT: All the books of that type converted to txt files.
Time taken: < 1 minute per mobi file (< 10 MB), longer for pdfs."""
from glob import glob
from os import remove
import subprocess
import time
start_time = time.time()
ebooks = glob(type)
for n,item in enumerate(ebooks):
temp = item.split('.')
if temp[-1] != 'txt':
submit_name = '/Applications/calibre.app/Contents/console.app/Contents/MacOS/./ebook-convert "'+ item + '" "' + temp[0] + '.txt"'
subprocess.call([submit_name], shell=True)
# remove(item) # Uncomment if you want to remove the original book format after conversion
print n, (time.time() - start_time)
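# Example (illustrative): convert('books/*.epub') would convert every EPUB under ./books,
# assuming the Calibre ebook-convert path above matches the local install.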
``` |
{
"source": "jpwhalley/PCAWG-QC_Graphs",
"score": 3
} |
#### File: jpwhalley/PCAWG-QC_Graphs/PCAWG_QC_Star_Rating.py
```python
def star_rating(data='Supplementary_Table_1.tsv'):
"""Imports the QC measures in the form of a tsv (Supplementary Table 1 in the PCAWG-QC paper), calculates which pass for each QC measure and gives a star rating. Various graphs to show various quality measures are also plotted.
INPUT: TSV files saved from google sheets containing the data, and a metadata file linking the projects to tissue types. Numerical calculations are done using the numpy and scipy.stats packages. Graphs are plotted using the matplotlib.pyplot package. Also, collections.Counter is needed for manipulation of the data.
FUNCTION: star_rating(data)
OUTPUT: A TSV file with the star rating and a series of graphs used to illustrate the different QC measures and the star rating.
Command line to run (from same folder as the supplementary tables):
python -c 'import PCAWG_QC_Star_Rating; PCAWG_QC_Star_Rating.star_rating()'
Time Taken: ~ 30 seconds"""
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from scipy.stats import gaussian_kde
### First, calculate the thresholds for the Mean/Median Coverage ratio, which are the whiskers from the boxplots of the normal and tumour samples.
f = open(data, 'r')
line = f.next()
medmean_norm = []
medmean_tumo = []
norm_ids = []
for line in f:
temp = line.split('\t')
if (temp[9] != 'NA') and (temp[2] not in norm_ids):
norm_ids.append(temp[2])
medmean_norm.append(float(temp[9]))
if temp[11] != 'NA':
medmean_tumo.append(float(temp[11]))
f.close()
# Plot it
fig = plt.figure(1, figsize=(9, 6))
ax = fig.add_subplot(111)
bp = ax.boxplot([medmean_norm, medmean_tumo])
ax.set_xticklabels(['Normal', 'Tumour'])
ax.axhline(1, color='k', linestyle='dashed', linewidth=2)
fig_name = 'MeanMed_boxplot.pdf'
fig.savefig(fig_name, bbox_inches='tight')
whiskers = [item.get_ydata() for item in bp['whiskers']]
fig.clf()
for item in whiskers:
print item[1]
### Second, collect all the QC data and calculate the star rating for each normal-tumour sample pair
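# Scoring scheme implemented below: each of the five QC measures is worth up to one star
# (mean coverage, evenness as FWHM plus the median/mean ratio, somatic calling power,
# read pairs mapping to different chromosomes, and base bias). The paired normal/tumour
# checks score 0.5 star each and calling power scores a full star, so a pair that passes
# everything reaches 5.0 stars.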
## Grab the data
f = open(data, 'r')
line = f.next()
# These lists are for the comparison of the evenness-of-coverage methods
Med_Mean_size_norm = []
Med_Mean_size_tumo = []
fwhm_norm = []
fwhm_tumo = []
# Empty lists to record the individual QC measures for each sample
FWHM_size_normal = []
FWHM_size_tumour = []
MedMean_size_normal = []
MedMean_size_tumour = []
CallPow_size_normal = []
CallPow_size_tumour = []
DiffChrom_size_normal = []
DiffChrom_size_tumour = []
BaseBias_size_normal = []
BaseBias_size_tumour = []
Mean_size_normal = []
Mean_size_tumour = []
FWHM_norm = {}
FWHM_tumo = {}
CallPow = {}
DiffChrom_norm = {}
DiffChrom_tumo = {}
BaseBias_norm ={}
BaseBias_tumo = {}
Mean_norm = {}
Mean_tumo = {}
# Dictionary to store the star ratings
starred = {}
all_dam = []
# Lists to store the samples for which we already have the normal QC measure, so we don't count a normal twice when a donor has multiple tumour samples
norm_ids_mean = []
norm_ids_fwhm = []
norm_ids_diff = []
norm_ids_base = []
norm_ids_all = []
# Also open a tsv to record the results
g = open('PCAWG-QC_Star_Rating.tsv', 'w')
temp = line.split('\t')
g.write(temp[0] + '\t' + temp[1] + '\t' + temp[2] + '\t' + temp[3] + '\t' + temp[4] + '\t' + temp[5] + '\tStar_rating\n')
for line in f:
temp = line.split('\t')
add = True
stars = 0
# Mean
if temp[7] != 'NA' and temp[8] != 'NA':
if float(temp[7]) > 25:
stars += 0.5
if temp[2] not in norm_ids_mean:
norm_ids_mean.append(temp[2])
if temp[0] in Mean_norm:
passed = Mean_norm[temp[0]]['pass']
passed += 1
Mean_norm[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
Mean_norm[temp[0]] = passed
else:
if temp[2] not in norm_ids_mean:
norm_ids_mean.append(temp[2])
if temp[0] in Mean_norm:
failed = Mean_norm[temp[0]]['fail']
failed += 1
Mean_norm[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
Mean_norm[temp[0]] = failed
if float(temp[8]) > 30:
if float(temp[7]) > 25:
stars += 0.5
if temp[0] in Mean_tumo:
passed = Mean_tumo[temp[0]]['pass']
passed += 1
Mean_tumo[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
Mean_tumo[temp[0]] = passed
else:
if temp[0] in Mean_tumo:
failed = Mean_tumo[temp[0]]['fail']
failed += 1
Mean_tumo[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
Mean_tumo[temp[0]] = failed
else:
add = False
# FWHM
if temp[10] != 'NA' and temp[12] != 'NA' and temp[9] != 'NA' and temp[11] != 'NA':
if (float(temp[10]) < 0.205) and (whiskers[0][1] <= float(temp[9]) <= whiskers[1][1]):
stars += 0.5
norm_pass = True
if temp[2] not in norm_ids_fwhm:
norm_ids_fwhm.append(temp[2])
if temp[0] in FWHM_norm:
passed = FWHM_norm[temp[0]]['pass']
passed += 1
FWHM_norm[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
FWHM_norm[temp[0]] = passed
elif float(temp[10]) >= 0.205:
norm_pass = False
if temp[2] not in norm_ids_fwhm:
norm_ids_fwhm.append(temp[2])
if temp[0] in FWHM_norm:
failed = FWHM_norm[temp[0]]['fail']
failed += 1
FWHM_norm[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
FWHM_norm[temp[0]] = failed
else:
norm_pass = False
if temp[2] not in norm_ids_fwhm:
norm_ids_fwhm.append(temp[2])
if temp[0] in FWHM_norm:
failed = FWHM_norm[temp[0]]['fail']
failed += 1
FWHM_norm[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
FWHM_norm[temp[0]] = failed
if (float(temp[12]) < 0.34) and (whiskers[2][1] <= float(temp[11]) <= whiskers[3][1]):
if norm_pass:
stars += 0.5
if temp[0] in FWHM_tumo:
passed = FWHM_tumo[temp[0]]['pass']
passed += 1
FWHM_tumo[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
FWHM_tumo[temp[0]] = passed
elif float(temp[12]) >= 0.34: # >= 0.54
if temp[0] in FWHM_tumo:
failed = FWHM_tumo[temp[0]]['fail']
failed += 1
FWHM_tumo[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
FWHM_tumo[temp[0]] = failed
else:
if temp[0] in FWHM_tumo:
failed = FWHM_tumo[temp[0]]['fail']
failed += 1
FWHM_tumo[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
FWHM_tumo[temp[0]] = failed
else:
add = False
# Call_Pow
if temp[13] != 'NA':
if int(temp[13]) >= 2.6*10**9:
stars += 1.0
if temp[0] in CallPow:
passed = CallPow[temp[0]]['pass']
passed += 1
CallPow[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
CallPow[temp[0]] = passed
else:
if temp[0] in CallPow:
failed = CallPow[temp[0]]['fail']
failed += 1
CallPow[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
CallPow[temp[0]] = failed
else:
add = False
# Diff_Chrom
if temp[14] != 'NA' and temp[15] != 'NA':
if float(temp[14]) < 3:
stars += 0.5
if temp[2] not in norm_ids_diff:
norm_ids_diff.append(temp[2])
if temp[0] in DiffChrom_norm:
passed = DiffChrom_norm[temp[0]]['pass']
passed += 1
DiffChrom_norm[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
DiffChrom_norm[temp[0]] = passed
else:
if temp[2] not in norm_ids_diff:
norm_ids_diff.append(temp[2])
if temp[0] in DiffChrom_norm:
failed = DiffChrom_norm[temp[0]]['fail']
failed += 1
DiffChrom_norm[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
DiffChrom_norm[temp[0]] = failed
if float(temp[15]) < 3:
if float(temp[14]) < 3:
stars += 0.5
if temp[0] in DiffChrom_tumo:
passed = DiffChrom_tumo[temp[0]]['pass']
passed += 1
DiffChrom_tumo[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
DiffChrom_tumo[temp[0]] = passed
else:
if temp[0] in DiffChrom_tumo:
failed = DiffChrom_tumo[temp[0]]['fail']
failed += 1
DiffChrom_tumo[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
DiffChrom_tumo[temp[0]] = failed
else:
add = False
# Base_Bias
if temp[16] != 'NA' and temp[17].rstrip() != 'NA':
if float(temp[16]) < 2:
stars += 0.5
if temp[2] not in norm_ids_base:
norm_ids_base.append(temp[2])
if temp[0] in BaseBias_norm:
passed = BaseBias_norm[temp[0]]['pass']
passed += 1
BaseBias_norm[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
BaseBias_norm[temp[0]] = passed
else:
if temp[2] not in norm_ids_base:
norm_ids_base.append(temp[2])
if temp[0] in BaseBias_norm:
failed = BaseBias_norm[temp[0]]['fail']
failed += 1
BaseBias_norm[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
BaseBias_norm[temp[0]] = failed
if float(temp[17].rstrip()) < 2:
if float(temp[16]) < 2:
stars += 0.5
if temp[0] in BaseBias_tumo:
passed = BaseBias_tumo[temp[0]]['pass']
passed += 1
BaseBias_tumo[temp[0]]['pass'] = passed
else:
passed = {'pass':1, 'fail':0}
BaseBias_tumo[temp[0]] = passed
else:
if temp[0] in BaseBias_tumo:
failed = BaseBias_tumo[temp[0]]['fail']
failed += 1
BaseBias_tumo[temp[0]]['fail'] = failed
else:
failed = {'pass':0, 'fail':1}
BaseBias_tumo[temp[0]] = failed
else:
add = False
if add:
if temp[0] in starred:
star_temp = starred[temp[0]]
star_temp.append(stars)
starred[temp[0]] = star_temp
else:
starred[temp[0]] = [stars]
all_dam.append(stars)
if temp[2] not in norm_ids_all:
norm_ids_all.append(temp[2])
Med_Mean_size_norm.append(float(temp[9]))
fwhm_norm.append(float(temp[10]))
# if float(temp[14]) < 20:
Mean_size_normal.append(float(temp[7]))
MedMean_size_normal.append(abs(1-float(temp[9])))
FWHM_size_normal.append(float(temp[10]))
CallPow_size_normal.append(float(temp[13]))
DiffChrom_size_normal.append(float(temp[14]))
BaseBias_size_normal.append(float(temp[16]))
Med_Mean_size_tumo.append(float(temp[11]))
fwhm_tumo.append(float(temp[12]))
Mean_size_tumour.append(float(temp[8]))
MedMean_size_tumour.append(abs(1-float(temp[11])))
FWHM_size_tumour.append(float(temp[12]))
CallPow_size_tumour.append(float(temp[13]))
DiffChrom_size_tumour.append(float(temp[15]))
BaseBias_size_tumour.append(float(temp[17].rstrip()))
# Write out the star rating to a tsv file
g.write(temp[0] + '\t' + temp[1] + '\t' + temp[2] + '\t' + temp[3] + '\t' + temp[4] + '\t' + temp[5] + '\t' + str(stars) + '\n')
else:
print 'We do not have complete QC data for this sample'
print temp[0] + '\t' + temp[1] + '\t' + temp[2] + '\t' + temp[3] + '\t' + temp[4] + '\t' + temp[5]
f.close()
g.close()
# Get the tissue type linked to each project
f = open('Supplementary_Table_2.tsv', 'r')
tissues = {}
line = f.next()
for line in f:
temp = line.split('\t')
if temp[1].strip() in tissues:
named = tissues[temp[1].strip()]
named.append(temp[0])
else:
named = [temp[0]]
tissues[temp[1].strip()] = named
f.close()
tissues_sorted = []
for key in tissues:
tissues_sorted.append(key)
tissues_sorted.sort()
### Third, density scatter plots for the normal and tumour samples, showing how the two evenness-of-coverage measures compare
#% Calculate the point density for the normal samples
x = np.array(Med_Mean_size_norm)
y = np.array(fwhm_norm)
xy = np.vstack([x,y])
z = gaussian_kde(xy)(xy)
# Sort the points by density, so that the densest points are plotted last
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# Now the plot
fig, ax = plt.subplots()
ax.axvline(x=whiskers[0][1], color='k', linestyle='dashed', linewidth=2)
plt.text(.85,0.66, 'Fails for Med/Mean', color='red', rotation=90)
ax.axvline(x=whiskers[1][1], color='k', linestyle='dashed', linewidth=2)
plt.text(1.02,0.7,'Passes for Med/Mean', color='green',rotation=90)
plt.text(1.07,0.66, 'Fails for Med/Mean', color='red', rotation=90)
ax.axhline(y=0.205, color='k', linestyle='dashed', linewidth=2)
plt.text(0.71,0.17,'Passes for FWHM', color='green')
plt.text(0.71,0.215,'Fails for FWHM', color='red')
# ax.set_yscale('log')
# ax.set_xscale('log')
ax.set_xlim(.7,1.1)
ax.set_ylim(0,.8)
cax = ax.scatter(x, y, c=z, s=30, edgecolor='')
fig.colorbar(cax)
ax.set_xlabel('Median/Mean')
ax.set_ylabel('FWHM')
fig_name = 'Evenness_med-mean_fwhm_normal_scattterplot.pdf'
fig.savefig(fig_name)
plt.show()
plt.clf()
#% Calculate the point density for the tumour samples
x = np.array(Med_Mean_size_tumo)
y = np.array(fwhm_tumo)
xy = np.vstack([x,y])
z = gaussian_kde(xy)(xy)
# Sort the points by density, so that the densest points are plotted last
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# Plot a new school scatter plot
fig, ax = plt.subplots()
ax.axvline(x=whiskers[2][1], color='k', linestyle='dashed', linewidth=2)
plt.text(whiskers[2][1]+.008,0.7,'Passes for Med/Mean', color='green',rotation=90)
plt.text(whiskers[2][1]-.018,0.66, 'Fails for Med/Mean', color='red', rotation=90)
ax.axvline(x=whiskers[3][1], color='k', linestyle='dashed', linewidth=2)
plt.text(whiskers[3][1]-.018,0.7,'Passes for Med/Mean', color='green',rotation=90)
plt.text(whiskers[3][1]+.008,0.66, 'Fails for Med/Mean', color='red', rotation=90)
ax.axhline(y=0.34, color='k', linestyle='dashed', linewidth=2)
plt.text(0.71,0.35,'Fails for FWHM', color='red')
plt.text(0.71,0.3,'Passes for FWHM', color='green')
ax.set_xlim(.7,1.1)
ax.set_ylim(0,.8)
cax = ax.scatter(x, y, c=z, s=30, edgecolor='')
fig.colorbar(cax)
ax.set_xlabel('Median/Mean')
ax.set_ylabel('FWHM')
fig_name = 'Evenness_med-mean_fwhm_tumour_scattterplot.pdf'
fig.savefig(fig_name)
plt.show()
plt.clf()
### Fourth, these are individual plots of the QC data, showing what proportion passed and failed for individual projects. These figures did not make it to the final paper, but are kept here for completeness' sake.
qcs = ['Mean_norm', 'Mean_tumo', 'FWHM_norm', 'FWHM_tumo', 'CallPow', 'DiffChrom_norm', 'DiffChrom_tumo', 'BaseBias_norm', 'BaseBias_tumo']
for k,qc in enumerate([Mean_norm, Mean_tumo, FWHM_norm, FWHM_tumo, CallPow, DiffChrom_norm, DiffChrom_tumo, BaseBias_norm, BaseBias_tumo]):
faill = 0
passs = 0
for key in qc:
passs += qc[key]['pass']
faill += qc[key]['fail']
percent = (faill / float(passs + faill)) * 100
qc['Total'] = {'fail': faill, 'pass': passs}
print 'For ' + qcs[k] + ' we have ' + str(percent) + ' failing (total = ' + str(passs + faill) + ')'
labelled = []
tish = ['', 'Total', '']
organ = ['', 'Total', '']
passed = []
failed = []
total = []
for key in qc:
labelled.append(key)
labelled.sort()
for key in tissues_sorted:
c = True
for item in tissues[key]:
if item in labelled:
tish.append(item)
if c:
organ.append(key)
c = False
else:
organ.append(' ')
tish.append('')
organ.append('')
for key in tish:
if key == '':
passed.append(0)
failed.append(0)
total.append('')
else:
pass_temp = qc[key]['pass']
fail_temp = qc[key]['fail']
temp = float(pass_temp + fail_temp)
passed.append(pass_temp/temp * 100)
failed.append(fail_temp/temp * 100)
total.append(str(int(temp)))
N = len(tish)
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, passed, width, color='blue')
p2 = plt.bar(ind, failed, width, color='red', bottom=passed)
plt.title(qcs[k])
locs, labels = plt.xticks(ind + width/2., (organ))
plt.setp(labels, rotation=90)
plt.tick_params(axis='both', which='major', labelsize=5)
plt.legend((p1[0], p2[0]), ('Pass', 'Fail'), bbox_to_anchor=(1.02, .55), fontsize='x-small')
plt.ylim(0,100)
plt.yticks(range(0, 101, 20), [str(x) + "%" for x in range(0, 101, 20)], fontsize=5)
for j,item in enumerate(ind+0.1):
plt.text(item,15, tish[j] +': '+ total[j], color='white', size=5, rotation=90, horizontalalignment='left')
fig_name = '' + qcs[k] + '_project_bias.pdf'
plt.savefig(fig_name)
plt.show()
plt.clf()
### Fifth, plots of the star ratings for each project, as well as a bar summarising the star ratings for all the normal-tumour sample pairs in PCAWG.
# Get the star rating in a usable form to plot
one = []
onehalf = []
two = []
twohalf = []
three = []
threehalf = []
four = []
fourhalf = []
five = []
total = []
see_all = []
equal_add = True
for key in tish:
if key != '':
if key in starred:
temp = Counter(starred[key])
if equal_add:
see_all = temp
equal_add = False
else:
see_all = see_all + temp
if 1.0 in temp:
one.append((temp[1.0]/float(len(starred[key])))*100)
else:
one.append(0)
if 1.5 in temp:
onehalf.append((temp[1.5]/float(len(starred[key])))*100)
else:
onehalf.append(0)
if 2.0 in temp:
two.append((temp[2.0]/float(len(starred[key])))*100)
else:
two.append(0)
if 2.5 in temp:
twohalf.append((temp[2.5]/float(len(starred[key])))*100)
else:
twohalf.append(0)
if 3.0 in temp:
three.append((temp[3.0]/float(len(starred[key])))*100)
else:
three.append(0)
if 3.5 in temp:
threehalf.append((temp[3.5]/float(len(starred[key])))*100)
else:
threehalf.append(0)
if 4.0 in temp:
four.append((temp[4.0]/float(len(starred[key])))*100)
else:
four.append(0)
if 4.5 in temp:
fourhalf.append((temp[4.5]/float(len(starred[key])))*100)
else:
fourhalf.append(0)
if 5.0 in temp:
five.append((temp[5.0]/float(len(starred[key])))*100)
else:
five.append(0)
total.append(str(len(starred[key])))
else:
one.append(0)
onehalf.append(0)
two.append(0)
twohalf.append(0)
three.append(0)
threehalf.append(0)
four.append(0)
fourhalf.append(0)
five.append(0)
total.append('')
else:
one.append(0)
onehalf.append(0)
two.append(0)
twohalf.append(0)
three.append(0)
threehalf.append(0)
four.append(0)
fourhalf.append(0)
five.append(0)
total.append('')
vote_all = 0
for item in see_all:
vote_all += see_all[item]
one[1] = (see_all[1.0]/float(vote_all)) * 100
onehalf[1] = (see_all[1.5]/float(vote_all)) * 100
two[1] = (see_all[2.0]/float(vote_all)) * 100
twohalf[1] = (see_all[2.5]/float(vote_all)) * 100
three[1] = (see_all[3.0]/float(vote_all)) * 100
threehalf[1] = (see_all[3.5]/float(vote_all)) * 100
four[1] = (see_all[4.0]/float(vote_all)) * 100
fourhalf[1] = (see_all[4.5]/float(vote_all)) * 100
five[1] = (see_all[5.0]/float(vote_all)) * 100
total[1] = str(vote_all)
N = len(tish)
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars: can also be len(x) sequence
pq = plt.bar(ind, one, width, color ='gray')
pp = plt.bar(ind, onehalf, width, color ='red', bottom=one)
p0 = plt.bar(ind, two, width, color= 'blue', bottom =[one[h] + onehalf[h] for h in range(len(threehalf))])
p1 = plt.bar(ind, twohalf, width, color='brown', bottom=[one[h] + onehalf[h] + two[h] for h in range(len(threehalf))])
p2 = plt.bar(ind, three, width, color='purple', bottom=[one[h] + onehalf[h] + two[h] + twohalf[h] for h in range(len(threehalf))])
p3 = plt.bar(ind, threehalf, width, color='hotpink', bottom=[one[h] + onehalf[h] + two[h] + twohalf[h] + three[h] for h in range(len(threehalf))])
p4 = plt.bar(ind, four, width, color='orange', bottom=[one[h] + onehalf[h] + two[h] + twohalf[h] + three[h]+ threehalf[h] for h in range(len(threehalf))])
p5 = plt.bar(ind, fourhalf, width, color='gold', bottom=[one[h] + onehalf[h] + two[h] + twohalf[h] + three[h] + threehalf[h] + four[h] for h in range(len(threehalf))])
p6 = plt.bar(ind, five, width, color='green', bottom=[one[h] + onehalf[h] + two[h] + twohalf[h] + three[h] + threehalf[h] + four[h] + fourhalf[h] for h in range(len(threehalf))])
locs, labels = plt.xticks(ind + width/2., (organ))
plt.setp(labels, rotation=90)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.legend((p6[0], p5[0], p4[0], p3[0], p2[0], p1[0], p0[0], pp[0], pq[0]), ('5', '4.5', '4', '3.5', '3', '2.5', '2', '1.5', '1'), bbox_to_anchor=(1, .7), fontsize='x-small')
plt.ylim(0,100)
plt.yticks(range(0, 101, 20), [str(x) + "%" for x in range(0, 101, 20)], fontsize=8)
for j,item in enumerate(ind+0.1):
plt.text(item,95, tish[j] +': '+ total[j], color='white', size=5, rotation=90, horizontalalignment='left')
plt.tight_layout()
fig_name = 'starred_project_bias.pdf'
plt.savefig(fig_name)
plt.show()
plt.clf()
#% Now a bar plot of the star ratings aggregated over all sample pairs
one = []
onehalf = []
two = []
twohalf = []
three = []
threehalf = []
four = []
fourhalf = []
five = []
total =[]
temp = Counter(all_dam)
if 1.0 in temp:
one.append((temp[1.0]/float(len(all_dam)))*100)
else:
one.append(0)
if 1.5 in temp:
onehalf.append((temp[1.5]/float(len(all_dam)))*100)
else:
onehalf.append(0)
if 2.0 in temp:
two.append((temp[2.0]/float(len(all_dam)))*100)
else:
two.append(0)
if 2.5 in temp:
twohalf.append((temp[2.5]/float(len(all_dam)))*100)
else:
twohalf.append(0)
if 3.0 in temp:
three.append((temp[3.0]/float(len(all_dam)))*100)
else:
three.append(0)
if 3.5 in temp:
threehalf.append((temp[3.5]/float(len(all_dam)))*100)
else:
threehalf.append(0)
if 4.0 in temp:
four.append((temp[4.0]/float(len(all_dam)))*100)
else:
four.append(0)
if 4.5 in temp:
fourhalf.append((temp[4.5]/float(len(all_dam)))*100)
else:
fourhalf.append(0)
if 5.0 in temp:
five.append((temp[5.0]/float(len(all_dam)))*100)
else:
five.append(0)
total.append(len(all_dam))
N = 9
ind = np.arange(N) # the x locations for the groups
width = 1 # the width of the bars: can also be len(x) sequence
fig, ax = plt.subplots()
pq = ax.bar(ind[0], one, width, color='gray')
pp = ax.bar(ind[1], onehalf, width, color='red')
p0 = ax.bar(ind[2], two, width, color='blue')
p1 = ax.bar(ind[3], twohalf, width, color='brown')
p2 = ax.bar(ind[4], three, width, color='purple')
p3 = ax.bar(ind[5], threehalf, width, color='hotpink')
p4 = ax.bar(ind[6], four, width, color='orange')
p5 = ax.bar(ind[7], fourhalf, width, color='gold')
p6 = ax.bar(ind[8], five, width, color='green')
ax.set_ylabel('Percentage')
ax.set_xlabel('Star Rating')
locs, labels = plt.xticks(ind + width/2., (['1', '1.5', '2', '2.5', '3', '3.5', '4', '4.5', '5']))
plt.setp(labels)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.yticks(range(0, 81, 20), [str(x) for x in range(0, 91, 20)], fontsize=10)
plt.ylim(0,80)
for y in range(10, 91, 10):
plt.plot(range(0, 10), [y] * len(range(0, 10)), "--", lw=0.5, color="black", alpha=0.2)
# Remove the tick marks; they are unnecessary with the tick lines we just plotted.
plt.tick_params(axis="both", which="both", bottom="off", top="off",
labelbottom="on", left="off", right="off", labelleft="on")
total = [one[0], onehalf[0], two[0], twohalf[0], three[0], threehalf[0], four[0], fourhalf[0], five[0]]
for j,item in enumerate(ind+0.1):
if total[j] < 1:
rounded = str(round(total[j],2)) + '%'
else:
rounded = str(float('%.3g' % total[j])) + '%'
plt.text(item,total[j]+0.8, rounded, color='black', size=10) #absolute[j]
fig_name = 'all_stars.pdf'
plt.savefig(fig_name)
plt.show()
plt.clf()
### Finally histograms to show the distributions of each QC measure for normal and tumour samples.
miscellanea = {'Mean_size_normal': [['green', 25], ['Mean coverage for normal', 'mean coverage', 'Number of samples'], [0,140], [0,300]],
'Mean_size_tumour': [['lightgreen', 30], ['Mean coverage for tumour', 'mean coverage', 'Number of samples'], [0,140], [0,300]],
'Med_Mean_size_norm': [['indigo', whiskers[0][1],whiskers[1][1]], ['Ratio of the median coverage over the mean coverage for normal', 'Ratio', 'Number of samples'], [0.5,1.15], [0,700]],
'Med_Mean_size_tumo': [['violet', whiskers[2][1],whiskers[3][1]], ['Ratio of the median coverage over the mean coverage for tumour', 'Ratio', 'Number of samples'], [0.5,1.15], [0,700]],
'FWHM_size_normal': [['brown', 0.205], ['FWHM for normal', 'FWHM', 'Number of samples'], [0,0.8], [0,500]],
'FWHM_size_tumour': [['khaki', 0.34], ['FWHM for tumour', 'FWHM', 'Number of samples'], [0,0.8], [0,500]],
'CallPow_size_normal': [['grey', 2.6*10**9], ['Somatic mutation calling power', 'Number of bases', 'Number of samples'], [1.5*10**9,2.9*10**9], [0,1200]],
'DiffChrom_size_normal': [['blue', 3], ['Paired reads mapping to different chromosomes for normal', 'Percentage', 'Number of samples'], [0,18], [0,600]],
'DiffChrom_size_tumour': [['aqua', 3], ['Paired reads mapping to different chromosomes for tumour', 'Percentage', 'Number of samples'], [0,18], [0,600]],
'BaseBias_size_normal': [['red', 2], ['Ratio of difference in edits between paired reads for normal', 'Ratio', 'Number of samples'], [1,5.5], [0,300]],
'BaseBias_size_tumour': [['orange', 2], ['Ratio of difference in edits between paired reads for tumour', 'Ratio', 'Number of samples'], [1,5.5], [0,300]]}
# Histograms
qcs = ['Mean_size_normal', 'Mean_size_tumour', 'Med_Mean_size_norm', 'Med_Mean_size_tumo', 'FWHM_size_normal', 'FWHM_size_tumour', 'CallPow_size_normal', 'DiffChrom_size_normal', 'DiffChrom_size_tumour', 'BaseBias_size_normal', 'BaseBias_size_tumour']
for k,qc in enumerate([Mean_size_normal, Mean_size_tumour, Med_Mean_size_norm, Med_Mean_size_tumo, FWHM_size_normal, FWHM_size_tumour, CallPow_size_normal, DiffChrom_size_normal, DiffChrom_size_tumour, BaseBias_size_normal, BaseBias_size_tumour]):
to_del = []
if qcs[k] == 'DiffChrom_size_normal':
for j,item in enumerate(qc):
if item > 20:
to_del.append(j)
to_del.reverse()
for index in to_del:
del qc[index]
elif qcs[k] == 'FWHM_size_tumour':
for j,item in enumerate(qc):
if item > 1:
to_del.append(j)
to_del.reverse()
for index in to_del:
del qc[index]
if len(miscellanea[qcs[k]][0]) == 2:
fig = plt.figure()
ax = fig.add_subplot(111)
result = ax.hist(qc, bins=100, color=miscellanea[qcs[k]][0][0])
ax.axvline(miscellanea[qcs[k]][0][1], color='k', linestyle='dashed', linewidth=2)
# ax.set_title(miscellanea[qcs[k]][1][0])
ax.set_xlabel(miscellanea[qcs[k]][1][1])
ax.set_ylabel(miscellanea[qcs[k]][1][2])
ax.set_xlim(miscellanea[qcs[k]][2][0] ,miscellanea[qcs[k]][2][1])
ax.set_ylim(miscellanea[qcs[k]][3][0] ,miscellanea[qcs[k]][3][1])
fig.savefig(qcs[k] + '_histogram.pdf')
elif len(miscellanea[qcs[k]][0]) == 3:
fig = plt.figure()
ax = fig.add_subplot(111)
result = ax.hist(qc, bins=100, color=miscellanea[qcs[k]][0][0])
ax.axvline(miscellanea[qcs[k]][0][1], color='k', linestyle='dashed', linewidth=2)
ax.axvline(miscellanea[qcs[k]][0][2], color='k', linestyle='dashed', linewidth=2)
# ax.set_title(miscellanea[qcs[k]][1][0])
ax.set_xlabel(miscellanea[qcs[k]][1][1])
ax.set_ylabel(miscellanea[qcs[k]][1][2])
ax.set_xlim(miscellanea[qcs[k]][2][0] ,miscellanea[qcs[k]][2][1])
ax.set_ylim(miscellanea[qcs[k]][3][0] ,miscellanea[qcs[k]][3][1])
fig.savefig(qcs[k] + '_histogram.pdf')
``` |
{
"source": "jpwhite3/botorum",
"score": 2
} |
#### File: servicecatalog/models/__init__.py
```python
import abc
import six
import boto3
from botorum.common import camel_to_snake
@six.add_metaclass(abc.ABCMeta)
class BaseModel():
session = boto3.Session()
def __init__(self, **object_attrs):
self._set_attrs(**object_attrs)
def __getattr__(self, name):
attr_name = camel_to_snake(name)
try:
return self.__dict__[attr_name]
except KeyError:
raise AttributeError("Attribute not found")
def __eq__(self, other):
return getattr(self, self.Meta.identity_attribute_name) == getattr(other, self.Meta.identity_attribute_name)
def __ne__(self, other):
return getattr(self, self.Meta.identity_attribute_name) != getattr(other, self.Meta.identity_attribute_name)
def __str__(self):
identity = getattr(self, self.Meta.identity_attribute_name)
return str(identity)
def __unicode__(self):
return self.__str__()
def _set_attrs(self, **attrs):
for attr, value in attrs.items():
setattr(self, camel_to_snake(attr), value)
@property
def client(self):
return self.__class__.get_client()
@classmethod
def get_client(cls):
return cls.session.client(cls.Meta.boto3_client_name)
@classmethod
@abc.abstractmethod
def list(cls, max_items=1000, page_size=20):
pass
@classmethod
@abc.abstractmethod
def create(cls, **kwargs):
pass
@classmethod
@abc.abstractmethod
def get(cls, object_id):
pass
@classmethod
def search(cls, attribute, search_terms):
assert search_terms, "At least one search term must exist"
search_results = []
for obj in cls.list():
attr_value = getattr(obj, attribute, "")
for search_term in search_terms:
if attr_value and search_term.lower() in attr_value.lower():
search_results.append(obj)
return search_results
@abc.abstractmethod
def update(self, **kwargs):
pass
@abc.abstractmethod
def delete(self):
pass
class Meta:
boto3_client_name = 'servicecatalog'
language = 'en'
identity_attribute_name = 'id'
```
#### File: servicecatalog/models/product.py
```python
from botorum.common import merge_dicts
from botorum.servicecatalog.models import BaseModel
class Product(BaseModel):
_tag_options = []
tags = {}
@classmethod
def list(cls, max_items=1000, page_size=20):
paginator = cls.get_client().get_paginator('search_products_as_admin')
response_iterator = paginator.paginate(
SortBy='CreationDate',
SortOrder='DESCENDING',
ProductSource='ACCOUNT',
PaginationConfig={'MaxItems': max_items, 'PageSize': page_size}
)
for response in response_iterator:
for object_details in response.get('ProductViewDetails', []):
flattened_details = cls._flatten_object_details(object_details)
yield Product(**flattened_details)
@classmethod
def create(cls, **kwargs):
"""
Create a new product and initial provisioning artifact (version).
Arguments mirror the boto3 "create_product" API call, link below.
Returns a Product object
https://boto3.readthedocs.io/en/latest/reference/services/servicecatalog.html#ServiceCatalog.Client.create_product
"""
response = cls.get_client().create_product(**kwargs)
object_details = cls._flatten_object_details(response)
return cls(**object_details)
@classmethod
def get(cls, object_id):
response = cls.get_client().describe_product_as_admin(Id=object_id)
object_details = cls._flatten_object_details(response)
return cls(**object_details)
@staticmethod
def _flatten_object_details(response):
product_view_detail = response.get('ProductViewDetail', {})
if 'ProductViewSummary' in product_view_detail:
product_view_summary = product_view_detail.pop('ProductViewSummary', {})
else:
# fall back to the top-level response; default to {} so the name is always bound
product_view_summary = response.get('ProductViewSummary', {})
object_details = merge_dicts(product_view_summary, product_view_detail)
if 'Tags' in response:
object_details['tags'] = {x['Key']: x['Value'] for x in response.get('Tags', [])}
return object_details
def update(self, **kwargs):
response = self.client.update_product(
AcceptLanguage=self.Meta.language,
Id=self.product_id,
Name=kwargs.get('Name', self.name),
Owner=kwargs.get('Owner', self.owner),
Description=kwargs.get('Description', self.short_description),
Distributor=kwargs.get('Distributor', self.distributor),
SupportDescription=kwargs.get('SupportDescription', self.support_description),
SupportEmail=kwargs.get('SupportEmail', self.support_email),
SupportUrl=kwargs.get('SupportUrl', self.support_url),
AddTags=kwargs.get('AddTags', []),
RemoveTags=kwargs.get('RemoveTags', [])
)
object_details = self._flatten_object_details(response)
self._set_attrs(**object_details)
def delete(self):
return self.client.delete_product(
Id=self.product_id
)
class Meta(BaseModel.Meta):
identity_attribute_name = 'product_id'
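# Example usage (illustrative sketch; the product id and tag values below are made up):
#   product = Product.get("prod-abcd1234ef")
#   product.update(Owner="Platform Team", AddTags=[{"Key": "env", "Value": "dev"}])
#   web_products = Product.search("name", ["web"])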
``` |
{
"source": "jpwhite3/phoney",
"score": 3
} |
#### File: phoney/tests/test_api.py
```python
import unittest
from fastapi.testclient import TestClient
from phoney.app.main import app
from phoney.app.apis.provider import get_provider_list, get_generator_list, get_provider
client = TestClient(app)
class TestApi(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
def setUp(self):
pass
def test_endpoint_providers(self):
response = client.get("/providers")
self.assertEqual(response.status_code, 200)
expected_provider_set = set([x for x in get_provider_list()])
actual_provider_set = set(response.json().keys())
self.assertEqual(expected_provider_set, actual_provider_set)
def test_endpoint_provider(self):
for provider_name in get_provider_list():
response = client.get("/provider/%s" % provider_name)
self.assertEqual(response.status_code, 200)
provider = get_provider(provider_name)
generators = get_generator_list(provider)
excepted = {"provider": provider_name, "generators": generators}
actual = response.json()
self.assertEqual(expected, actual)
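# To run this suite (illustrative; assumes the package layout above and pytest installed):
#   python -m pytest phoney/tests/test_api.py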
``` |
{
"source": "jpwhite3/pygrep",
"score": 3
} |
#### File: pygrep/pygrep/__init__.py
```python
import re
def regex_search(expression, target_string, ignorecase=False):
raw_expression = re.escape(expression)
if ignorecase:
return re.search(raw_expression, target_string, re.I)
return re.search(raw_expression, target_string)
def grep(expression, filepath, ignorecase=False, invert=False):
results = []
with open(filepath) as file:
for line_no, line in enumerate(file):
matches = regex_search(expression, line, ignorecase)
# Invert matches if need be and print
current_line_info = (filepath, line_no, line)
if matches and not invert:
results.append(current_line_info)
elif invert and not matches:
results.append(current_line_info)
return results
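# Example usage (illustrative):
#   for path, line_no, line in grep("TODO", "pygrep/__init__.py", ignorecase=True):
#       print("%s:%d: %s" % (path, line_no + 1, line.rstrip()))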
``` |
{
"source": "jpwhite3/python-analytics-demo",
"score": 3
} |
#### File: jpwhite3/python-analytics-demo/micro_service.py
```python
from flask import Flask, request, jsonify
from flask_restful import Resource, Api
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
app = Flask(__name__)
api = Api(app)
# Setup our data
tips = pd.read_csv('input/tips.csv')
tips['tip_percent'] = (tips['tip'] / tips['total_bill'] * 100)
tips['tip_above_avg'] = np.where(tips['tip_percent'] >= tips['tip_percent'].mean(), 1, 0)
tips.replace({'Yes': 1, 'No': 0}, inplace=True)
# Setup our Prediction models
def predict_tip(df):
formula = 'tip ~ total_bill + party_size + C(ordered_alc_bev) + C(gender) + C(day) + C(time)'
model = sm.ols(formula, data=tips) # Describe model
results = model.fit() # Fit model
return results.predict(df).tolist()
def predict_if_tip_above_avg(df):
formula = 'tip_above_avg ~ total_bill + party_size + C(ordered_alc_bev) + C(gender) + C(day) + C(time)'
model = sm.ols(formula, data=tips) # Describe model
results = model.fit() # Fit model
return results.predict(df).tolist()
class PredictTip(Resource):
def post(self):
request_data = request.get_json()
try:
columns = ['total_bill', 'gender', 'ordered_alc_bev', 'day', 'time', 'party_size']
df = pd.DataFrame(request_data, columns=columns)
except:
return {'error': 'You posted bad data!'}
# Aggregation & conversion from NumPy types to native types
total_bill = df[['total_bill']].sum().item()
predictions = predict_tip(df)
predicted_tip = sum(predictions)
predicted_tip_percentage = round(predicted_tip/total_bill, 2)
predicted_tip_above_average = "Yes" if predicted_tip_percentage >= tips['tip_percent'].mean() else "No"
return {
'total_bill': round(total_bill, 2),
'predicted_tip': round(predicted_tip, 2),
'tip_percentage': predicted_tip_percentage,
'tip_above_average': predicted_tip_above_average
}
api.add_resource(PredictTip, '/')
if __name__ == '__main__':
app.debug = True
app.run()
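# Example request (illustrative payload; values are made up and 'ordered_alc_bev' uses the
# 1/0 encoding applied to the training data above):
#   curl -X POST http://localhost:5000/ -H "Content-Type: application/json" \
#        -d '[{"total_bill": 25.0, "gender": "Female", "ordered_alc_bev": 1,
#              "day": "Sun", "time": "Dinner", "party_size": 2}]'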
``` |
{
"source": "jpwhite3/trmpt",
"score": 2
} |
#### File: jpwhite3/trmpt/app.py
```python
import os
from chalice import Chalice, Response
from chalice import BadRequestError, ChaliceViewError
from faker import Faker
from chalicelib.generator_list import blacklisted_generators, available_generators
fake = Faker()
app = Chalice(app_name='trmpt')
dir_path = os.path.dirname(os.path.realpath(__file__))
@app.route('/', methods=['GET'], cors=True)
def index():
with open('%s/chalicelib/index.html' % dir_path, 'r') as myfile:
html=myfile.read().replace('\n', '')
return Response(
body=html,
status_code=200,
headers={'Content-Type': 'text/html'}
)
@app.route('/schema', methods=['GET'], cors=True)
def get_schema():
return {
'available_generators': available_generators,
'example_request': '/generator/address?count=5'
}
@app.route('/generator/{data_type}', methods=['GET'], cors=True)
def get_data(data_type):
#return app.current_request.to_dict()
try:
data_type = data_type if not data_type in blacklisted_generators else ''
data_generator = getattr(fake, data_type)
count = 1
if app.current_request.query_params:
count = app.current_request.query_params.get('count', 1)
count = int(count) if str(count).isdigit() else 1
count = count if count <= 100 else 100
results = []
for i in range(0, count):
data = data_generator()
results.append(data)
return {
'record_count': len(results),
'data': results
}
except AttributeError as e:
raise BadRequestError("Invalid generator [%s]" % data_type)
except Exception as e:
raise ChaliceViewError(str(e))
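# Example request (illustrative): GET /generator/address?count=5 returns five fake addresses,
# matching the example_request advertised by the /schema endpoint.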
``` |
{
"source": "jpwhitemn/edgex-docker-hub-documentation",
"score": 2
} |
#### File: jpwhitemn/edgex-docker-hub-documentation/generate-overviews.py
```python
import sys
import requests
MARKER = "$$"
LIST_IMAGES = 'https://hub.docker.com/v2/repositories/%(org)s/'
def get_file_content(filename):
content = ""
# if the content file can be opened, return the content in the file; otherwise specify missing content in markdown
try:
f = open(filename, "r")
content = f.read()
f.close()
except FileNotFoundError:
print("\tNo content file %s" % filename)
content = "**no content file**\n"
except:
print("\tTrouble opening file %s" % filename)
content = "**missing content**\n"
return content
def replace_line_with_content(input_line):
# get the input line minus the marker to get the content file
content_file_name = input_line[len(MARKER):-1]
# return the content from the content file
return get_file_content(content_file_name)
def add_content(input_line, output):
# if the line of content is markered - then get the replacement content specified by the file
if input_line.startswith(MARKER):
input_line = replace_line_with_content(input_line)
# return the line or the replacement content
output.write(input_line)
def create_overview(image_name):
try:
# open the template file for the image
template = open("./image-overview-templates/" + image_name + ".md", "r")
except FileNotFoundError:
print("No template file %s" % image_name)
return
except:
print("Trouble opening template file %s" % image_name)
return
try:
# open the output overview file for the image
output = open("./generated-overviews/" + image_name + ".md", "w")
except:
print("Cannot open overview file for: %s" % image_name)
return
# for each line in the template, write out the appropriate line of content in the output file
for template_line in template:
add_content(template_line, output)
output.close()
template.close()
print("Overview created for %s" % image_name)
# ------------------
org = "edgexfoundry"
if len(sys.argv) > 1:
org = sys.argv[1]
next_page = LIST_IMAGES % {"org": org}
count = 0
while next_page is not None:
resp = requests.get(next_page)
count += 1
next_page = None
if resp.status_code == 200:
data = resp.json()
# Read image data
for img in data['results']:
# request an overview markdown file for the image
create_overview(img['name'])
if data['next'] is not None:
next_page = data['next']
``` |
{
"source": "jpwhiting/svox-pico",
"score": 3
} |
#### File: lingware/tools/picoloadphones.py
```python
import argparse
import os
import symboltable
# valid property names
propertyNames = {
'mapval': 0,
'vowel': 0,
'diphth': 0,
'glott': 0,
'nonsyllvowel': 0,
'syllcons': 0
}
# valid unique property names (may occur once only)
uniquePropertyNames = {
'primstress': 0,
'secstress': 0,
'syllbound': 0,
'wordbound': 0,
'pause': 0
}
# init
args = argparse.Namespace()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('infile', type=argparse.FileType('r'),
help='source file name of phones text data')
parser.add_argument('outfile', type=argparse.FileType('wb'),
help='destination file name of phones binary data')
parser.parse_args(namespace=args)
if not args.infile:
print("*** error: could not open input file: " + args.infile)
exit(1)
if not args.outfile:
print("*** error: could not open output file: " + args.outfile)
exit(1)
# tables
# table with symbol name keys (not really used currently)
symbols = {}
# table with symbol name number keys (specified with property mapval)
symbolNumbers = {}
# array of symbol name number keys used (to check for unique mapvals)
symbolUsed = {}
table = symboltable.SymbolTable()
symbols = table.parseFile(args.infile)
args.infile.close()
for symbol in symbols:
properties = symbols[symbol]
mappedValue = properties.get('mapval')
# Parse otherProperties setting flags as appropriate
for property in properties.keys():
value = properties[property]
if not property == 'mapval' and not value == 1:
print("*** error in property list, optional properties"
" only accept \"1\": " + property)
continue
# Make sure this value isn't used yet
if mappedValue in symbolUsed:
print("*** error: mapval values must be unique, " +
str(mappedValue))
else:
symbolUsed[mappedValue] = True
symbolNumbers[int(mappedValue)] = properties
# check symbolNumbers
def checkSymbolTable(table):
for i in propertyNames:
propertyNames[i] = 0
for i in uniquePropertyNames:
uniquePropertyNames[i] = 0
# Check each symbol, which contains a dictionary of properties
for element in table.values():
# Check this symbol's properties
for key, value in element.items():
if key not in propertyNames and key not in uniquePropertyNames:
print("*** error: invalid property name: " + key)
exit(1)
if key in propertyNames:
propertyNames[key] = propertyNames[key] + 1
elif key in uniquePropertyNames:
uniquePropertyNames[key] = uniquePropertyNames[key] + 1
for key, value in uniquePropertyNames.items():
if value > 1:
print("*** error: property " + key + " must be unique")
exit(1)
checkSymbolTable(symbolNumbers)
# get IDs of unique specids
specialIDs = {}
# Initialize to 0 so 0s get written to .pkb file
for i in range(1, 9):
specialIDs[i] = 0
uniqueKeys = {'primstress': 1,
'secstress': 2,
'syllbound': 3,
'pause': 4,
'wordbound': 5
}
# Then set each specialIDs to which mapval it is assigned to
for key, element in symbolNumbers.items():
for test, value in uniqueKeys.items():
if test in element:
specialIDs[value] = int(element['mapval'])
# write out Phones pkb
propertyValues = {'vowel': 1,
'diphth': 2,
'glott': 4,
'nonsyllvowel': 8,
'syllcons': 16
}
def encodeProperties(dict):
properties = 0
for test, value in propertyValues.items():
if test in dict:
properties |= value
return properties
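# Example of the bit encoding above (hypothetical symbol): a phone marked as
# both 'vowel' and 'diphth' is encoded as 1 | 2 == 3; a symbol with none of
# the listed properties stays 0.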
# First write the 8 special ids
for i in range(1, 9):
if specialIDs[i] == 0:
args.outfile.write(b'\x00')
else:
args.outfile.write(bytes([specialIDs[i]]))
# Then write the flags for each symbol
for i in range(0, 256):
value = 0
if i in symbolNumbers:
value = encodeProperties(symbolNumbers[i])
args.outfile.write(bytes([value]))
args.outfile.close()
```
#### File: lingware/tools/picoloadpos.py
```python
import argparse
import os
import struct
# symboltable used to parse utf input files
import symboltable
# valid property names
propertyNames = {
'mapval': 0,
'iscombined': 0,
'values': 0
}
# init
args = argparse.Namespace()
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('infile', type=argparse.FileType('r'),
help='source file name of parts-of-speech text data')
parser.add_argument('outfile', type=argparse.FileType('wb'),
help='destination file name of parts-of-speech binary data')
parser.parse_args(namespace=args)
if not args.infile:
print("*** error: could not open input file: " + args.infile)
exit(1)
if not args.outfile:
print("*** error: could not open output file: " + args.outfile)
exit(1)
# tables
# table with all symbols read from pos file
partsOfSpeech = {}
# table with symbol name keys used to do lookup of combined symbols
primaryPartsOfSpeech = {}
# table of combined symbols key is how many symbols were combined (2-8)
# single parts of speech are in partsOfSpeech, 2-8 symbol combinations are
# here
combinations = {}
# array of symbol name number keys used (to check for unique mapvals)
symbolUsed = {}
table = symboltable.SymbolTable()
partsOfSpeech = table.parseFile(args.infile)
args.infile.close()
# parse dictionary checking for invalid values, duplicates, etc.
for symbol in partsOfSpeech.keys():
properties = partsOfSpeech[symbol]
mapValue = properties.get('mapval')
if mapValue:
for property in properties.keys():
if property != 'mapval' and properties[property] != 1:
# Parse otherProperties setting flags as appropriate
print("*** error in property list, optional properties"
" only accept \"1\": " + property)
continue
else:
pass
# Make sure this value isn't used yet
if mapValue in symbolUsed:
print("*** error: mapval values must be unique, symbol: " +
symbol + ", value: " + str(mapValue))
else:
symbolUsed[mapValue] = True
# Only add to partsOfSpeech list if it's not a combined symbol
if 'iscombined' not in properties:
primaryPartsOfSpeech[symbol] = properties
else:
# It is combined, so parse which symbols it's a combination of
symbollist = symbol.split('^')
combinedNumbers = []
for lookup in symbollist:
if lookup not in primaryPartsOfSpeech:
print("*** error: unable to find symbol " + lookup + " in combined symbol " + symbol)
exit(1)
else:
combinedNumbers.append(primaryPartsOfSpeech[lookup]['mapval'])
properties['values'] = combinedNumbers
length = len(combinedNumbers)
if length in combinations:
combinations[length][mapValue] = combinedNumbers
else:
combinations[length] = { mapValue: combinedNumbers }
# check table
def checkSymbolTable(table):
for i in propertyNames:
propertyNames[i] = 0
# Check each symbol, which contains a dictionary of properties
for element in table.values():
# Check this symbol's properties
for key, value in element.items():
if key not in propertyNames:
print("*** error: invalid property name: " + key)
exit(1)
if key in propertyNames:
propertyNames[key] = propertyNames[key] + 1
checkSymbolTable(partsOfSpeech)
# write out Pos pkb
# NOTE: this helper appears to be carried over from picoloadphones.py; it
# references propertyValues, which is not defined in this file, and is never
# called here.
def encodeProperties(dict):
properties = 0
for test, value in propertyValues.items():
if test in dict:
properties |= value
return properties
# First write out the index of how many of each length there are
runningoffset = 32
for i in range(1, 9):
# Offset starts at 32, then grows by how many the previous had
if i == 1:
offset = runningoffset
howmany = len(primaryPartsOfSpeech)
else:
if i == 2:
offset = runningoffset + howmany # Each single took 1 byte
else:
offset = runningoffset + howmany * i # Each multiple took 1+i which is what i is now
if i in combinations:
howmany = len(combinations.get(i))
else:
offset = 0
howmany = 0
if offset != 0:
runningoffset = offset
args.outfile.write(struct.pack('<H', howmany))
args.outfile.write(struct.pack('<H', offset))
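# Sketch of the resulting .pkb layout implied by the loop above (offsets are
# assumed to be byte offsets into the output file):
#   bytes  0..31 : 8 pairs of little-endian uint16 (count, offset), one pair
#                  per combination length 1..8
#   bytes 32..   : the single POS mapvals (1 byte each), followed by the
#                  combined entries written below (1 byte for the combined
#                  mapval plus 1 byte per constituent mapval)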
# Next write out parts of speech
for i in primaryPartsOfSpeech:
args.outfile.write(struct.pack('<B', int(primaryPartsOfSpeech[i]['mapval'])))
# Finally write out the combined symbols and what they are combinations of
for i in range(2, 9):
if i in combinations:
symbolList = combinations[i]
for symbol, values in symbolList.items():
args.outfile.write(struct.pack('<B', int(symbol)))
for value in values:
args.outfile.write(struct.pack('<B', int(value)))
args.outfile.close()
``` |
{
"source": "jpwiedekopf/TimeSeriesMTL",
"score": 3
} |
#### File: dataio/opportunity/clean_opp_data.py
```python
import numpy as np
import os
import sys
#-------------------------------------------------------------------------------------------------------
# Hyper-parameters -> TODO: change and check before use
#-------------------------------------------------------------------------------------------------------
# Path to the folder containing the OPPORTUNITY data
dataFolder = '/data/opportunity/raw'
# Path to the folder to save the new clean data files
resultFolder = '/data/opportunity/clean-mtl'
allSensors=True
#-------------------------------------------------------------------------------------------------------
# Function to clean the OPPORTUNITY data:
#
# Inputs:
# - [string] dataFolder: path to the folder containing the .dat data files
# - [string] resultFolder: path to the folder to save the result files
# - [boolean] allSensors: if true, all sensors are used. Otherwise, only those on the right lower arm
#
# This script writes new .txt data files containing continuous data records from the files in the
# input data folder purged of NaN values. The format of the output files is nb_timestamps x nb_clean_sensors
#-------------------------------------------------------------------------------------------------------
def cleanOpportunityData(dataFolder, resultFolder, allSensors=True):
if not os.path.exists(resultFolder):
os.makedirs(resultFolder)
print('--------------------------------------------------------------------')
print('Processing the data files in '+dataFolder)
# Sensors indices of the OPPORTUNITY data
# Note: fully corrupted sensors are found by observing the data only
# Note2: the label information is contained in the last column
if allSensors:
sensorId = list(range(1,13)) + list(range(16,34)) + list(range(37,46)) + list(range(50,59)) + list(range(63,72)) + list(range(76,85)) + list(range(89,98)) + list(range(102,134)) + list(range(243,250))
else:
sensorId = [63,64,65,66,67,68] + list(range(243,250))
nbSensors = len(sensorId)
# For all .dat files in the target folder
for file in os.listdir(dataFolder):
if file.endswith('.dat'):
print('Processing file ' + file + ' ...')
# File name
fileName = file.replace('.dat','')
# Open and read the file
fhr = open(dataFolder+'/'+file,'r')
contents = fhr.readlines() # Contents of the file as a list of strings
fhr.close()
# Convert to a matrix of float and extract the labels
nbTimestamps = len(contents)
data = np.empty((nbTimestamps,nbSensors), dtype=float)
# Keep only the relevant content
# Note: also removes the NaN values at the end of the data file
# Removal criterion (arbitrary): if more than 85% of the sensors have NaN values at a particular timestamp, the following values are considered to be NaN
stoppingIdx = 0 # Index after which all values are considered to be NaN
for idx in range(nbTimestamps):
dataLineTmp = [float(e) for e in contents[idx].split()]
data[idx] = [e for i,e in enumerate(dataLineTmp) if i in sensorId]
nbNaNDataLine = np.isnan(dataLineTmp).sum()
if nbNaNDataLine >= 0.85*nbSensors:
stoppingIdx = idx
data = data[:stoppingIdx]
newNbTimestamps = len(data)
# Replace all remaining NaN values with the previous non-NaN one, for each sensor channel
for sensorIdx in range(nbSensors):
# Check if the sensor column contains any NaN
detectedNaN = np.isnan(data[:,sensorIdx]).any()
# If at least a NaN value is detected, replace any of them by the previous non-NaN one
if detectedNaN:
sensorColumn = data[:,sensorIdx]
# Find the first non-NaN value of the column
previousNonNaN = sensorColumn[0]
firstNonNanElementIdx = 1
while np.isnan(previousNonNaN) and firstNonNanElementIdx < newNbTimestamps:
previousNonNaN = sensorColumn[firstNonNanElementIdx]
firstNonNanElementIdx += 1
if np.isnan(previousNonNaN):
print('ERROR: all sensor readings for one channel are NaN!')
sys.exit()
# Replace NaN values
for timeIdx in range(newNbTimestamps):
if np.isnan(sensorColumn[timeIdx]):
data[timeIdx,sensorIdx] = previousNonNaN
else:
previousNonNaN = sensorColumn[timeIdx]
# Save the data and labels in a new file
if not allSensors:
outputFileName = fileName + '_RLA.txt'
else:
outputFileName = fileName + '.txt'
#pdb.set_trace()
outname = os.path.join(resultFolder, outputFileName)
np.savetxt(outname, data, delimiter=' ', fmt='%d')
print('New files created in '+resultFolder)
print('--------------------------------------------------------------------')
#-------------------------------------------------------------------------------------------------------
# Main
#-------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
cleanOpportunityData(dataFolder,resultFolder,allSensors)
```
#### File: dataio/opportunity/create_tf_dataset.py
```python
import os
import numpy as np
#from tfdataset_adapter import np_to_tfrecords
from tfdataset_adapter import tf_data_record_writer
import requests
def download_proto(tfoutpath):
protourl = r'https://raw.githubusercontent.com/tensorflow/tensorflow/master/tensorflow/core/example/feature.proto'
r = requests.get(protourl)
protoname = os.path.join(tfoutpath, 'feature.proto')
with open(protoname, 'wb') as f:
f.write(r.content)
print(f"Downloaded feature.proto file to {protoname}")
def generate_tf_records(datafile, labelfile, tfoutpath):
print(f"{datafile} -> {labelfile}")
datanp = np.load(datafile)
#examples = datanp.shape[0]
#steps = datanp.shape[1]
#channels = datanp.shape[2]
#data_reshape = np.reshape(datanp, (examples, steps * channels))
labelsnp = np.load(labelfile)
prefix = os.path.join(tfoutpath,
os.path.basename(datafile).replace("_data.npy", ""))
# np_to_tfrecords(data_reshape, labelsnp, prefix,
# verbose=True)
tf_data_record_writer(datanp, labelsnp, prefix)
if __name__ == "__main__":
path = r"/data/opportunity/window_labels-mtl/all_sensors/64/"
tfoutpath = path.replace('window_labels-mtl', 'window_labels-mtl-tf')
if not os.path.exists(tfoutpath):
os.makedirs(tfoutpath)
files = sorted(os.listdir(path))
for fn in files:
if "data" in fn:
datafile = os.path.join(path, fn)
labelfile = os.path.join(path, fn.replace("_data", "_labels"))
generate_tf_records(datafile, labelfile, tfoutpath)
```
#### File: jpwiedekopf/TimeSeriesMTL/evaluation.py
```python
from utils.profile import profile
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import json
from tqdm import tqdm
from decimal import Decimal
import numpy as np
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import fbeta_score, confusion_matrix, precision_score, recall_score, multilabel_confusion_matrix
from utils.utilitary_mtl import fmeasure
from utils.f_scores import F2Score
from utils.opportunity import opportunity_select_channels_tf
from dataio.opportunity.opportunity_adapter import opportunity_reader
import tensorflow as tf
import os
import sys
import math
import argparse
def construct_parser():
def int_list(s): return [int(item) for item in s.strip().split(',')]
parser = argparse.ArgumentParser()
parser.add_argument(
'labels', help='the labels to evaluate against', type=int_list)
subparsers = parser.add_subparsers(title="Operation", dest="op")
subparsers.required = True
modelparser = subparsers.add_parser(
'model', help='Evaluate a tf.keras model')
modelparser.add_argument('hdf5', help='input model file', type=str)
modelparser.add_argument(
'--from-config', help='If passed, load config and weights separately. Must pass the name of the hdf5 file with extension .hdf5 or .h5, and the json file must have the same name with extension .json.', action='store_true')
modelparser.add_argument('--data', help='input dataset dir', type=str,
default='/data/opportunity/window_labels-mtl-tf/all_sensors/64')
modelparser.add_argument('--out', help='output directory', type=str,
default='/models/OPPORTUNITY/MTL-HPS/evaluation')
modelparser.add_argument('--batch-size', type=int, default=500)
evalparser = subparsers.add_parser(
'eval', help='Plot from a given evaluation file generated by this utility or the included callback')
evalparser.add_argument('evaljson', help='Path to the evaluation file')
evalparser.add_argument('tag', help='A tag to prefix to the output files')
evalparser.add_argument('outdir', help='where to save the output files')
masterevalparser = subparsers.add_parser(
'mastereval', help='Compare multiple models with each other, by generating a CSV file with comparisons that will be plotted to a facetted lmplot')
masterevalparser.add_argument(
'masterevalfile', type=str, help='the filename and path of the master evaluation csv and png file')
masterevalparser.add_argument(
'evaljson', type=str, help='evaluation json to read into the master evaluation csv')
masterevalparser.add_argument(
'modelname', type=str, help='the model name to be used in graphs')
masterevalparser.add_argument('--plotonly', action='store_true')
masterevalparser.add_argument('--dpi', default=180, type=int)
return parser
if __name__ == "__main__":
parser = construct_parser()
args = parser.parse_args()
print("Command line arguments:")
for arg in vars(args):
print(f' {arg} : {getattr(args, arg)}')
print("\n")
class EvaluationCallback(tf.keras.callbacks.Callback):
def __init__(self, dataset, label_names, num_classes, base_out_dir):
self.evaluator = Evaluator(label_names, num_classes)
self.dataset = dataset
self.label_names = label_names
self.num_classes = num_classes
self.base_out_dir = base_out_dir
@profile
def do_eval(self, epoch):
outdir = os.path.join(self.base_out_dir)
if not os.path.isdir(outdir):
os.makedirs(outdir)
prefix = f"ep{epoch + 1}"
print()
print('Commencing evaluation')
self.evaluator.evaluate_model(
self.model, self.dataset,
self.label_names, self.num_classes,
outdir,
prefix=prefix
)
tf.keras.backend.clear_session()
print("tf.keras session cleared")
self.evaluator.save_evaluation(outdir, prefix=prefix)
def on_epoch_end(self, epoch, logs=None):
self.do_eval(epoch)
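# Minimal usage sketch (hypothetical dataset/model objects): attach the
# callback so a full evaluation is written out after every epoch.
#
#   cb = EvaluationCallback(test_dataset, label_names, num_classes,
#                           base_out_dir="/models/OPPORTUNITY/MTL-HPS/evaluation")
#   model.fit(train_dataset, epochs=10, callbacks=[cb])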
class Evaluator:
def __init__(self, label_names=None, num_classes=None):
self.modes = ['micro', 'macro', 'weighted', None]
self.betas = [1]
if label_names is not None and num_classes is not None:
self.initialize(label_names, num_classes)
def initialize(self, label_names, num_classes):
self.label_names = label_names
self.num_classes = num_classes
self.evaluation = []
def load_evaluation(self, args):
with open(args.evaljson, "r") as jf:
self.evaluation = json.load(jf)
def master_evaluate(self, args):
if not os.path.isfile(args.masterevalfile) and args.plotonly:
raise FileNotFoundError(
f"The file {args.masteronly} could not be found, so plotting is not possible")
if args.plotonly:
self.mastereval = pd.read_csv(
filepath_or_buffer=args.masterevalfile)
print(f"Read {args.masterevalfile}")
evaldf = self.make_evaluation_dataframe(args.modelname)
if os.path.isfile(args.masterevalfile) and not args.plotonly:
self.mastereval = pd.read_csv(
filepath_or_buffer=args.masterevalfile)
print(f"Read {args.masterevalfile}")
self.mastereval = self.mastereval.append(evaldf)
self.mastereval.to_csv(path_or_buf=args.masterevalfile,
index=False)
print(f"Appended {args.masterevalfile}")
if not os.path.isfile(args.masterevalfile) and not args.plotonly:
self.mastereval = evaldf
self.mastereval.to_csv(
path_or_buf=args.masterevalfile,
index=False)
print(f'Wrote {args.masterevalfile}')
#p = self.plot_metrics_plot_mastereval(args)
#pname = os.path.join(os.path.dirname(
# args.masterevalfile), f"{os.path.basename(args.masterevalfile)}.png")
#p.savefig(pname, dpi=args.dpi)
#print(f"Wrote to {pname}")
def plot_metrics_plot_mastereval(self, args):
p = sns.lmplot(data=self.mastereval,
x="Epoch",
y="Metric value",
col="Label channel",
hue="Model name",
col_wrap=3,
truncate=True,
lowess=True,
markers='.',
sharex=False,
sharey=False,
#line_kws={"lw": 1.25},
#scatter_kws={"s" : 4}
)
p.set(ylim=(0.7, 0.93))
return p
def save_evaluation(self, outdir, prefix=None):
def _convert(o):
if isinstance(o, np.int64):
return int(o)
if isinstance(o, np.ndarray):
if o.dtype == np.dtype('float64'):
return o.astype('float32').tolist()
return o.tolist()
raise TypeError
if prefix is None:
dest_name = os.path.join(outdir, 'eval.json')
else:
dest_name = os.path.join(outdir, f"{prefix}_eval.json")
with open(dest_name, 'w') as f:
json.dump(self.evaluation, f, indent=2, default=_convert)
print(f'Wrote evaluation data to {dest_name}')
def make_evaluation_dataframe(self, modelname=None):
metric_name = "fbeta"
average_names = ["micro", "macro", "weighted"]
fbeta_names = [
f"f{int(beta)}-{av}" for beta in self.betas for av in average_names]
allowed_metric_names = fbeta_names
data = {
"Label channel": [],
"Epoch": [],
"Metric": [],
"Metric value": []
}
if modelname is not None:
data["Model name"] = []
for eval_epoch, eval_data in enumerate(self.evaluation):
met_data = eval_data[metric_name]
for ln in self.label_names:
met_data_label = met_data[ln]
for av in met_data_label.keys():
if av in allowed_metric_names:
data["Metric value"].append(met_data_label[av])
data["Metric"].append(av)
data["Label channel"].append(ln)
data["Epoch"].append(eval_epoch + 1)
if modelname is not None:
data["Model name"].append(modelname)
data = pd.DataFrame(data)
print(data)
return data
def plot_metrics_plot_single_eval(self):
# fig = plt.figure(figsize=(10 * len(self.label_names), 10))
data = self.make_evaluation_dataframe()
# plot = sns.relplot(data=data, x='Epoch', y='Metric value',
# col='Label Channel', hue='Metric', style='Metric',
# kind='line', col_wrap=2, markers=True,
# height=10, aspect=1.5)
plot = sns.lmplot(x="Epoch", y="Metric value", data=data,
hue="Label Channel", order=4,
height=10, aspect=1.5,
truncate=True,
ci=95, scatter=True)
x = data["Epoch"]
xint = range(min(x), math.ceil(max(x))+1)
plt.xticks(xint) # , rotation=30)
return plot, data
def save_metrics_plot(self, outdir, prefix=None):
if prefix is None:
dest_name = os.path.join(outdir, 'metrics.png')
csvname = os.path.join(outdir, 'metrics-sklearn.csv')
else:
dest_name = os.path.join(outdir, f"{prefix}_metrics.png")
csvname = os.path.join(outdir, f'{prefix}_metrics-sklearn.csv')
fig, dataframe = self.plot_metrics_plot_single_eval()
fig.savefig(dest_name, dpi=320)
dataframe.to_csv(path_or_buf=csvname, index=False)
print(f'Wrote metrics plot to {dest_name}')
def load_test_data(self, args):
label_names, num_classes = opportunity_select_channels_tf(args.labels)
all_label_names, _ = opportunity_select_channels_tf(list(range(7)))
print(f"Loading dataset from {args.data}")
test_file_criteria = ["ADL4", "ADL5"]
test_files = []
filelist = os.listdir(args.data)
for fn in filelist:
if not fn.find(".tfrecords"):
continue
is_test = any([fn.find(c) > 0 for c in test_file_criteria])
if is_test:
test_files.append(os.path.join(args.data, fn))
test_dataset = opportunity_reader(
test_files[0:1],
all_label_names=all_label_names,
selected_label_names=label_names,
number_classes=num_classes,
validation=True)
test_dataset = test_dataset.batch(args.batch_size, drop_remainder=True)
self.initialize(label_names, num_classes)
return test_dataset, label_names, num_classes
def load_model(self, args):
if args.from_config:
if (args.hdf5.find('hdf5') > -1) or (args.hdf5.find('h5') > -1):
jsonname = args.hdf5.replace('_weights.h5', '.json').replace(
'.hdf5', '.json').replace('.h5', '.json')
hdf5name = args.hdf5
print(f"Loading model config from {jsonname}")
with open(jsonname, 'r') as jf:
config = jf.read()
model = tf.keras.models.model_from_json(config)
print(f'Loading model weights from {hdf5name}')
model.load_weights(hdf5name)
else:
raise AttributeError(
"Please pass the name of the HDF5 file with extension '.hdf5' or '.h5', not the '.json' file, when loading from config")
else:
print(f'Loading model weights and config from {args.hdf5}')
model = tf.keras.models.load_model(args.hdf5)
print()
model.summary()
return model
def save_confusion_matrix(self, y_true, y_pred, num_classes, label_name, outdir, prefix=None):
cm = confusion_matrix(y_true, y_pred)
ax = self.plot_confusion_matrix(
cm,
np.array(list(range(num_classes))),
normalize=True, title=label_name)
if prefix is not None:
dest_path = os.path.join(
outdir, f'{prefix}_confusion_{label_name}.png')
else:
dest_path = os.path.join(outdir, f'confusion_{label_name}.png')
plt.savefig(dest_path)
print(f'Wrote confusion matrix {dest_path}')
def plot_confusion_matrix(self,
cm,
classes,
normalize=False,
title=None,
verbose=False,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
# cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
# classes = classes[unique_labels(y_true, y_pred)]
if normalize:
s = cm.sum(axis=1)[:, np.newaxis]
cm = np.divide(cm.astype('float'), s, where=s != 0)
if verbose and normalize:
print("Normalized confusion matrix")
elif verbose and not normalize:
print('Confusion matrix, without normalization')
if verbose:
print(cm)
fig = plt.figure(figsize=(10, 10), dpi=160)
ax = plt.gca()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes, yticklabels=classes,
title=title,
ylabel='True label',
xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt),
ha="center", va="center",
color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
def evaluate_model(self, model, dataset, label_names, num_classes, outdir, prefix=None, beta=1.0, write_confusion=False):
confusion_matrices = {label_names[i]: np.zeros(
(nc, nc), dtype=int) for i, nc in enumerate(num_classes)}
for x, y_true_all in tqdm(iter(dataset), file=sys.stdout):
y_pred_all = model.predict(x)
for li, ln in enumerate(label_names):
if not type(y_pred_all) is list:
y_pred = y_pred_all
else:
y_pred = y_pred_all[li]
y_true = y_true_all[li]
y_true_a = tf.math.argmax(y_true, axis=1)
y_pred_a = np.argmax(y_pred, axis=1)
confusion_matrices[ln] += confusion_matrix(
y_true_a, y_pred_a, labels=list(range(self.num_classes[li])))
# for i in range(len(y_pred_a)):
# y_t = int(y_true_a[i])
# y_p = int(y_pred_a[i])
# confusion_matrices[ln][y_t, y_p] += 1
multilabel_confision_matrices = {ln: self.cmat_to_mlcmat(
confusion_matrices[ln]) for ln in self.label_names}
precisions = {ln: {} for ln in self.label_names}
recalls = {ln: {} for ln in self.label_names}
fmeasure = {ln: {} for ln in self.label_names}
for li, ln in enumerate(self.label_names):
labels = list(range(self.num_classes[li]))
for mode in self.modes:
prec, rec, _, _ = self.precision_recall_fscore_support(
multilabel_confision_matrices[ln], labels=labels,
average=mode)
mode_text = "none" if mode is None else mode
precisions[ln][mode_text] = prec
recalls[ln][mode_text] = rec
for beta in self.betas:
_, _, fb, _ = self.precision_recall_fscore_support(
multilabel_confision_matrices[ln], labels=labels,
beta=beta,
average=mode
)
fb_mode_text = f"f{int(beta)}-{mode_text}"
fmeasure[ln][fb_mode_text] = fb
eval = {
# 'confusion': {ln: cm.tolist() for ln, cm in confusion_matrices.items()},
# 'confusion-ml': multilabel_confision_matrices,
'precision': precisions,
'recall': recalls,
'fbeta': fmeasure
}
if prefix is not None:
eval['prefix'] = prefix
if write_confusion:
eval['confusion'] = confusion_matrices
eval['confusion-ml'] = multilabel_confision_matrices
self.evaluation.append(eval)
return eval
def cmat_to_mlcmat(self, cmat):
# layout is:
# tn fn
# fp tp
num_classes = cmat.shape[1]
mlc = np.zeros((num_classes, 2, 2), dtype=int)
for label in range(num_classes):
tp = cmat[label, label]
a = set(range(num_classes))
a.remove(label)
a = [(x, y) for x in a for y in a]
tn = np.sum([cmat[y] for y in a])
fp = np.sum(cmat[label, :]) - tp
fn = np.sum(cmat[:, label]) - tp
mlc[label, 1, 1] = tp
mlc[label, 0, 0] = tn
mlc[label, 0, 1] = fn
mlc[label, 1, 0] = fp
return mlc
def precision_recall_fscore_support(self,
MCM, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Adapted from SciKit Learn, Source: https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/metrics/classification.py#L1263
This variant allows for passing in multi-label confusion matrices that have been pre-calculated, as opposed to arrays of predictions and ground truths"""
tp_sum = MCM[:, 1, 1]
pred_sum = tp_sum + MCM[:, 0, 1]
true_sum = tp_sum + MCM[:, 1, 0]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
# Divide, and on zero-division, set scores to 0 and warn:
precision = self._prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = self._prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
denom = beta2 * precision + recall
denom[denom == 0.] = 1 # avoid division by 0
f_score = (1 + beta2) * precision * recall / denom
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
# assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
def _prf_divide(self, numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
mask = denominator == 0.0
denominator = denominator.copy()
denominator[mask] = 1 # avoid infs/nans
result = numerator / denominator
if not np.any(mask):
return result
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
return result
if __name__ == "__main__":
sns.set()
sns.set_style("whitegrid")
sns.set_context("paper")
evaluator = Evaluator()
if args.op == 'model':
dataset, label_names, num_classes = evaluator.load_test_data(args)
model = evaluator.load_model(args)
if args.out is None:
outdir = os.path.dirname(args.hdf5)
else:
outdir = args.out
evaluator.evaluate_model(
model, dataset, label_names, num_classes, outdir)
evaluator.save_evaluation(outdir, "model_evaluation")
eval_name = 'model_evaluation'
elif args.op == 'eval':
label_names, num_classes = opportunity_select_channels_tf(args.labels)
evaluator.initialize(label_names, num_classes)
evaluator.load_evaluation(args)
outdir = args.outdir
if not os.path.isdir(outdir):
os.makedirs(outdir)
print(f"Created directory {outdir}")
eval_name = args.tag
elif args.op == 'mastereval':
label_names, num_classes = opportunity_select_channels_tf(args.labels)
evaluator.initialize(label_names, num_classes)
evaluator.load_evaluation(args)
evaluator.master_evaluate(args)
if not args.op == 'mastereval':
evaluator.save_metrics_plot(outdir, eval_name)
```
#### File: TimeSeriesMTL/models/base_models_mtl.py
```python
import tensorflow as tf
import numpy as np
from operator import mul
from functools import reduce
from tensorflow import keras
from tensorflow.keras import layers, models
def gen_inputs(shape, with_batch_normalization=True):
inputs = tf.keras.layers.Input(shape=shape, name="input")
if with_batch_normalization:
x = tf.keras.layers.BatchNormalization()(inputs)
else:
x = inputs
return x, inputs
def normConv3(
input_shape,
num_kernels=[64, 32, 16],
filter_sizes=[(1, 8), (1, 6), (1, 4)],
pool_sizes=[(1, 4), (1, 3), (1, 2)],
activation_conv='relu',
units_mlp=[56],
activation_mlp='relu',
nb_classes=None,
label_names=None,
with_head=False,
with_batch_normalization=True
):
x, inputs = gen_inputs(input_shape, with_batch_normalization)
for conv_id in range(len(filter_sizes)):
conv_size = filter_sizes[conv_id]
pool_size = pool_sizes[conv_id]
conv_kernels = num_kernels[conv_id]
x = tf.keras.layers.Conv2D(conv_kernels,
kernel_size=conv_size,
activation=activation_conv,
name=f"conv{conv_id}")(x)
if pool_size != 0:
x = tf.keras.layers.MaxPooling2D(pool_size=pool_size,
name=f"pool{conv_id}")(x)
if len(units_mlp) > 0:
x = tf.keras.layers.Flatten()(x)
for mlp_id in range(len(units_mlp)):
units = units_mlp[mlp_id]
x = tf.keras.layers.Dense(units,
activation=activation_mlp,
name=f"dense{mlp_id}_{units}")(x)
if with_head:
out = []
for li, ln in enumerate(label_names):
out.append(tf.keras.layers.Dense(
nb_classes[li], activation='softmax', name=f'{ln}_out')(x))
x = out
return tf.keras.models.Model(inputs=inputs, outputs=x)
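# Sketch of building this trunk with per-task softmax heads, reusing the same
# DEAP-style input shape as the __main__ demo at the bottom of this file
# (label names and class counts are illustrative only):
#
#   model = normConv3(input_shape=(40, 128, 1),
#                     nb_classes=[2, 2],
#                     label_names=["valence", "arousal"],
#                     with_head=True)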
def mlp(
input_shape,
num_classes,
label_names,
generate_head=False,
num_units=[2000, 1000],
with_batch_normalization=True,
dropout=0.4
):
x, inputs = gen_inputs(input_shape, with_batch_normalization)
x = tf.keras.layers.Flatten()(x)
for i, nu in enumerate(num_units):
x = tf.keras.layers.Dense(nu, activation='relu', name=f'dense{i}')(x)
x = tf.keras.layers.Dropout(dropout)(x)
if generate_head:
out = []
for li, ln in enumerate(label_names):
out.append(tf.keras.layers.Dense(
num_classes[li], activation='softmax', name=f'{ln}_out')(x))
x = out
model = tf.keras.Model(inputs=inputs, outputs=x)
return model
def convNetDEAPTripathiReluSingleChannel(
input_shape,
num_classes,
label_names,
generate_head,
):
return convNetDEAPTripathi(
input_shape,
num_classes=num_classes,
label_names=label_names,
generate_head=generate_head,
conv_layers=2,
conv_filters=[100, 100],
conv_activations=["relu", "relu"],
conv_shapes=[(3, 3), (3, 3)],
pool_shape=(2, 2),
poolconv_dropout=0.5,
fc_neurons=128,
fc_dropout=0.25,
fc_activation="relu",
output_activation="softmax",
with_batch_normalization=False
)
def convNetDEAPTripathi(
input_shape,
num_classes=[2, 2],
label_names=["valence", "arousal"],
conv_layers=2,
conv_filters=[100, 100],
conv_activations=["tanh", "tanh"],
conv_shapes=[(3, 3), (3, 3)],
pool_shape=(2, 2),
poolconv_dropout=0.5,
fc_neurons=128,
fc_dropout=0.25,
fc_activation="tanh",
output_activation="softplus",
generate_head=True,
with_batch_normalization=False
):
x, inputs = gen_inputs(input_shape, with_batch_normalization)
for conv_id in range(conv_layers):
activation = conv_activations[conv_id]
shape = conv_shapes[conv_id]
filters = conv_filters[conv_id]
if activation == 'both':
conv1 = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=shape,
activation="tanh",
padding="valid",
name=f"conv_{conv_id}_tanh"
)(x)
conv2 = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=shape,
activation="relu",
padding="valid",
name=f"conv_{conv_id}_relu"
)(x)
x = tf.keras.layers.Concatenate(axis=1)([conv1, conv2])
else:
conv_name = f"conv_{conv_id}"
x = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=shape,
activation=activation,
padding="valid",
name=conv_name
)(x)
x = tf.keras.layers.MaxPooling2D(pool_size=pool_shape)(x)
x = tf.keras.layers.Dropout(poolconv_dropout)(x)
x = tf.keras.layers.Flatten()(x)
if generate_head:
x = tf.keras.layers.Dense(
units=fc_neurons, activation=fc_activation)(x)
x = tf.keras.layers.Dropout(fc_dropout)(x)
outs = [tf.keras.layers.Dense(units=nc, activation=output_activation, name=f'out_{ln}_{nc}class')(
x) for ln, nc in zip(label_names, num_classes)]
x = outs
return tf.keras.Model(inputs=inputs, outputs=x)
def convNet2SPS(
input_shape,
nbClasses,
label_names,
args,
withBatchNormalization=True
):
x, inputs = gen_inputs(input_shape, withBatchNormalization)
tops = [x] * len(nbClasses)
ttn_layers = [[] for _ in range(args.numlayers)]
# convolutional feature generators
for layer_i in range(args.numlayers):
for li, ln in enumerate(label_names):
in_tensor = tops[li]
conv_name = f"conv{layer_i}_{ln}"
c = tf.keras.layers.Conv2D(
filters=args.numfilters[layer_i],
kernel_size=(args.numkerns[layer_i], 1),
activation='relu',
padding="valid",
name=conv_name)(in_tensor)
x = tf.keras.layers.MaxPooling2D(
pool_size=(args.poolsizes[layer_i], 1),
name=f"pool{layer_i}_{ln}")(c)
tops[li] = x
ttn_layers[layer_i].append(conv_name)
for li, ln in enumerate(label_names):
in_tensor = tops[li]
tops[li] = tf.keras.layers.Flatten(
name=f"flatten_{ln}")(in_tensor)
# dense units on top of convolutional feature generators
for dense_i in range(args.numdenselayers):
for li, ln in enumerate(label_names):
in_tensor = tops[li]
x = tf.keras.layers.Dense(
units=args.numdenseunits[dense_i],
activation='relu',
name=f"dense{dense_i}_{ln}")(in_tensor)
tops[li] = x
# final outputs on top of dense classifiers
for li, ln in enumerate(label_names):
in_tensor = tops[li]
out = tf.keras.layers.Dense(
units=nbClasses[li],
activation="softmax",
name=f"out_{ln}")(in_tensor)
tops[li] = out
model = tf.keras.Model(inputs=inputs, outputs=tops)
return model, ttn_layers
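# For reference: with args.numlayers == 2 and label_names == ["valence",
# "arousal"], the ttn_layers value returned above would be
#   [["conv0_valence", "conv0_arousal"], ["conv1_valence", "conv1_arousal"]]
# i.e. one list of per-task conv layer names per shared depth level.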
def norm_conv3_crossstitch(
input_shape,
nb_classes=None,
label_names=None,
num_kernels=[64, 32, 16],
filter_sizes=[(1, 8), (1, 6), (1, 4)],
pool_sizes=[(1, 4), (1, 3), (1, 2)],
cross_stitch_after_layer=[True, True, True],
activation_conv='relu',
units_mlp=[56],
activation_mlp='relu',
with_batch_normalization=True
):
x, inputs = gen_inputs(input_shape, with_batch_normalization)
tops = [x] * len(label_names)
for layer_i in range(len(num_kernels)):
layer_tensors = []
filters = num_kernels[layer_i]
kernel_size = filter_sizes[layer_i]
pool_size = pool_sizes[layer_i]
for li, ln in enumerate(label_names):
in_tensor = tops[li]
conv_name = f'conv_{layer_i}_{ln}'
pool_name = f'pool_{layer_i}_{ln}'
x = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
activation=activation_conv,
padding='valid',
name=conv_name)(in_tensor)
if pool_size is not None:
x = layers.MaxPooling2D(
pool_size=pool_size,
name=pool_name
)(x)
tops[li] = x
layer_tensors.append(x)
if cross_stitch_after_layer[layer_i]:
cross_stitch_name = f'cs_{layer_i}'
cs = CrossStitch(
len(label_names),
name=cross_stitch_name)(layer_tensors)
# HACK
unstack_name = f'cs_unstack_{layer_i}'
tops = tf.unstack(cs, axis=0, name=unstack_name)
for li, ln in enumerate(label_names):
in_tensor = tops[li]
tops[li] = layers.Flatten(
name=f"flatten_{ln}")(in_tensor)
for dense_i in range(len(units_mlp)):
units = units_mlp[dense_i]
for li, ln in enumerate(label_names):
dense_name = f'dense_{dense_i}_{ln}'
in_tensor = tops[li]
x = layers.Dense(
units=units,
activation=activation_mlp,
name=dense_name)(in_tensor)
tops[li] = x
for li, ln in enumerate(label_names):
in_tensor = tops[li]
out = layers.Dense(
units=nb_classes[li],
activation="softmax",
name=f"out_{ln}")(in_tensor)
tops[li] = out
model = tf.keras.Model(inputs=inputs, outputs=tops)
return model
def norm_conv3_sps(
input_shape,
nb_classes=None,
label_names=None,
num_kernels=[64, 32, 16],
filter_sizes=[(1, 8), (1, 6), (1, 4)],
pool_sizes=[(1, 4), (1, 3), (1, 2)],
cross_stitch_after_layer=[True, True, True],
activation_conv='relu',
units_mlp=[56],
activation_mlp='relu',
with_batch_normalization=True
):
x, inputs = gen_inputs(input_shape, with_batch_normalization)
tops = {ln: x for ln in label_names}
ttn_layers = [[] for _ in range(len(filter_sizes))]
for layer_i in range(len(num_kernels)):
filters = num_kernels[layer_i]
kernel_size = filter_sizes[layer_i]
pool_size = pool_sizes[layer_i]
for ln in label_names:
in_tensor = tops[ln]
conv_name = f"conv_{layer_i}_{ln}"
pool_name = f"pool_{layer_i}_{ln}"
c = layers.Conv2D(
filters=filters,
kernel_size=kernel_size,
padding='valid',
activation=activation_conv,
name=conv_name
)(in_tensor)
x = layers.MaxPooling2D(
pool_size=pool_size,
name=pool_name
)(c)
tops[ln] = x
ttn_layers[layer_i].append(conv_name)
for ln in label_names:
in_tensor = tops[ln]
tops[ln] = layers.Flatten(
name=f"flatten_{ln}"
)(in_tensor)
for dense_i in range(len(units_mlp)):
units = units_mlp[dense_i]
for ln in label_names:
dense_name = f"dense_{dense_i}_{ln}"
in_tensor = tops[ln]
x = layers.Dense(
units=units,
activation=activation_mlp,
name=dense_name
)(in_tensor)
tops[ln] = x
for li, ln in enumerate(label_names):
in_tensor = tops[ln]
out = layers.Dense(
units=nb_classes[li],
activation="softmax",
name=f"out_{ln}")(in_tensor)
tops[ln] = out
model = models.Model(inputs=inputs, outputs=tops)
return model, ttn_layers
def convNet2CrossStitch(
input_shape,
nbClasses,
label_names,
args,
withBatchNormalization=True
):
x, inputs = gen_inputs(input_shape, withBatchNormalization)
tops = [x] * len(label_names)
for layer_i in range(args.numlayers):
layer_tensors = []
for li, ln in enumerate(label_names):
in_tensor = tops[li]
conv_name = f"conv{layer_i}_{ln}"
c = tf.keras.layers.Conv2D(
filters=args.numfilters[layer_i],
kernel_size=(args.numkerns[layer_i], 1),
activation='relu',
padding='valid',
name=conv_name)(in_tensor)
x = tf.keras.layers.MaxPooling2D(
pool_size=(args.poolsizes[layer_i], 1),
name=f"pool{layer_i}_{ln}")(c)
tops[li] = x
layer_tensors.append(x)
cross_stitch_name = f"cross_stitch_{layer_i}"
cs = CrossStitch(
len(label_names), name=cross_stitch_name)(layer_tensors)
# THIS IS A HACK!
tops = tf.unstack(cs, axis=0)
for li, ln in enumerate(label_names):
in_tensor = tops[li]
tops[li] = tf.keras.layers.Flatten(
name=f"flatten_{ln}")(in_tensor)
# dense units on top of convolutional feature generators
for dense_i in range(args.numdenselayers):
for li, ln in enumerate(label_names):
in_tensor = tops[li]
x = tf.keras.layers.Dense(
units=args.numdenseunits[dense_i],
activation='relu',
name=f"dense{dense_i}_{ln}")(in_tensor)
tops[li] = x
# final outputs on top of dense classifiers
for li, ln in enumerate(label_names):
in_tensor = tops[li]
out = tf.keras.layers.Dense(
units=nbClasses[li],
activation="softmax",
name=f"out_{ln}")(in_tensor)
tops[li] = out
model = tf.keras.Model(inputs=inputs,
outputs=tops)
return model
def convNet2Sparseish(
input_shape,
nkerns=[50, 40, 30],
filterSizes=[11, 10, 6],
activationConv='relu',
dropout=0.4,
withHead=False,
cnnPadding="valid",
withBatchNormalization=True,
nbClasses=None
):
add_flatten = not withHead
return convNet2(
inputShape=input_shape,
nkerns=nkerns,
filterSizes=filterSizes,
activationConv=activationConv,
dropout=dropout,
cnnPadding=cnnPadding,
withBatchNormalization=withBatchNormalization,
withHead=withHead,
add_flatten_on_top=add_flatten,
activationMLP='relu',
neuronsMLP=[1000],
nbClasses=nbClasses
)
def convNetDeapFFT(
inputShape,
nbClasses=None,
nkerns=[96, 64],
filterSizes=[(2, 2), (2, 2)],
poolSizes=[2, 2],
activationConv='relu',
neuronsMLP=[1000],
activationMLP='relu',
dropout=0.4,
withHead=False,
cnnPadding='valid',
withBatchNormalization=True,
add_flatten_on_top=False
):
data_format = 'channels_first'
x, inputs = gen_inputs(inputShape, withBatchNormalization)
for i in range(len(nkerns)):
kernel_size = filterSizes[i]
pool_size = poolSizes[i]
print("using kernel size", kernel_size)
print("using pool size", pool_size)
x = tf.keras.layers.Conv2D(filters=nkerns[i],
kernel_size=kernel_size,
activation=activationConv,
padding=cnnPadding,
data_format=data_format
)(x)
if pool_size > 1:
x = tf.keras.layers.MaxPooling2D(
pool_size=pool_size,
data_format=data_format
)(x)
out = mlpPart(
dropout=dropout,
activationMLP=activationMLP,
withHead=withHead,
neuronsMLP=neuronsMLP,
nbClasses=nbClasses,
x=x
)
model = tf.keras.Model(inputs=inputs, outputs=out)
return model
def mlpPart(
dropout,
activationMLP,
withHead,
neuronsMLP,
nbClasses,
x
):
if len(neuronsMLP) >= 1:
x = tf.keras.layers.Flatten()(x)
for i in range(len(neuronsMLP)):
x = tf.keras.layers.Dense(
units=neuronsMLP[i], activation=activationMLP)(x)
x = tf.keras.layers.Dropout(dropout)(x)
if withHead:
out = tf.keras.layers.Dense(
units=nbClasses[0], activation='softmax', name='out')(x)
else:
out = x
return out
def convNet2(
inputShape,
nbClasses=None,
nkerns=[50, 40, 30],
filterSizes=[11, 10, 6],
poolSizes=[2, 3, 1],
activationConv='relu',
neuronsMLP=[1000, 1000, 1000],
activationMLP='relu',
dropout=0.4,
withHead=False,
cnnPadding="valid",
withBatchNormalization=True,
add_flatten_on_top=False,
axes_order="time-first"):
assert(len(filterSizes) == len(nkerns))
assert(len(filterSizes) == len(poolSizes))
x, inputs = gen_inputs(inputShape, withBatchNormalization)
for i in range(len(nkerns)):
# if (i == 0):
# x = Conv2D#(filters=nkerns[i],
# kernel_size=#(filterSizes[i], 1),
# activation=activationCo#nv,
# input_shape=inputShape)#(x)
# else:
if axes_order == 'time-first':
kernel_size = (filterSizes[i], 1)
pool_size = (poolSizes[i], 1)
else:
kernel_size = (1, filterSizes[i])
pool_size = (1, poolSizes[i])
print("using kernel size", kernel_size)
print("using pool size", pool_size)
x = tf.keras.layers.Conv2D(filters=nkerns[i],
kernel_size=kernel_size,
activation=activationConv,
padding=cnnPadding)(x)
if (poolSizes[i] > 0):
x = tf.keras.layers.MaxPooling2D(pool_size=pool_size)(x)
out = mlpPart(
dropout=dropout,
activationMLP=activationMLP,
withHead=withHead,
neuronsMLP=neuronsMLP,
nbClasses=nbClasses,
x=x
)
model = tf.keras.Model(inputs=inputs, outputs=out)
return model
def cnnSparse(inputShape, withHead=False, nbClasses=None):
return convNet2(inputShape,
nkerns=[50, 40, 30, 30],
filterSizes=[11, 11, 11, 7],
poolSizes=[2, 0, 0, 1],
neuronsMLP=[],
withHead=withHead,
nbClasses=nbClasses
)
class CrossStitch(tf.keras.layers.Layer):
"""Cross-Stitch implementation according to arXiv:1604.03539
Implementation adapted from https://github.com/helloyide/Cross-stitch-Networks-for-Multi-task-Learning"""
def __init__(self, num_tasks, *args, **kwargs):
"""initialize class variables"""
self.num_tasks = num_tasks
super(CrossStitch, self).__init__(**kwargs)
def build(self, input_shape):
"""initialize the kernel and set the instance to 'built'"""
self.kernel = self.add_weight(name="kernel",
shape=(self.num_tasks,
self.num_tasks),
initializer='identity',
trainable=True)
super(CrossStitch, self).build(input_shape)
def call(self, xl):
"""
called by TensorFlow when the model gets build.
Returns a stacked tensor with num_tasks channels in the 0 dimension,
which need to be unstacked.
"""
if (len(xl) != self.num_tasks):
# should not happen
raise ValueError()
out_values = []
for this_task in range(self.num_tasks):
this_weight = self.kernel[this_task, this_task]
out = tf.math.scalar_mul(this_weight, xl[this_task])
for other_task in range(self.num_tasks):
if this_task == other_task:
continue # already weighted!
other_weight = self.kernel[this_task, other_task]
out += tf.math.scalar_mul(other_weight, xl[other_task])
out_values.append(out)
# HACK!
# unless we stack, and then unstack the tensors, TF (2.0.0) can't follow
# the graph, so it aborts during model initialization.
return tf.stack(out_values, axis=0)
def compute_output_shape(self, input_shape):
return [self.num_tasks] + input_shape
def get_config(self):
"""implemented so keras can save the model to json/yml"""
config = {
"num_tasks": self.num_tasks
}
base_config = super(CrossStitch, self).get_config()
return dict(list(config.items()) + list(base_config.items()))
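# Minimal sketch of applying CrossStitch directly to two task-specific feature
# maps (shapes chosen only for illustration):
#
#   a = tf.keras.layers.Input((40, 30, 64))
#   b = tf.keras.layers.Input((40, 30, 64))
#   stacked = CrossStitch(num_tasks=2)([a, b])
#   task_a, task_b = tf.unstack(stacked, axis=0)
#
# As in the models above, the stacked output has to be unstacked along axis 0
# to recover the per-task tensors.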
if __name__ == '__main__':
model = norm_conv3_crossstitch(
input_shape=(40, 128, 1),
args=None,
nb_classes=[2, 2],
label_names=['v', 'a']
)
model.summary()
outpath = "~/cs_normconv3.json"
with open(outpath, "w") as jf:
model_json = model.to_json(indent=2)
jf.write(model_json)
```
#### File: TimeSeriesMTL/utils/app_hps.py
```python
import argparse
import json
def build_parser(model_choices):
def intlist(s): return [int(item) for item in s.strip().split(',')]
def floatlist(s): return [float(item) for item in s.strip().split(',')]
def js(s): return json.loads(s)
topparser = argparse.ArgumentParser()
topparser.add_argument("model", choices=model_choices,
help="The model to use")
topparser.add_argument("tag", type=str,
help="Tag which to append to all generated files to describe this training run")
topparser.add_argument("dataset", type=str.lower,
choices=['deap', 'opportunity'])
topparser.add_argument("--opportunity-path", type=str,
default=r'/data/opportunity/window_labels-mtl-tf')
topparser.add_argument("--opportunity-num-sensors", type=int, default=107,
choices=[5, 10, 20, 50, 80, 107])
topparser.add_argument("--opportunity-time-window", type=int,
default=64, choices=[32, 64, 96])
topparser.add_argument(
"--cnn2-no-dense", action="store_false", default=True, dest="cnn2dense")
# topparser.add_argument("--deap-validation", type=intlist,
# default=intlist("5"))
topparser.add_argument("--deap-path", type=str,
default=r'/data/deap/windowed')
topparser.add_argument("--deap-no-one-hot",
action='store_false', dest='deap_one_hot')
topparser.set_defaults(deap_one_hot=True)
topparser.add_argument(
"--output", default='/models/$dataset$/MTL-HPS', type=str)
topparser.add_argument('--labels',
type=intlist, help='The channel IDs for labelling, delimit with comma',
required=True)
topparser.add_argument("--dry-run", help="If given, do not train,\
just instantiate the model and save the image to\
disk", action="store_true")
topparser.add_argument('--shuffle-buffer', type=int, default=1000)
topparser.add_argument("-e", "--epochs", type=int, default=50)
topparser.add_argument("-s", "--steps", type=int,
default=1000, help='Steps per epoch')
topparser.add_argument("-b", "--batch", type=int, default=500)
topparser.add_argument("--rate", type=float, default=0.05),
topparser.add_argument("--dropout", type=float, default=0.4)
topparser.add_argument(
'--loss-weights', type=floatlist, default=[1, 1])
topparser.add_argument("--null-weight", type=float, default=1.0)
topparser.add_argument("--non-null-weight", type=float, default=1.0)
topparser.add_argument("--optimizer-args", type=js, default=None)
subparsers = topparser.add_subparsers(title='MTL Head Layouts',
dest='head_layout')
subparsers.required = True
subparsers.add_parser('none', )
denseparser = subparsers.add_parser('dense', )
denseparser.add_argument('--neurons-per-head',
type=intlist, required=True, action="append")
denseparser.add_argument(
'--layers-per-head', type=intlist, required=True)
denseparser.add_argument(
"--head-dropout", type=floatlist, required=True, action="append")
sparseparser = subparsers.add_parser('sparse')
sparseparser.add_argument(
'--layers-per-head', type=intlist, required=True)
sparseparser.add_argument(
'--sizes-per-head', type=intlist, required=True)
sparseparser.add_argument('--filters-per-head',
type=intlist, required=True)
return topparser
def parse_args(model_choices):
parser = build_parser(model_choices)
return parser.parse_args()
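# Illustrative invocation (script name and model name are hypothetical; the
# model must be one of the model_choices passed in, and top-level options such
# as --labels must appear before the head-layout sub-command):
#
#   python train_hps.py cnn2 run01 opportunity --labels 0,1 \
#       dense --neurons-per-head 128,64 --layers-per-head 2,2 \
#       --head-dropout 0.4,0.4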
``` |
{
"source": "JPWILSON/ActivityTracker",
"score": 3
} |
#### File: JPWILSON/ActivityTracker/project.py
```python
import os, sys
from database_setup import Base, User, Activity, Subactivity, Event
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from flask import Flask, render_template, url_for, redirect, request, flash, jsonify
app = Flask(__name__)
from flask import session as login_session
import random, string #These are used to create the anti-forgery state token for each login session
#For step 5:
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
import json
from flask import make_response
import requests
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
engine = create_engine('sqlite:///activitytracker.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
#First, the login page:
@app.route('/login')
def showLogin():
state = ''.join(random.choice(string.ascii_uppercase+string.digits) for x in xrange(32))
login_session['state'] = state
#return "The current session state is: %s"%login_session['state']
return render_template('login.html', STATE=state)
@app.route('/gconnect', methods=['POST'])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state parameter.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json', scope='')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(
json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid.
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
# If there was an error in the access token info, abort.
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is used for the intended user.
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(
json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app.
if result['issued_to'] != CLIENT_ID:
response = make_response(
json.dumps("Token's client ID does not match app's."), 401)
print "Token's client ID does not match app's."
response.headers['Content-Type'] = 'application/json'
return response
stored_access_token = login_session.get('access_token')
stored_gplus_id = login_session.get('gplus_id')
if stored_access_token is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps('Current user is already connected.'),
200)
response.headers['Content-Type'] = 'application/json'
return response
# Store the access token in the session for later use.
login_session['access_token'] = credentials.access_token
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
login_session['username'] = data['name']
login_session['picture'] = data['picture']
login_session['email'] = data['email']
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
access_token = login_session.get('access_token')
print 'In gdisconnect access token is %s' % access_token
print 'User name is: '
print login_session['username']
if access_token is None:
print 'Access Token is None'
response = make_response(json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']
h = httplib2.Http()
result = h.request(url, 'GET')[0]
print 'result is '
print result
if result['status'] == '200':
del login_session['access_token']
del login_session['gplus_id']
del login_session['username']
del login_session['email']
del login_session['picture']
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
        response = make_response(json.dumps('Failed to revoke token for given user.'), 400)
response.headers['Content-Type'] = 'application/json'
return response
#Making an API Endpoint (GET Request)
#Below is for a list of all users in the db
@app.route('/users/JSON')
def welcomeJSON():
users = session.query(User).all()
return jsonify(userDetails = [u.serialize for u in users])
#Below is for a list of all activities in the db
@app.route('/activities/JSON')
def activitiesJSON():
activities = session.query(Activity).all()
return jsonify( activityList = [a.serialize for a in activities])
#Below is for a list of all subactivities in the db
@app.route('/subactivities/JSON')
def subactivitiesJSON():
subactivities = session.query(Subactivity).all()
return jsonify(subactivityList = [s.serialize for s in subactivities])
#Below is for a list of all events carried out by all users in the list:
@app.route('/events/JSON')
def allEventsJSON():
events = session.query(Event).all()
return jsonify(eventList = [e.serialize for e in events])
#This is the JSON endpoint where all events are listed per user.
@app.route('/welcome/<int:user_id>/JSON')
@app.route('/welcome/<int:user_id>/activities/JSON')
def homepageJSON(user_id):
userSpecificEvents = session.query(Event).filter_by(done_by_id = user_id).all()
return jsonify(thisUsersEvents = [u.serialize for u in userSpecificEvents])
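#--- Illustrative sketch (not part of the original app) ---
#A hedged example of consuming the JSON endpoints above from a separate
#script, assuming the development server is running locally on port 5000
#(the port used in the __main__ block below) and that a user with id 1
#exists. It reuses the requests library the OAuth code above already imports.
def _example_consume_json_endpoints():
    base = 'http://localhost:5000'
    #List every activity known to the app.
    activities = requests.get(base + '/activities/JSON').json()
    #List every event recorded for the user with id 1.
    user_events = requests.get(base + '/welcome/1/JSON').json()
    return activities, user_events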
@app.route('/')
@app.route('/users')
def welcome():
users = session.query(User).all()
return render_template('welcomeNonReg.html', users = users)
@app.route('/welcome/add_user',methods = ['GET','POST'])
def addUser():
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
newUser = User(name = request.form['name'], email = request.form['email'])
session.add(newUser)
session.commit()
flash("Well done! %s has been added as a user." % newUser.name)
return redirect(url_for('welcome'))
else:
return render_template('addUser.html')
@app.route('/welcome/<int:user_id>/remove_user', methods = ['GET','POST'])
def remUser(user_id):
user2Del = session.query(User).filter_by(id = user_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
session.delete(user2Del)
session.commit()
flash("User %s has successfully been deleted" % user2Del.name)
return redirect(url_for('welcome'))
else:
return render_template('deleteUser.html', user = user2Del)
@app.route('/welcome/<int:user_id>')
@app.route('/welcome/<int:user_id>/activities')
def homepage(user_id):
user = session.query(User).filter_by(id = user_id).one()
events = session.query(Event).filter_by(done_by_id = user_id).all()
return render_template('welcomeAndList.html', descr = events, name = user.name, user_id = user.id)
#This function shows the details of a specific event instance.
#Each instance is unique, so only the instance_id is needed; the user is looked up from the instance so their name can be shown.
@app.route('/instances/<int:instance_id>')
def activityPage(instance_id):
#Want to show i.User name ii.subactivity iii.activity iv.instance details
instance = session.query(Event).filter_by(id = instance_id).one()
#instance = session.query(Event).first()
subactivity = session.query(Subactivity).filter_by(id = instance.subactivity_id).one()
activity = session.query(Activity).filter_by(id = subactivity.activity_id).one()
user = session.query(User).filter_by(id = instance.done_by_id).one()
return render_template('eventDetails.html', name = user.name, subact = subactivity.name,
act = activity.name, instObj = instance, user_id = user.id)
@app.route('/instances/<int:user_id>/add',methods = ['GET','POST'])
def addActivityInstance(user_id):
#Want to get i.Activity ii.Subactivity iii.
#Note! new act & subact should redirect to this page, not the homepage
#This doesn't link to activity list yet, need to first have subactivity list depending on which activity is listed
act_list = []
subact_list = []
user = session.query(User).filter_by(id = user_id).one()
activities = session.query(Activity).all()
for a in activities:
act_list.append(a.name)
subactivities = session.query(Subactivity).all()
for s in subactivities:
subact_list.append(s.name)
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
activityName = request.form['act']
act = session.query(Activity).filter_by(name = activityName).one()
subactName = request.form['subact']
sub = session.query(Subactivity).filter_by(name = subactName).one()
newInstance = Event(location = request.form['location'],
date = request.form['date'], description=request.form['description'],
subactivity_id = sub.id, done_by_id=user_id)
session.add(newInstance)
session.commit()
return redirect(url_for('homepage', user_id = user_id))
else:
return render_template('newInstance.html', name = user.name,
al = act_list, sal = subact_list, user_id = user_id,)
@app.route('/instances/<int:instance_id>/<int:user_id>/edit', methods = ['GET', 'POST'])
def editActivityInstance(instance_id, user_id):
subact_list = []
subactivities = session.query(Subactivity).all()
for s in subactivities:
subact_list.append(s.name)
event2Edit = session.query(Event).filter_by(id = instance_id).one()
user = session.query(User).filter_by(id = user_id).one()
name = user.name
#Now, the actual activity will not be recorded until link btwn it and subact made
activities = session.query(Activity).all()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
if request.form['description']:
subactName = request.form['subact']
sub = session.query(Subactivity).filter_by(name = subactName).one()
event2Edit.location = request.form['location']
event2Edit.date = request.form['date']
event2Edit.description = request.form['description']
event2Edit.done_by_id = user_id
event2Edit.subactivity_id = sub.id
session.add(event2Edit)
session.commit()
return redirect(url_for('activityPage', instance_id = instance_id))
else:
return render_template('editInstance.html', al = activities, sal = subact_list, user_id = user_id, event2Edit = event2Edit, name = name, instance_id=instance_id)
@app.route('/instances/<int:user_id>/<int:instance_id>/delete', methods = ['GET', 'POST'])
def deleteActivityInstance(user_id,instance_id):
item2Delete = session.query(Event).filter_by(id = instance_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
session.delete(item2Delete)
session.commit()
return redirect(url_for('homepage', user_id = user_id))
else:
return render_template('deleteInstance.html', user_id = user_id,
instance_id = instance_id, item = item2Delete)
@app.route('/activity/<int:user_id>/add', methods = ['GET', 'POST'])
def addActivity(user_id):
acts = session.query(Activity).all()
#Perhaps add the user_id so that we know who added this activity
#Have, but haven't used it yet - yes, will add to database setup: added_by!
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
newActivity = Activity(name=request.form['name'])
session.add(newActivity)
session.commit()
return redirect(url_for('addActivityInstance', user_id = user_id))
else:
return render_template('newActivity.html', activities=acts, user_id = user_id)
@app.route('/activity/<int:act_id>/<int:user_id>/edit', methods = ['GET', 'POST'])
def editActivity(act_id, user_id):
acts = session.query(Activity).all()
act2Edit = session.query(Activity).filter_by(id = act_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
if request.form['name']:
act2Edit.name = request.form['name']
session.add(act2Edit)
session.commit()
return redirect(url_for('addActivityInstance', user_id = user_id))
else:
return render_template('editActivity.html', activity2Edit = act2Edit, act_id = act_id, user_id = user_id)
@app.route('/activity/<int:user_id>/<int:act_id>/delete', methods = ['GET', 'POST'])
def deleteActivity(user_id, act_id):
    activity2Delete = session.query(Activity).filter_by(id = act_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
session.delete(activity2Delete)
session.commit()
return redirect(url_for('homepage', user_id = user_id))
else:
return render_template('deleteActivity.html', act = activity2Delete, user_id = user_id,
act_id = act_id)
@app.route('/subactivity/<int:user_id>/add', methods=['GET', 'POST'])
def addSubactivity(user_id):
acts = session.query(Activity).all()
subacts = session.query(Subactivity).all()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
act_name = request.form['act']
act = session.query(Activity).filter_by(name = act_name).one()
newSubactivity = Subactivity(name = request.form['name'],
activity_id = act.id)
session.add(newSubactivity)
session.commit()
return redirect(url_for('addActivityInstance', user_id = user_id))
else:
return render_template('newSubactivity.html', al = acts, subactivities = subacts, user_id = user_id)
@app.route('/subactivity/<int:subact_id>/<int:user_id>/edit', methods = ['GET', 'POST'])
def editSubActivity(subact_id, user_id):
subactivity2Edit = session.query(Subactivity).filter_by(id = subact_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
if request.form['name']:
subactivity2Edit.name = request.form['name']
session.add(subactivity2Edit)
session.commit()
return redirect(url_for('addActivityInstance', user_id = user_id))
else:
return render_template('editSubactivity.html', subact = subactivity2Edit, subact_id = subact_id, user_id = user_id)
@app.route('/subactivity/<int:user_id>/<int:subact_id>/delete', methods = ['GET', 'POST'])
def deleteSubActivity(user_id,subact_id):
    subact2Del = session.query(Subactivity).filter_by(id = subact_id).one()
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
session.delete(subact2Del)
session.commit()
return redirect(url_for('homepage', user_id = user_id))
else:
return render_template('deleteSubactivity.html', suba = subact2Del,
subact_id = subact_id, user_id = user_id)
if __name__ == '__main__':
app.secret_key = "super_secret_key"
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
#Now, I need to sort out all of the users, and incorporate them in all the steps that I have done so far...
``` |
{
"source": "jpwilson/OurFamilySocials",
"score": 3
} |
#### File: OurFamilySocials/albums/models.py
```python
from django.utils import timezone
from django.db import models
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.urls import reverse
class Profile(models.Model):
user = models.OneToOneField(get_user_model(), null=True, on_delete=models.CASCADE)
    bio = models.CharField(max_length=400, null=True, blank=True)
pub_date = models.DateTimeField("date published", auto_now_add=True)
@property
def member_duration(self):
return timezone.now() - self.pub_date
"""
MALE = "male"
FEMALE = "female"
REFUSE = "refuse"
GENDER = [
(MALE, _("Male")),
(FEMALE, _("Female")),
(REFUSE, _("Choose not to say")),
]
gender = models.CharField(
max_length=32,
choices=GENDER,
default=REFUSE,
)
def save(self, *args, **kwargs):
created = not self.pk
super().save(*args, **kwargs)
if created:
RelativeList.objects.create(user=self)
"""
def __str__(self):
return str(self.user.username)
def create_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(
user=instance, bio="this is bio of {}".format(instance.username)
)
print("We have created a profile via a post save signal")
post_save.connect(create_profile, sender=get_user_model())
def update_profile(sender, instance, created, **kwargs):
if not created:
instance.profile.save()
print("We have now updated a profile via a signal")
post_save.connect(update_profile, sender=get_user_model())
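# --- Illustrative sketch (not part of the original models) ---
# With the two post_save receivers above, creating a user is enough to get a
# matching Profile row. A hedged example of what that looks like in a Django
# shell (the username is made up):
#
#     >>> from django.contrib.auth import get_user_model
#     >>> user = get_user_model().objects.create_user(username="demo_user")
#     We have created a profile via a post save signal
#     >>> user.profile.bio
#     'this is bio of demo_user'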
class Location(models.Model):
name = models.CharField(max_length=200)
    description = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=200)
    description = models.CharField(max_length=200, null=True, blank=True)
def __str__(self):
return self.name
class Album(models.Model):
title = models.CharField(max_length=200)
author = models.ForeignKey(
get_user_model(), on_delete=models.CASCADE, related_name="author"
)
    description = models.CharField(max_length=400, null=True, blank=True)
pub_date = models.DateTimeField("date published", auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
    blog = models.TextField(
        null=True, blank=True
    ) # TODO 22Sept Change this to use tinyMCE or similar......
# All fields to add in the future
# TODO 22Sept add a 'last edit date' field
# TODO 22Sept add a 'django-location-field' field; unrelated, also possibly a 'duration' of album?
locations = models.ManyToManyField(Location, blank=True)
tags = models.ManyToManyField(Tag, blank=True)
people = models.ManyToManyField(get_user_model(), related_name="people", blank=True)
# TODO 22Sept - modify people list in form (see queryset in https://medium.com/swlh/django-forms-for-many-to-many-fields-d977dec4b024)
# TODO 22Sept: Add a 'comments' section/feature
# TODO 22Sept: Add a 'likes' section/feature
class Meta:
ordering = ["title", "description", "pub_date", "author"]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse("album_detail", kwargs={"pk": str(self.pk)})
# def upload_gallery_image(instance, filename):
# return f"/media/{instance.album.title}/gallery/{filename}"
class Image(models.Model):
image = models.ImageField(
upload_to="pictures/", blank=True
) # upload_gallery_image)
    caption = models.CharField(max_length=400, null=True, blank=True)
album = models.ForeignKey(Album, on_delete=models.CASCADE, related_name="images")
locations = models.ManyToManyField(Location, blank=True)
pub_date = models.DateTimeField("date published", auto_now_add=True)
def get_absolute_url(self):
return reverse("image_detail", kwargs={"pk": str(self.pk)})
def __str__(self):
return str(self.caption)
# TODO -22Sept21 - add likes and comments...
```
#### File: OurFamilySocials/albums/views.py
```python
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http.response import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.views.generic import ListView
from django.forms import modelformset_factory, inlineformset_factory
from django.urls import reverse
from .models import Image, Album
from .forms import ImageForm, AlbumForm
class AlbumListView(LoginRequiredMixin, ListView):
template_name = "albums/album_list.html"
context_object_name = "albums"
def get_queryset(self):
return Album.objects.filter(author=self.request.user)
# TODO NNB! test these views!
# TODO change name from add_album_view to: add_album
# TODO the 'cancel' button on edit page should go back to album_view, not homepage (list of albums)
def add_album_view(request):
ImageFormSet = modelformset_factory(Image, form=ImageForm, extra=10)
if request.method == "GET":
album_form = AlbumForm()
formset = ImageFormSet(queryset=Image.objects.none())
return render(
request, "albums/index.html", {"album_form": album_form, "formset": formset}
)
elif request.method == "POST":
album_form = AlbumForm(request.POST)
formset = ImageFormSet(request.POST, request.FILES)
if album_form.is_valid() and formset.is_valid():
album_obj = album_form.save(commit=False)
album_obj.author = request.user
album_obj.save()
for form in formset.cleaned_data:
if form:
image = form["image"]
Image.objects.create(
image=image, album=album_obj, caption=form["caption"]
)
return HttpResponseRedirect(
reverse("albums:view_album", args=(album_obj.id,))
)
else:
# print("Out errors are: ", album_form.errors, formset.errors)
return render(
request,
"albums/index.html",
{
"album_form": album_form,
"formset": formset,
"album_errors": album_form.errors,
"form_errors": formset.errors,
},
)
def edit_album(request, pk):
ImageFormSet = inlineformset_factory(Album, Image, form=ImageForm, extra=2)
album = Album.objects.get(id=pk)
if request.method == "GET":
album_form = AlbumForm(instance=album)
formset = ImageFormSet(queryset=Image.objects.none())
return render(
request, "albums/index.html", {"album_form": album_form, "formset": formset}
)
elif request.method == "POST":
album_form = AlbumForm(request.POST, instance=album)
formset = ImageFormSet(request.POST, request.FILES)
if album_form.is_valid() and formset.is_valid():
print("Things are VALIDIO!!!")
album_obj = album_form.save(commit=False)
album_obj.author = request.user
album_obj.save()
for form in formset.cleaned_data:
if form:
image = form["image"]
Image.objects.create(
image=image, album=album_obj, caption=form["caption"]
)
return HttpResponseRedirect(
reverse("albums:view_album", args=(album_obj.id,))
)
        else:
            print(album_form.errors, formset.errors)
            return render(request, "albums/index.html",
                          {"album_form": album_form, "formset": formset})
def delete_album(request, pk):
album = Album.objects.get(id=pk)
if request.method == "POST":
album.delete()
return HttpResponseRedirect(reverse("home"))
return render(request, "albums/delete.html", {"album": album})
def album_gallery_view(request, pk):
album = Album.objects.get(id=pk)
return render(request, "albums/gallery.html", {"album": album})
```
#### File: OurFamilySocials/pages/tests.py
```python
from django.http import response
from django.test import TestCase, SimpleTestCase
from django.contrib.auth import get_user_model
from django.urls import reverse, resolve
from .views import AboutPageView, HomePageView
# TODO Switch to the setUpTestData class for full test setup at class level (for efficiency) p83 dfp
class LoggedInHomePageTests(TestCase):
def setUp(self):
test_user = get_user_model().objects.create_user( # new
username="test_user", email="<EMAIL>", password="<PASSWORD>"
)
test_user.save()
# self.client.login(username=test_user.username, password=<PASSWORD>)
login = self.client.login(username="test_user", password="<PASSWORD>")
url = reverse("home")
self.response = self.client.get(url)
def test_homepage_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_homepage_template_used(self):
self.assertTemplateUsed(self.response, "home.html")
def test_homepage_contains_correct_html(self):
self.assertContains(self.response, "Homepage")
def test_homepage_does_not_contain_incorrect_html(self):
self.assertNotContains(self.response, "Hi there! I should not be on the page.")
def test_homepage_url_resolves_homepageview(self): # new
view = resolve("/")
self.assertEqual(view.func.__name__, HomePageView.as_view().__name__)
class LoggedOutHomePageTests(TestCase):
def setUp(self):
url = reverse("home")
self.response = self.client.get(url)
def test_homepage_status_code(self):
self.assertEqual(self.response.status_code, 302)
class AboutPageTests(SimpleTestCase):
def setUp(self):
url = reverse("about")
self.response = self.client.get(url)
def test_aboutpage_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_aboutpage_template(self):
self.assertTemplateUsed(self.response, "about.html")
def test_aboutpage_contains_correct_html(self):
self.assertContains(self.response, "About Our Family Socials")
def test_aboutpage_does_not_contain_incorrect_html(self):
self.assertNotContains(self.response, "Hi there! I should not be on the page.")
def test_aboutpage_url_resolves_aboutpageview(self):
view = resolve("/about/")
self.assertEqual(view.func.__name__, AboutPageView.as_view().__name__)
``` |
{
"source": "JPWILSON/RestaurantMenuApp",
"score": 3
} |
#### File: JPWILSON/RestaurantMenuApp/project.py
```python
import sys
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Restaurant, MenuItem
from flask import Flask, render_template, url_for, request, redirect, flash, jsonify
engine = create_engine('sqlite:///restaurantmenu.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
app = Flask(__name__)
@app.route("/restaurants/<int:rest_id>/menu/JSON/")
def menuJSON(rest_id):
restaurant = session.query(Restaurant).filter_by(id = rest_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = rest_id).all()
return jsonify(MenuItems = [i.serialize for i in items])
@app.route("/restaurants/<int:rest_id>/menu/<int:item_id>/JSON/")
def itemJSON(rest_id, item_id):
menuItem = session.query(MenuItem).filter_by(id = item_id).one()
return jsonify(MenuItem = menuItem.serialize)
@app.route("/")
@app.route("/restaurants/")
def restaurants():
restaurants = session.query(Restaurant).all()
return render_template('restaurants.html', restaurants = restaurants)
@app.route("/restaurants/<int:rest_id>/menu")
def menu(rest_id):
restaurant = session.query(Restaurant).filter_by(id= rest_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = rest_id).all()
return render_template('menu.html', restaurant = restaurant, items = items)
@app.route("/restaurants/<int:rest_id>/newItem/", methods = ['GET', 'POST'])
def newItem(rest_id):
restaurantery = session.query(Restaurant).filter_by(id = rest_id).one()
if request.method == 'POST':
newItem = MenuItem(name = request.form['name'], price = request.form['price'],
description = request.form['description'], restaurant_id = rest_id)
session.add(newItem)
session.commit()
flash("New menu item: '%s' created!" % newItem.name)
return redirect(url_for('menu', rest_id = rest_id))
else:
return render_template('newItem.html', restaurant = restaurantery)
@app.route("/restaurants/<int:rest_id>/<int:item_id>/editItem/", methods = ['GET', 'POST'])
def editItem(rest_id, item_id):
restaurant = session.query(Restaurant).filter_by(id = rest_id).one()
itemToEdit = session.query(MenuItem).filter_by(id = item_id).one()
if request.method == 'POST':
if request.form['name']:
itemToEdit.name = request.form['name']
itemToEdit.price = request.form['price']
itemToEdit.description = request.form['description']
session.add(itemToEdit)
session.commit()
flash("Menu item '%s' was edited" %itemToEdit.name)
return redirect(url_for('menu', rest_id = rest_id))
else:
return render_template('editItem.html', rest = restaurant, item = itemToEdit)
@app.route("/restaurants/<int:rest_id>/<int:item_id>/deleteItem/", methods = ['GET', 'POST'])
def deleteItem(rest_id, item_id):
restaurant = session.query(Restaurant).filter_by(id = rest_id).one()
itemToDelete = session.query(MenuItem).filter_by(id = item_id).one()
if request.method == 'POST':
session.delete(itemToDelete)
session.commit()
flash("'%s' was deleted from the menu!" % itemToDelete.name)
return redirect(url_for('menu', rest_id = rest_id))
else:
return render_template('deleteItem.html', rest = restaurant, item = itemToDelete)
if __name__ == '__main__':
app.secret_key = 'super_secret_key'
app.debug = True
app.run(host = '0.0.0.0', port= 5000)
#flash("insert message to flash here")
#get_flashed_messages()
``` |
{
"source": "JPWKU/unix-agent",
"score": 2
} |
#### File: agent/cmd/add_plugin.py
```python
import argparse
import configparser
import importlib
import pkgutil
import sys
import types
import dcm.agent.config as agent_config
import dcm.agent.plugins.api.base as plugin_base
def setup_command_line_parser():
parser = argparse.ArgumentParser(
description='DCM Agent Plugin Configuration Program.')
parser.add_argument("--configfile", "-c",
dest="conffile",
help="A path to the agents configuration file",
default="/dcm/etc/agent.conf")
parser.add_argument("--prefix", "-p",
dest="prefix",
help="A string to prepend to all the command names found",
default="")
parser.add_argument("--name", "-n",
dest="shortname",
help="Force the plugin name. Only used without -f.",
default=None)
parser.add_argument("-f", "--find", help="Search the module for plugins",
action="store_true")
parser.add_argument("-o", "--overwrite", help="Overwrite existing entries.",
action="store_true")
parser.add_argument("-d", "--delete", help="Delete the plugin name.",
action="store_true")
parser.add_argument('module_name', type=str, metavar="<module name>",
help="The name of the module where this program will search for plugins.")
return parser
def get_plugin_details(full_module_name, short_module_name):
mod = importlib.import_module(full_module_name)
lp_func = getattr(mod, 'load_plugin', None)
    if lp_func is None or not isinstance(lp_func, types.FunctionType):
        return None
for d in mod.__dict__:
c = getattr(mod, d)
try:
if issubclass(c, plugin_base.Plugin):
cmd_name = getattr(c, 'command_name', None)
if cmd_name is None:
cmd_name = short_module_name
long_runner = getattr(c, 'long_runner', None)
return (cmd_name, long_runner)
except TypeError:
pass
return None
def find_plugins(base_module_name):
plugin_list = []
try:
base_module = importlib.import_module(base_module_name)
for loader, module_name, is_pkg in pkgutil.walk_packages(
base_module.__path__):
full_mod_name = base_module_name + '.' + module_name
if is_pkg:
fnd_list = find_plugins(full_mod_name)
plugin_list.extend(fnd_list)
else:
plugin_info = get_plugin_details(full_mod_name, module_name)
if plugin_info is not None:
plugin_list.append({'module_name': full_mod_name,
'command_name': plugin_info[0],
'long_runner': plugin_info[1]})
except Exception as ex:
print(str(ex))
return plugin_list
def rewrite_conf(conf_file, module_list, prefix, force):
parser = configparser.ConfigParser()
parser.read(conf_file)
for m in module_list:
section_name = "plugin:%s%s" % (prefix, m['command_name'])
try:
parser.add_section(section_name)
except configparser.DuplicateSectionError:
if not force:
raise Exception(
"The plugin %s already exists. Please rename it."
% m['command_name'])
parser.set(section_name, "type", "python_module")
parser.set(section_name, "module_name", m['module_name'])
if m['long_runner'] is not None:
parser.set(section_name, "long_runner", str(m['long_runner']))
with open(conf_file, "w") as fptr:
parser.write(fptr)
def delete_plugin(conf_file, plugin_name):
parser = configparser.ConfigParser()
parser.read(conf_file)
section_name = "plugin:%s" % plugin_name
new_config = configparser.ConfigParser()
found = False
for s in parser.sections():
if s != section_name:
new_config.add_section(s)
for o in parser.options(s):
v = parser.get(s, o)
new_config.set(s, o, v)
else:
found = True
if not found:
return False
with open(conf_file, "w") as fptr:
new_config.write(fptr)
return True
def main(args=sys.argv):
parser = setup_command_line_parser()
opts = parser.parse_args(args=args[1:])
conf = agent_config.AgentConfig([opts.conffile])
if opts.delete:
found = delete_plugin(conf.plugin_configfile, opts.module_name)
if not found:
print("The plugin name %s was not found." % opts.module_name)
return 1
return 0
if opts.find:
module_list = find_plugins(opts.module_name)
else:
short_module_name = opts.module_name[opts.module_name.rfind(".")+1:]
plugin_info = get_plugin_details(opts.module_name, short_module_name)
if plugin_info is None:
raise Exception(
"The module %s is not a valid plugin" % opts.module_name)
plugin_name = plugin_info[0]
if opts.shortname is not None:
plugin_name = opts.shortname
module_list = [{'module_name': opts.module_name,
'command_name': plugin_name,
'long_runner': plugin_info[1]}]
rewrite_conf(conf.plugin_configfile, module_list,
opts.prefix, opts.overwrite)
print("Updated the plugin configuration file %s" % conf.plugin_configfile)
for m in module_list:
print("\tAdded command %s" % m['command_name'])
print("Restart the agent for changes to take effect.")
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
```
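For orientation, the sketch below shows the minimal shape of a plugin module that the discovery code in add_plugin.py would pick up: a module-level `load_plugin` callable plus a class derived from `dcm.agent.plugins.api.base.Plugin` carrying the optional `command_name` and `long_runner` attributes. Only those attribute names come from the scanner above; the class body and the `load_plugin` signature are invented for illustration.
```python
# Hypothetical plugin module, shaped to match what get_plugin_details() scans
# for. Everything beyond the attribute names is illustrative only.
import dcm.agent.plugins.api.base as plugin_base
class ExamplePlugin(plugin_base.Plugin):
    # Optional attributes read by the discovery code in add_plugin.py.
    command_name = "example_command"
    long_runner = False
def load_plugin(*args, **kwargs):
    # The scanner only checks that a load_plugin callable exists; the real
    # signature is defined by the agent's plugin API, which is not shown here.
    return ExamplePlugin
```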
#### File: dcm/agent/config.py
```python
import configparser
import logging
import logging.config
import os
import tempfile
import yaml
import dcm
import dcm.agent.cloudmetadata as cloudmetadata
from dcm.agent.cloudmetadata import CLOUD_TYPES
import dcm.agent.connection.websocket as websocket
import dcm.agent.exceptions as exceptions
import dcm.agent.job_runner as job_runner
from dcm.agent.plugins.api.exceptions import AgentPluginConfigException
import dcm.agent.tests.utils.test_connection as test_connection # TODO
import dcm.agent.utils as utils
_g_logger = logging.getLogger(__name__)
_g_conf_file_env = "DCM_AGENT_CONF"
class PLATFORM_TYPES(object):
PLATFORM_UBUNTU = "ubuntu"
PLATFORM_RHEL = "rhel"
PLATFORM_CENTOS = "centos"
PLATFORM_DEBIAN = "debian"
def get_python_script_dir():
    # we allow scripts to be pulled out of the python package for tests and
    # for installs done with something other than our packaging
_ROOT = dcm.agent.get_root_location()
return os.path.join(_ROOT, 'scripts')
def get_connection_object(conf):
con_type = conf.connection_type
if not con_type:
raise exceptions.AgentOptionValueNotSetException("connection_type")
# XXX should we stevedore load this or __import__ it or go with a
# hard coded list? for now hard coded list
if con_type == "success_tester":
source_file = conf.connection_source_file
if not source_file:
raise exceptions.AgentOptionValueNotSetException(
"[connection]source_file",
msg="Using the %s connection type." % con_type)
fptr = open(source_file, "r")
if not conf.connection_dest_file:
raise exceptions.AgentOptionValueNotSetException(
"[connection]dest_file",
msg="Using the %s connection type." % con_type)
outf = open(conf.connection_dest_file, "w")
con = test_connection.TestConnection(fptr, outf)
elif con_type == "ws":
if not conf.connection_agentmanager_url:
raise exceptions.AgentOptionValueNotSetException(
"[connection]agentmanager_url")
con = websocket.WebSocketConnection(
conf.connection_agentmanager_url,
backoff_amount=conf.connection_backoff,
max_backoff=conf.connection_max_backoff,
heartbeat=conf.connection_heartbeat_frequency,
allow_unknown_certs=conf.connection_allow_unknown_certs,
ca_certs=conf.connection_ca_cert)
else:
raise exceptions.AgentOptionValueException(
"[connection]type", con_type, "ws,success_tester,dummy")
return con
class ConfigOpt(object):
def __init__(self, section, name, t, default=None,
options=None, minv=None, maxv=None, help_msg=None,
hidden=False):
self.section = section
self.name = name
self.my_type = t
self.options = options
self.default = default
self.minv = minv
self.maxv = maxv
self.help_msg = help_msg
self.features = {}
self.hidden = hidden
def get_option_name(self):
option_name = "%s_%s" % (self.section, self.name)
return option_name
def get_default(self):
return self.default
def get_help(self):
return self.help_msg
def get_value(self, parser, default=None, **kwargs):
if default is None:
default = self.default
try:
v = parser.get(self.section, self.name, fallback=default)
except configparser.NoOptionError:
v = default
except configparser.NoSectionError:
v = default
if v is None:
return v
try:
if self.my_type == list:
v = v.split(",")
elif self.my_type == bool:
if type(v) == str:
v = (v.lower() == "true" or v.lower() == "yes")
else:
v = bool(v)
else:
v = self.my_type(v)
except ValueError:
raise exceptions.AgentOptionTypeException(
self.name, self.my_type, v)
if self.options is not None:
vx = v
if type(v) == str:
vx = vx.lower()
if vx not in self.options:
raise exceptions.AgentOptionValueException(
self.name, self.options, v)
if self.my_type == int or self.my_type == float:
if self.minv is not None and v < self.minv:
raise exceptions.AgentOptionRangeException(
self.name, self.minv, self.maxv)
            if self.maxv is not None and v > self.maxv:
                raise exceptions.AgentOptionRangeException(
                    self.name, self.minv, self.maxv)
return v
class FilenameOpt(ConfigOpt):
def __init__(self, section, name, default=None, help_msg=None):
super(FilenameOpt, self).__init__(section, name, str, default=default,
help_msg=help_msg)
def get_value(self, parser, relative_path=None, **kwarg):
v = super(FilenameOpt, self).get_value(parser)
if v is None:
return None
if not os.path.isabs(v):
v = os.path.join(relative_path, v)
return os.path.abspath(v)
class AgentConfig(object):
"""
This is a serializable object that is threaded through to all classes.
    When/if multiprocessing is used it will be sent to the worker threads.
It is semi-read-only. Any write operation must be done with thread
primitives. The exception is set handshake because that will be done
before any thread is created.
"""
def __init__(self, conf_files):
self._cli_args = None
self._remaining_argv = None
self.instance_id = None
self.jr = None
self.state = "STARTING"
self.features = {}
self.agent_id = None
self.customer_id = None
self.server_id = None
self.server_name = None
self.storage_dbfile = None
self.meta_data_object = None # until we call set_metadata_object
self.config_files = conf_files
self.parse_config_files(build_options_list(), add_features="features")
# here is where we set which Meta object to use from cloudmetadata.py
cloudmetadata.set_metadata_object(self)
self._normalize_options()
setup_logging(self.logging_configfile)
def _normalize_options(self):
if self.storage_dbfile is None:
self.storage_dbfile = \
os.path.join(self.storage_base_dir, "secure", "agentdb.sql")
if self.storage_script_dir is None:
self.storage_script_dir = \
os.path.join(self.storage_base_dir, "bin")
if self.storage_script_dir == "/PYTHON_LIBS_SCRIPTS":
self.storage_script_dir = None
if self.platform_name is None or self.platform_version is None:
distro_name, distro_version = utils.identify_platform(self)
self.platform_name = distro_name
self.platform_version = distro_version
def get_script_location(self, name):
if self.storage_script_dir is not None:
path = os.path.join(self.storage_script_dir, name)
_g_logger.debug("Script location %s" % path)
if not os.path.exists(path):
raise AgentPluginConfigException(
"There is no proper configuration for %s" % name)
return path
script_dir = get_python_script_dir()
_g_logger.debug("Script Dir %s" % script_dir)
for platform in self.platform_script_locations:
_g_logger.debug("Script platform %s" % platform)
path = os.path.join(script_dir, platform, name)
_g_logger.debug("Script location %s" % path)
if os.path.exists(path):
return path
return None
def is_upgrading(self):
return False
def start_job_runner(self):
self.jr = job_runner.JobRunner(self)
def stop_job_runner(self):
if self.jr:
self.jr.shutdown()
self.jr = None
def get_temp_file(self, filename, isdir=False):
new_dir = tempfile.mkdtemp(dir=self.storage_temppath)
if isdir:
return new_dir
return os.path.join(new_dir, filename)
def parse_config_files(self, option_list, add_features=None):
# set all the default values on the agent conf object
for o in option_list:
k = o.get_option_name()
v = o.get_default()
setattr(self, k, v)
for config_file in self.config_files:
relative_path = os.path.dirname(config_file)
parser = configparser.ConfigParser()
parser.read(config_file)
if add_features is not None:
try:
features = parser.items(add_features)
for k, v in features:
self.features[k] = v
except configparser.NoSectionError:
pass
for opt in option_list:
try:
oname = opt.get_option_name()
v = opt.get_value(parser, relative_path=relative_path,
default=getattr(self, oname))
setattr(self, oname, v)
except configparser.NoSectionError:
raise exceptions.AgentOptionSectionNotFoundException(
opt.name)
def get_secure_dir(self):
token_dir = os.path.join(self.storage_base_dir, "secure")
if not os.path.exists(token_dir):
os.mkdir(token_dir, 0o700)
# At some point we should validate that only this user can read this
# file
# utils.validate_file_permissions(
# token_dir, username=self.conf.system_user, permissions=0700)
#
return token_dir
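# --- Illustrative sketch (not part of the original module) ---
# parse_config_files() exposes every "[section] name" option as a
# "section_name" attribute on AgentConfig, using ConfigOpt.get_option_name().
# A small self-contained example of that mapping:
def _example_option_name_mapping():
    import io
    parser = configparser.ConfigParser()
    parser.read_file(io.StringIO("[connection]\ntype=ws\n"))
    opt = ConfigOpt("connection", "type", str, default="ws")
    # Returns ("connection_type", "ws"): the attribute name AgentConfig would
    # set, and the parsed value.
    return opt.get_option_name(), opt.get_value(parser)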
def build_options_list():
option_list = [
ConfigOpt("pydev", "host", str, default=None, options=None,
help_msg="The hostname of the pydev debugger"),
ConfigOpt("pydev", "port", int, default=None, options=None,
help_msg="The port where the pydev debugger is listening"),
ConfigOpt("workers", "count", int, default=2, options=None,
help_msg="The number of worker threads that will be "
"processing incoming requests"),
ConfigOpt("workers", "long_runner_threads", int, default=1,
options=None,
help_msg="The number of worker threads that will be "
"processing long running plugins (anything that "
"returns a job description)"),
ConfigOpt("connection", "type", str, default="ws", options=None,
help_msg="The type of connection object to use. Supported "
"types are ws and fallback"),
FilenameOpt("connection", "source_file", default=None),
FilenameOpt("connection", "dest_file", default=None),
ConfigOpt("connection", "agentmanager_url", str, default=None,
help_msg="The url of the agent manager with which this "
"agent will communicate."),
ConfigOpt("connection", "backoff", int, default=1000,
help_msg="The number of milliseconds to add to the wait "
"time before retrying a failed connection."),
ConfigOpt("connection", "max_backoff", int, default=300000,
help_msg="The maximum number of milliseconds to wait before "
"retrying a failed connection."),
        ConfigOpt("connection", "heartbeat_frequency", int, default=30,
                  help_msg="The frequency in seconds at which heartbeat "
                           "messages are sent over the websocket "
                           "connection."),
        ConfigOpt("connection", "allow_unknown_certs", bool, default=False,
                  help_msg="A flag to disable DCM certificate verification. "
                           "When set to True certificates will not be "
                           "verified. This is useful for testing but should "
                           "otherwise be set to False."),
        FilenameOpt("connection", "ca_cert", default=None,
                    help_msg="A path to the location of the CA certificate to "
                             "be used when authenticating with DCM."),
FilenameOpt("logging", "configfile", default=None,
help_msg="The location of the log configuration file"),
FilenameOpt("plugin", "configfile",
help_msg="The location of the plugin configuration file"),
FilenameOpt("storage", "temppath", default="/tmp"),
FilenameOpt("storage", "base_dir", default="/dcm"),
FilenameOpt("storage", "mountpoint", default="/mnt/dcmdata"),
FilenameOpt("storage", "dbfile", default=None),
FilenameOpt("storage", "script_dir", default=None),
ConfigOpt("storage", "db_timeout", int, default=60*60*4,
help_msg="The amount of time in seconds for a request id to "
"stay in the database."),
ConfigOpt("storage", "default_filesystem", str, default="ext3"),
ConfigOpt("system", "user", str, default="dcm"),
ConfigOpt("system", "sudo", str, default="/usr/bin/sudo"),
ConfigOpt("intrusion_detection", "ossec", bool, default=False),
ConfigOpt("intrusion_detection", "max_process_time", float, default=5.0,
                  help_msg="This value specifies the amount of time that must "
"expire between processing the alerts file. This "
"value is here to prevent too many frequent alerts "
"from overwhelming the agent."),
ConfigOpt("intrusion_detection", "alert_threshold", int, default=10,
help_msg="The ossec alert level threshold to send to dcm."
" Any alert level below this threshold will be"
" logged locally but not forwarded back to DCM."),
ConfigOpt("cloud", "type", str, default=CLOUD_TYPES.UNKNOWN,
help_msg="The type of cloud on which this agent is running"),
ConfigOpt("cloud", "metadata_url", str,
default=None,
help_msg="The url of the metadata server. Not applicable "
"to all clouds."),
ConfigOpt("messaging", "retransmission_timeout", float,
default=5.0),
ConfigOpt("messaging", "max_at_once", int, default=-1,
help_msg="The maximum number of commands that can be "
"outstanding at once. -1 means infinity."),
ConfigOpt("platform", "script_locations", list,
default="common-linux"),
        ConfigOpt("platform", "name", str, default=None,
                  help_msg="The platform/distribution on which this agent is "
                           "being installed. Must be used with "
                           "[platform]version.",
options=["ubuntu", "debian", "rhel",
"centos", "fedora"]),
ConfigOpt(
"platform", "version", str, default=None,
help_msg="The platform/distribution version on which this "
"agent is being installed. Must be used with "
"[platform]name."),
ConfigOpt("jobs", "retain_job_time", int, default=3600),
ConfigOpt("test", "skip_handshake", bool, default=False,
help_msg="This value is for internal testing only. "
"Do not change it.", hidden=True),
ConfigOpt("extra", "location", str,
default='http://s3.amazonaws.com/es-pyagent/',
help_msg="Location of extra packages"),
ConfigOpt("extra", "package_name", str, default=None,
help_msg="Name of extra package to be installed"),
ConfigOpt("extra", "base_path", str, default="/opt/dcm-agent-extras",
help_msg="The location where the extras package will be "
"installed. This should only change in conjunction"
" with the extras omnibus installer."),
ConfigOpt("configuration_management", "chef_client_version", str, default="11.16.4",
help_msg="Version of chef client to be installed")
]
return option_list
def setup_logging(logging_configfile):
top_logger = 'dcm.agent'
if logging_configfile is None:
loghandler = logging.StreamHandler()
top_logger = logging.getLogger("")
top_logger.setLevel(logging.DEBUG)
top_logger.addHandler(loghandler)
return
if not os.path.exists(logging_configfile):
raise exceptions.AgentOptionPathNotFoundException(
"logging:configfile", logging_configfile)
with open(logging_configfile, 'rt') as f:
config = yaml.load(f.read())
logging.config.dictConfig(config)
def get_config_files(conffile=None):
candidates = ["/dcm/etc/agent.conf"]
if _g_conf_file_env in os.environ:
candidates.append(os.environ[_g_conf_file_env])
if conffile:
candidates.append(conffile)
locations = []
for f in candidates:
f = os.path.abspath(f)
if os.path.exists(f):
if f not in locations:
locations.append(f)
return locations
```
#### File: agent/connection/websocket.py
```python
import datetime
import errno
import json
import logging
import queue
import socket
import ssl
import threading
import ws4py.client.threadedclient as ws4py_client
import dcm.agent.exceptions as exceptions
import dcm.agent.handshake as handshake
import dcm.agent.logger as dcm_logger
import dcm.agent.events.state_machine as state_machine
import dcm.agent.utils as agent_utils
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
_g_wire_logger = agent_utils.get_wire_logger()
class WsConnEvents:
POLL = "POLL"
CONNECTING_FINISHED = "CONNECTING_FINISHED"
CONNECT_TIMEOUT = "CONNECT_TIMEOUT"
INCOMING_MESSAGE = "INCOMING_MESSAGE"
SUCCESSFUL_HANDSHAKE = "SUCCESSFUL_HANDSHAKE"
ERROR = "ERROR"
CLOSE = "CLOSE"
class WsConnStates:
WAITING = "WAITING"
CONNECTING = "CONNECTING"
HANDSHAKING = "HANDSHAKING"
HANDSHAKE_RECEIVED = "HANDSHAKE_RECEIVED"
OPEN = "OPEN"
DONE = "DONE"
class Backoff(object):
def __init__(self, max_backoff_seconds,
initial_backoff_second=0.5):
self._backoff_seconds = initial_backoff_second
self._max_backoff = max_backoff_seconds
if self._backoff_seconds > self._max_backoff:
self._backoff_seconds = self._max_backoff
self._ready_time = datetime.datetime.now()
self._last_activity = self._ready_time
self._initial_backoff_second = initial_backoff_second
def activity(self):
self._backoff_seconds = self._initial_backoff_second
self._ready_time = datetime.datetime.now()
self._last_activity = self._ready_time
def _set_ready_time(self, backoff):
if backoff > self._max_backoff:
backoff = self._max_backoff
self._backoff_seconds = backoff
new_ready_time = datetime.datetime.now() +\
datetime.timedelta(seconds=self._backoff_seconds)
if new_ready_time > self._ready_time:
self._ready_time = new_ready_time
def error(self):
self._set_ready_time(self._backoff_seconds*2.0)
def ready(self):
return self._ready_time < datetime.datetime.now()
def force_backoff_time(self, backoff_seconds):
self._ready_time = datetime.datetime.now() +\
datetime.timedelta(seconds=backoff_seconds)
def seconds_until_ready(self):
d = self._ready_time - datetime.datetime.now()
return max(0.0, d.total_seconds())
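# --- Illustrative sketch (not part of the original module) ---
# Hedged example of how the connection code below uses Backoff: every error
# doubles the wait (capped at max_backoff_seconds) and any successful
# activity resets it to the initial value.
def _example_backoff_usage():
    b = Backoff(300.0, initial_backoff_second=0.5)
    b.error()                        # backoff grows to roughly 1.0s
    b.error()                        # backoff grows to roughly 2.0s
    wait_seconds = b.seconds_until_ready()
    b.activity()                     # a healthy connection resets the backoff
    return wait_seconds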
class RepeatQueue(object):
def __init__(self, max_req_id=500):
self._q = queue.Queue()
self._lock = threading.RLock()
self._message_id_set = set()
self._request_id_count = {}
self._max_req_id = max_req_id + 1
def put(self, item, block=True, timeout=None):
self._lock.acquire()
try:
try:
if 'message_id' in item:
message_id = item['message_id']
if message_id in self._message_id_set:
_g_logger.info("Skipping sending a retransmission "
"of message id %s" % message_id)
return
else:
_g_logger.debug("Adding the message with id %s " %
message_id)
self._message_id_set.add(message_id)
if 'request_id' in item:
request_id = item['request_id']
if request_id not in self._request_id_count:
self._request_id_count[request_id] = 0
self._request_id_count[request_id] += 1
if self._request_id_count[request_id] >= self._max_req_id:
msg = "TOO MANY MESSAGES FOR %s!" % request_id
_g_logger.error(msg)
agent_utils.build_assertion_exception(_g_logger, msg)
if self._request_id_count[request_id] ==\
self._max_req_id:
dcm_logger.log_to_dcm_console_overloaded(msg=msg)
return
except Exception as ex:
_g_logger.warn("Exception checking if message is a retrans "
"%s" % str(ex))
return self._q.put(item, block=block, timeout=timeout)
finally:
self._lock.release()
def get(self, block=True, timeout=None):
self._lock.acquire()
try:
item = self._q.get(block=block, timeout=timeout)
try:
if 'message_id' in item:
if item['message_id'] in self._message_id_set:
self._message_id_set.remove(item['message_id'])
except Exception as ex:
_g_logger.info("Exception checking if message has an id "
"%s" % str(ex))
return item
finally:
self._lock.release()
def task_done(self):
return self._q.task_done()
class _WebSocketClient(ws4py_client.WebSocketClient):
def __init__(self, manager, url, receive_callback, protocols=None,
extensions=None,
heartbeat_freq=None, ssl_options=None, headers=None):
ws4py_client.WebSocketClient.__init__(
self, url, protocols=protocols, extensions=extensions,
heartbeat_freq=heartbeat_freq, ssl_options=ssl_options,
headers=headers)
_g_logger.info("Attempting to connect to %s" % url)
self._receive_callback = receive_callback
self.manager = manager
self._url = url
self._dcm_closed_called = False
def opened(self):
_g_logger.debug("Web socket %s has been opened" % self._url)
def closed(self, code, reason=None):
_g_logger.info("Web socket %s has been closed %d %s"
% (self._url, code, reason))
_g_logger.debug("Sending error event to connection manager.")
self.manager.event_error(exception=Exception(
"Connection unexpectedly closed: %d %s" % (code, reason)))
def close(self, code=1000, reason=''):
self._dcm_closed_called = True
return ws4py_client.WebSocketClient.close(
self, code=code, reason=reason)
def received_message(self, m):
_g_wire_logger.debug("INCOMING\n--------\n%s\n--------" % str(m.data))
json_doc = json.loads(m.data.decode())
self.manager.event_incoming_message(json_doc)
def send(self, payload, binary=False):
_g_wire_logger.debug("OUTGOING\n--------\n%s\n--------" % str(payload))
super(_WebSocketClient, self).send(payload, binary=binary)
class WebSocketConnection(threading.Thread):
def __init__(self, server_url,
backoff_amount=5000, max_backoff=300000,
heartbeat=None, allow_unknown_certs=False, ca_certs=None):
super(WebSocketConnection, self).__init__()
self._send_queue = RepeatQueue()
self._ws_manager = None
self._server_url = server_url
self._cond = threading.Condition()
self._done_event = threading.Event()
self._connect_timer = None
self._backoff = Backoff(float(max_backoff) / 1000.0,
float(backoff_amount) / 1000.0)
self._sm = state_machine.StateMachine(
WsConnStates.WAITING, logger=_g_logger)
self._setup_states()
self._handshake_manager = None
self._heartbeat_freq = heartbeat
if allow_unknown_certs:
cert_reqs = ssl.CERT_NONE
else:
cert_reqs = ssl.CERT_REQUIRED
self._ssl_options = {'cert_reqs': cert_reqs, 'ca_certs': ca_certs}
self.pre_hs_message_queue = queue.Queue()
@agent_utils.class_method_sync
def set_backoff(self, backoff_seconds):
self._backoff.force_backoff_time(backoff_seconds)
@agent_utils.class_method_sync
def connect(self, receive_callback, handshake_manager):
self._receive_callback = receive_callback
self._handshake_manager = handshake_manager
self.start()
def _register_connect(self):
_g_logger.debug("Registering a connection to DCM")
if self._connect_timer is not None:
raise exceptions.AgentRuntimeException(
"There is already a connection registered")
self._connect_timer = dcm_events.register_callback(
self.event_connect_timeout,
delay=self._backoff.seconds_until_ready())
@agent_utils.class_method_sync
def event_connect_timeout(self):
self._connect_timer = None
self._sm.event_occurred(WsConnEvents.CONNECT_TIMEOUT)
@agent_utils.class_method_sync
def send(self, doc):
_g_logger.debug("Adding a message to the send queue")
self._send_queue.put(doc)
self._cond.notify_all()
@agent_utils.class_method_sync
def close(self):
_g_logger.debug("Websocket connection closed.")
self.event_close()
@agent_utils.class_method_sync
def run(self):
self._register_connect()
while not self._done_event.is_set():
try:
self._sm.event_occurred(WsConnEvents.POLL)
self._cond.wait()
except Exception as ex:
_g_logger.exception("The ws connection poller loop had "
"an unexpected exception.")
self._throw_error(ex)
#########
# incoming events
#########
@agent_utils.class_method_sync
def event_close(self):
self._sm.event_occurred(WsConnEvents.CLOSE)
@agent_utils.class_method_sync
def event_incoming_message(self, incoming_data):
self._sm.event_occurred(WsConnEvents.INCOMING_MESSAGE,
incoming_data=incoming_data)
@agent_utils.class_method_sync
def event_error(self, exception=None):
self._sm.event_occurred(WsConnEvents.ERROR)
_g_logger.error(
"State machine received an exception %s" % str(exception))
@agent_utils.class_method_sync
def event_successful_handshake(self, hs):
self._sm.event_occurred(WsConnEvents.SUCCESSFUL_HANDSHAKE)
def _throw_error(self, exception, notify=True):
_g_logger.warning("throwing error %s" % str(exception))
dcm_events.register_callback(self.event_error,
kwargs={"exception": exception})
if notify:
self._cond.notify()
def throw_error(self, exception):
self._throw_error(exception)
def lock(self):
self._cond.acquire()
def unlock(self):
self._cond.release()
def _forming_connection_thread(self):
try:
self._ws.connect()
self.lock()
try:
self._sm.event_occurred(WsConnEvents.CONNECTING_FINISHED)
finally:
self.unlock()
except BaseException as ex:
self.event_error(exception=ex)
#########
# state transitions
#########
def _sm_connect_poll(self):
"""
Attempting to connect and setup the handshake
"""
pass
def _sm_connect(self):
try:
self._ws = _WebSocketClient(
self, self._server_url, self._receive_callback,
protocols=['dcm'], heartbeat_freq=self._heartbeat_freq,
ssl_options=self._ssl_options)
dcm_events.register_callback(
self._forming_connection_thread, in_thread=True)
except Exception as ex:
_g_logger.exception("Failed to connect to %s" % self._server_url)
self._throw_error(ex, notify=False)
self._cond.notify()
def _sm_start_handshake(self):
try:
hs_doc = self._handshake_manager.get_send_document()
_g_logger.debug("Sending handshake")
self._ws.send(json.dumps(hs_doc))
except Exception as ex:
_g_logger.exception("Failed to send handshake")
self._throw_error(ex, notify=False)
self._cond.notify()
def _sm_close_while_connecting(self):
try:
self._ws.close()
except Exception as ex:
_g_logger.warn("Error closing the connection " + str(ex))
self._done_event.set()
self._cond.notify_all()
def _sm_error_while_connecting(self):
try:
self._ws.close()
except Exception as ex:
_g_logger.warn("Error closing the connection " + str(ex))
self._backoff.error()
self._cond.notify_all()
self._register_connect()
def _sm_received_hs(self, incoming_data=None):
"""
The handshake has arrived
"""
try:
# if the handshake is rejected an exception will be thrown
hs = self._handshake_manager.incoming_document(incoming_data)
_g_logger.debug("We received a handshake with reply code %d"
% hs.reply_type)
if hs.reply_type != handshake.HandshakeIncomingReply.REPLY_CODE_SUCCESS:
_g_logger.warn("The handshake was rejected.")
if hs.reply_type == handshake.HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF:
_g_logger.info("Backing off for %f seconds"
% float(hs.force_backoff))
self._backoff.force_backoff_time(hs.force_backoff)
self._ws.close()
ex = exceptions.AgentHandshakeException(hs)
self._throw_error(ex)
else:
dcm_events.register_callback(self.event_successful_handshake,
kwargs={"hs": hs})
self._cond.notify()
except Exception as ex:
self._throw_error(ex)
def _sm_pre_handshake_message(self, incoming_data=None):
"""
        This happens when a handshake has already been received and, in the
        unlock window while waiting to process success/failure, another
        message comes in from DCM. In this case we queue the message and
        process it if the handshake is determined to be successful.
"""
_g_logger.debug(
"New message received before the handshake was processed")
self.pre_hs_message_queue.put(incoming_data)
def _sm_successful_handshake(self):
"""
This is the standard case when a handshake is successfully processed
"""
_g_logger.debug("The handshake was successfully processed")
        while not self.pre_hs_message_queue.empty():
incoming_data = self.pre_hs_message_queue.get()
dcm_events.register_callback(
self._receive_callback, args=[incoming_data])
self._backoff.activity()
def _sm_open_incoming_message(self, incoming_data=None):
_g_logger.debug("New message received")
dcm_events.register_callback(
self._receive_callback, args=[incoming_data])
self._backoff.activity()
def _sm_hs_failed(self):
"""
An error occurred while waiting for the handshake
"""
_g_logger.debug("close called while handshaking")
try:
self._ws.close()
except Exception:
_g_logger.exception(
"Got an error while closing in handshake state")
self._backoff.error()
self._cond.notify()
self._register_connect()
def _sm_close_open(self):
"""
A user called close while the connection was open
"""
_g_logger.debug("close called when open")
self._done_event.set()
self._cond.notify_all()
self._ws.close()
def _sm_hs_close(self):
"""
A user called close while waiting for a handshake
"""
_g_logger.debug("close event while handshaking")
self._done_event.set()
self._cond.notify_all()
def _sm_not_open_close(self):
"""
A user called close when the connection was not open
"""
_g_logger.debug("close event while not open")
self._done_event.set()
self._cond.notify_all()
def _sm_open_poll(self):
"""
A poll event occurred in the open state. Check the send queue
"""
# TODO XXXX find a way to send the data not in a lock
# check the send queue
done = False
while not done:
try:
doc = self._send_queue.get(False)
self._send_queue.task_done()
msg = json.dumps(doc)
self._ws.send(msg)
except socket.error as er:
if er.errno == errno.EPIPE:
_g_logger.info(
"The ws connection broke for %s" % self._server_url)
else:
_g_logger.info(
"A WS socket error occurred %s" % self._server_url)
self._throw_error(er)
done = True
except queue.Empty:
done = True
except Exception as ex:
_g_logger.exception(str(ex))
self._throw_error(ex)
done = True
def _sm_open_error(self):
"""
An error occurred while the connection was open
"""
self._cond.notify()
self._register_connect()
def _sm_waiting_error(self):
"""
An error occurred while waiting on the connection. This is an odd
case
"""
_g_logger.warn("An error occurred while waiting to try a new "
"connection.")
self._backoff.error()
def _sm_handshake_poll(self):
"""
While waiting for the handshake a poll event occurred
"""
pass
def _sm_handshake_connect(self):
"""
This could happen if the POLL event happened twice in the waiting
state before the first one could try to connect. Just ignore
"""
pass
def _sm_connection_finished_right_after_error(self):
"""
This case occurs if the connection is registered and finished but
an error occurs that gets the lock before the connection can
report in successfully. In this case we should have a websocket
to clean up
:return:
"""
try:
self._ws.close()
except Exception:
_g_logger.exception(
"Got an error while closing in handshake state")
def _sm_connection_finished_right_after_done(self):
"""
This case occurs if the connection is registered and finishes but
a close is called that gets the lock before the connection can
report in successfully. In this case we should have a websocket
to clean up
:return:
"""
try:
self._ws.close()
except Exception:
_g_logger.exception(
"Got an error while closing in handshake state")
def _setup_states(self):
self._sm.add_transition(WsConnStates.WAITING,
WsConnEvents.POLL,
WsConnStates.WAITING,
self._sm_connect_poll)
self._sm.add_transition(WsConnStates.WAITING,
WsConnEvents.ERROR,
WsConnStates.WAITING,
self._sm_waiting_error)
self._sm.add_transition(WsConnStates.WAITING,
WsConnEvents.CONNECT_TIMEOUT,
WsConnStates.CONNECTING,
self._sm_connect)
self._sm.add_transition(WsConnStates.WAITING,
WsConnEvents.CLOSE,
WsConnStates.DONE,
self._sm_not_open_close)
self._sm.add_transition(WsConnStates.WAITING,
WsConnEvents.CONNECTING_FINISHED,
WsConnStates.WAITING,
self._sm_connection_finished_right_after_error)
self._sm.add_transition(WsConnStates.CONNECTING,
WsConnEvents.CLOSE,
WsConnStates.DONE,
self._sm_close_while_connecting)
self._sm.add_transition(WsConnStates.CONNECTING,
WsConnEvents.ERROR,
WsConnStates.WAITING,
self._sm_error_while_connecting)
self._sm.add_transition(WsConnStates.CONNECTING,
WsConnEvents.CONNECTING_FINISHED,
WsConnStates.HANDSHAKING,
self._sm_start_handshake)
self._sm.add_transition(WsConnStates.CONNECTING,
WsConnEvents.CONNECT_TIMEOUT,
WsConnStates.CONNECTING,
None)
self._sm.add_transition(WsConnStates.CONNECTING,
WsConnEvents.POLL,
WsConnStates.CONNECTING,
None)
self._sm.add_transition(WsConnStates.HANDSHAKING,
WsConnEvents.INCOMING_MESSAGE,
WsConnStates.HANDSHAKE_RECEIVED,
self._sm_received_hs)
self._sm.add_transition(WsConnStates.HANDSHAKING,
WsConnEvents.ERROR,
WsConnStates.WAITING,
self._sm_hs_failed)
self._sm.add_transition(WsConnStates.HANDSHAKING,
WsConnEvents.POLL,
WsConnStates.HANDSHAKING,
self._sm_handshake_poll)
self._sm.add_transition(WsConnStates.HANDSHAKING,
WsConnEvents.CONNECT_TIMEOUT,
WsConnStates.HANDSHAKING,
self._sm_handshake_connect)
self._sm.add_transition(WsConnStates.HANDSHAKING,
WsConnEvents.CLOSE,
WsConnStates.DONE,
self._sm_hs_close)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.INCOMING_MESSAGE,
WsConnStates.HANDSHAKE_RECEIVED,
self._sm_pre_handshake_message)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.SUCCESSFUL_HANDSHAKE,
WsConnStates.OPEN,
self._sm_successful_handshake)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.ERROR,
WsConnStates.WAITING,
self._sm_hs_failed)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.POLL,
WsConnStates.HANDSHAKE_RECEIVED,
self._sm_handshake_poll)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.CONNECT_TIMEOUT,
WsConnStates.HANDSHAKE_RECEIVED,
self._sm_handshake_connect)
self._sm.add_transition(WsConnStates.HANDSHAKE_RECEIVED,
WsConnEvents.CLOSE,
WsConnStates.DONE,
self._sm_hs_close)
self._sm.add_transition(WsConnStates.OPEN,
WsConnEvents.CLOSE,
WsConnStates.DONE,
self._sm_close_open)
self._sm.add_transition(WsConnStates.OPEN,
WsConnEvents.POLL,
WsConnStates.OPEN,
self._sm_open_poll)
self._sm.add_transition(WsConnStates.OPEN,
WsConnEvents.INCOMING_MESSAGE,
WsConnStates.OPEN,
self._sm_open_incoming_message)
self._sm.add_transition(WsConnStates.OPEN,
WsConnEvents.ERROR,
WsConnStates.WAITING,
self._sm_open_error)
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.POLL,
WsConnStates.DONE,
None)
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.CONNECT_TIMEOUT,
WsConnStates.DONE,
None)
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.ERROR,
WsConnStates.DONE,
None)
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.INCOMING_MESSAGE,
WsConnStates.DONE,
None)
        # This can happen if close is called by the agent after the handshake
        # has been determined to be successful but before the event comes in
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.SUCCESSFUL_HANDSHAKE,
WsConnStates.DONE,
None)
self._sm.add_transition(WsConnStates.DONE,
WsConnEvents.CONNECTING_FINISHED,
WsConnStates.DONE,
self._sm_connection_finished_right_after_done)
```
#### File: dcm/agent/exceptions.py
```python
class AgentBaseException(Exception):
pass
class AgentNotImplementedException(AgentBaseException):
def __init__(self, func_name):
message = "The function %s must be implemented." % (func_name)
super(AgentNotImplementedException, self).__init__(message)
class AgentOptionException(AgentBaseException):
pass
class AgentExtrasNotInstalledException(AgentBaseException):
def __init__(self, exmsg):
message = "The package install failed with: %s" % exmsg
super(AgentExtrasNotInstalledException, self).__init__(message)
class AgentPageNotFoundException(AgentBaseException):
def __init__(self, page_token):
message = ("The page set with token %(page_token)s was not found."
% locals())
super(AgentPageNotFoundException, self).__init__(message)
class AgentOptionTypeException(AgentOptionException):
def __init__(self, name, expected_type, given_value):
message = ("The config option %(name)s had the value "
"%(given_value)s could not be converted "
"to %(expected_type)s" % locals())
super(AgentOptionTypeException, self).__init__(message)
class AgentOptionSectionNotFoundException(AgentOptionException):
def __init__(self, name):
message = ("The section %(name)s is required and was not "
"found" % locals())
super(AgentOptionSectionNotFoundException, self).__init__(message)
class AgentOptionValueException(AgentOptionException):
def __init__(self, name, given_value, expected_values):
message = ("The config option %(name)s must have one of the "
"values %(expected_values)s not %(given_value)s" % locals())
super(AgentOptionValueException, self).__init__(message)
class AgentOptionValueNotSetException(AgentOptionException):
def __init__(self, name, msg=None):
message = ("The config option %(name)s must be set." % locals())
if msg:
message = message + " " + msg
super(AgentOptionValueNotSetException, self).__init__(message)
class AgentOptionValueAlreadySetException(AgentOptionException):
def __init__(self, opt_name, msg=None):
message = ("%(opt_name)s has already been used." % locals())
if msg:
message = message + " " + msg
super(AgentOptionValueAlreadySetException, self).__init__(message)
class AgentOptionPathNotFoundException(AgentOptionException):
def __init__(self, name, path):
message = ("The config option %(name)s points to an invalid path: "
"%(path)s " % locals())
super(AgentOptionPathNotFoundException, self).__init__(message)
class AgentOptionRangeException(AgentOptionException):
    def __init__(self, name, given_value, minv, maxv):
        message = ("The config option %(name)s must be between %(minv)s "
                   "and %(maxv)s not %(given_value)s" % locals())
        super(AgentOptionRangeException, self).__init__(message)
class AgentConnectionException(Exception):
def __init__(self, error_code, error_msg):
message = ("The connection to DCM has failed is an unrecoverable way. "
"Error code: %(error_code)s Error Message %(error_msg)s"
% locals())
super(AgentConnectionException, self).__init__(error_msg)
class AgentHandshakeUnknownTypeException(AgentBaseException):
pass
class StateMachineException(AgentBaseException):
pass
class DoNotChangeStateException(StateMachineException):
pass
class IllegalStateTransitionException(StateMachineException):
msg = "The event %(event)s is not valid when in state %(state)s"
def __init__(self, event, state):
super(IllegalStateTransitionException, self).__init__(
self.msg % {"event": event, "state": state})
class AssertionFailure(AgentBaseException):
pass
class MessagingException(AgentBaseException):
pass
class RequesterMessagingException(MessagingException):
pass
class MalformedMessageException(MessagingException):
pass
class MissingMessageParameterException(MalformedMessageException):
msg = "The message requires the attribute %(missing_name)s but " \
"it was not found."
def __init__(self, missing_name):
super(MissingMessageParameterException, self).__init__(
self.msg % {'missing_name': missing_name})
class InvalidMessageParameterValueException(MalformedMessageException):
msg = "The attribute %(attr_name)s is set to the illegal value " \
"%(attr_value)s."
def __init__(self, attr_name, attr_value):
super(InvalidMessageParameterValueException, self).__init__(
self.msg % {'attr_name': attr_name,
'attr_value': attr_value})
class AgentHandshakeException(Exception):
def __init__(self, handshake_doc, extra_msg=None):
if handshake_doc:
msg = "The handshake failed with code %s."\
% handshake_doc.reply_type
else:
msg = "Handshake Error."
if extra_msg:
msg = msg + " " + extra_msg
super(AgentHandshakeException, self).__init__(msg)
class PerminateConnectionException(MessagingException):
msg = "This connection has perminately failed. This should almost " \
"never happen. %(details)s."
def __init__(self, details):
super(PerminateConnectionException, self).__init__(self.msg % locals())
class AgentRuntimeException(AgentBaseException):
pass
class AgentFilePermissionsException(AgentBaseException):
pass
class AgentConnectionDriverException(AgentBaseException):
pass
class AgentExecutableException(AgentBaseException):
msg = "The external process run with %(command_line)s returned an " \
"error. rc=%(rc)s stderr=%(stderr)s stdout=%(stdout)s"
def __init__(self, command_line, rc, stdout, stderr):
super(AgentExecutableException, self).__init__(self.msg % locals())
class AgentUnsupportedCloudFeature(AgentBaseException):
pass
class PersistenceException(AgentBaseException):
pass
class AgentPlatformNotDetectedException(AgentBaseException):
def __init__(self):
message = ("The platform was not detected")
super(AgentPlatformNotDetectedException, self).__init__(message)
```
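The exception classes above form one hierarchy rooted at `AgentBaseException`, so callers can catch a specific failure or fall back to the common base. A minimal usage sketch, assuming the `dcm-agent` package is importable; the command line and return code below are made up for illustration:
```python
import dcm.agent.exceptions as exceptions


def report_failed_command(command_line):
    try:
        # simulate an external script failing the way the agent reports it
        raise exceptions.AgentExecutableException(
            command_line, 2, "stdout text", "permission denied")
    except exceptions.AgentExecutableException as ex:
        # the subclass formats rc/stdout/stderr into the message
        print("executable failed: %s" % str(ex))
    except exceptions.AgentBaseException as ex:
        # any other agent-level failure funnels through the common base
        print("agent error: %s" % str(ex))


report_failed_command("/opt/dcm/bin/example-script")
```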
#### File: dcm/agent/handshake.py
```python
import logging
import os
import random
import string
import uuid
import dcm.agent
import dcm.agent.exceptions as exceptions
import dcm.agent.plugins.loader as plugin_loader
_g_logger = logging.getLogger(__name__)
def get_plugin_handshake_descriptor(conf):
items = plugin_loader.get_all_plugins(conf)
command_name_list = [i for i in items]
return command_name_list
class HandshakeIncomingReply:
DEFAULT_FORCE_BACKOFF = 60.0
REPLY_CODE_SUCCESS = 200
REPLY_CODE_BAD_TOKEN = 409
REPLY_CODE_UNAUTHORIZED = 401
REPLY_CODE_FORCE_BACKOFF = 503
REPLY_KEY_FORCE_BACKOFF = "FORCE_BACKOFF"
def __init__(self, reply_type, force_backoff=None,
agent_id=None, cloud_id=None, customer_id=None,
region_id=None, zone_id=None, server_id=None,
server_name=None, mount_point=None, pk=None,
dcm_version=None, cloud_delegate=None):
self.reply_type = reply_type
self.force_backoff = force_backoff
self.agent_id = agent_id
self.cloud_id = cloud_id
self.customer_id = customer_id
self.region_id = region_id
self.zone_id = zone_id
self.server_id = server_id
self.server_name = server_name
self.mount_point = mount_point
self.pk = pk
self.dcm_version = dcm_version
self.cloud_delegate = cloud_delegate
class HandshakeManager(object):
def __init__(self, conf, db):
self.agent_id = None
self.conf = conf
self._db = db
self._token_file_path = self.validate_token_file()
self._token = None
self._incoming_handshake_payload = None
self._hs_doc = None
if os.path.exists(self._token_file_path):
try:
with open(self._token_file_path, "r") as fptr:
self._token = fptr.readline().strip()
except BaseException:
_g_logger.exception("Failed to read the token file %s"
% self._token_file_path)
if self._token is None:
self._generate_token()
_g_logger.debug("TOKEN IS " + self._token)
if 'FOR_TEST_AGENT_ID_ENV' in os.environ:
self.agent_id = os.environ['FOR_TEST_AGENT_ID_ENV']
def validate_token_file(self):
token_dir = self.conf.get_secure_dir()
# At some point we should validate that only this user can read this
# file
# utils.validate_file_permissions(
# token_dir, username=self.conf.system_user, permissions=0700)
#
token_file_path = os.path.join(token_dir, "token")
return token_file_path
def _generate_token(self):
with os.fdopen(os.open(self._token_file_path,
os.O_WRONLY | os.O_CREAT,
int("0600", 8)), "w") as fptr:
l = 30 + random.randint(0, 29)
self._token = ''.join(random.choice(string.ascii_letters +
string.digits +
"-_!@#^(),.=+")
for _ in range(l)) + str(uuid.uuid4())
fptr.write(self._token)
def incoming_document(self, incoming_doc):
if incoming_doc['return_code'] == HandshakeIncomingReply.REPLY_CODE_SUCCESS:
# this means that everything worked out well and we can move on
payload = incoming_doc["handshake"]
self._incoming_handshake_payload = payload
self.agent_id = payload.get('agentID')
customer_id = payload.get('customerId')
            # this next line should be a noop every time but the first
self._db.check_agent_id(self.agent_id)
self.conf.agent_id = self.agent_id
self.conf.customer_id = customer_id
hs = HandshakeIncomingReply(
reply_type=HandshakeIncomingReply.REPLY_CODE_SUCCESS,
mount_point=payload.get('mountPoint'),
pk=payload.get('pk'),
dcm_version=payload.get('version'),
cloud_delegate=payload.get('cloudDelegate'),
agent_id=self.agent_id,
cloud_id=payload.get('cloudId'),
customer_id=customer_id,
region_id=payload.get('regionId'),
zone_id=payload.get('zoneId'),
server_id=payload.get('serverId'),
server_name=payload.get('serverName'))
elif incoming_doc['return_code'] ==\
HandshakeIncomingReply.REPLY_CODE_BAD_TOKEN:
# This signals that we used a bad token but have the chance to
# recover by trying a new one
_g_logger.warn("A stale token was used. The agent is generating a new token.")
self._generate_token()
hs = HandshakeIncomingReply(
HandshakeIncomingReply.REPLY_CODE_BAD_TOKEN)
elif incoming_doc['return_code'] ==\
HandshakeIncomingReply.REPLY_CODE_UNAUTHORIZED:
# unauthorized, like anything else can be transient. Sometimes
# dcm is just not ready for the agent when it comes up
hs = HandshakeIncomingReply(
HandshakeIncomingReply.REPLY_CODE_UNAUTHORIZED)
elif incoming_doc['return_code'] ==\
HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF:
try:
backoff = incoming_doc[HandshakeIncomingReply.REPLY_KEY_FORCE_BACKOFF]
except KeyError:
backoff = HandshakeIncomingReply.DEFAULT_FORCE_BACKOFF
hs = HandshakeIncomingReply(
HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF,
force_backoff=backoff)
else:
raise exceptions.AgentHandshakeUnknownTypeException(
"Unknown exception type")
self._hs_doc = hs
return hs
def get_send_document(self):
plugin_dict = plugin_loader.get_all_plugins(self.conf)
features = self.conf.features.copy()
for plugin_name in plugin_dict:
p_feature = plugin_loader.get_module_features(
self.conf, plugin_name, plugin_dict[plugin_name])
features.update(p_feature)
features['plugins'] = get_plugin_handshake_descriptor(self.conf)
meta_data_object = self.conf.meta_data_object
ipv4s = meta_data_object.get_handshake_ip_address()
ipv6s = []
injected_id = meta_data_object.get_injected_id()
vm_instance = meta_data_object.get_instance_id()
handshake_doc = {
'ipv4': ipv4s,
'ipv6': ipv6s,
'agent_id': self.agent_id,
'token': self._token,
'vm_instance': vm_instance,
'injected_id': injected_id,
'version': dcm.agent.g_version,
'protocol_version': dcm.agent.g_protocol_version,
'platform': self.conf.platform_name,
'platform_version': self.conf.platform_version,
'features': features
}
return handshake_doc
```
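For reference, a sketch of the reply documents that `HandshakeManager.incoming_document()` consumes. The key names are taken from the code above; the concrete values are invented for illustration:
```python
# A successful handshake reply (return_code 200); the nested "handshake"
# payload supplies the agent, customer, and mount point details.
example_success_reply = {
    "return_code": 200,  # HandshakeIncomingReply.REPLY_CODE_SUCCESS
    "handshake": {
        "agentID": 12345,
        "customerId": 678,
        "cloudId": 1,
        "regionId": 10,
        "zoneId": 20,
        "serverId": 999,
        "serverName": "example-server",
        "mountPoint": "/mnt/dcmdata",
        "pk": None,
        "version": "example-version",
        "cloudDelegate": None,
    },
}

# A forced backoff reply (return_code 503); the delay in seconds travels
# under FORCE_BACKOFF and defaults to DEFAULT_FORCE_BACKOFF when absent.
example_backoff_reply = {
    "return_code": 503,  # HandshakeIncomingReply.REPLY_CODE_FORCE_BACKOFF
    "FORCE_BACKOFF": 30.0,
}
```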
#### File: dcm/agent/logger.py
```python
import functools
import glob
import logging
from logging.handlers import RotatingFileHandler
import os
import urllib.parse
import urllib.error
import urllib.request
import pwd
import grp
from dcm.agent.events.globals import global_space as dcm_events
def send_log_to_dcm_callback(conn=None, token=None, message=None, level=None):
max_size = 10*1024
if len(message) > max_size:
message = message[:max_size]
message = urllib.parse.quote(message)
msg = {
"type": "LOG",
"token": token,
"level": level,
"message": message
}
conn.send(msg)
class dcmLogger(logging.Handler):
def __init__(self, encoding=None):
super(dcmLogger, self).__init__()
self._conn = None
self._conf = None
self._unsent_msgs = []
def emit(self, record):
msg = self.format(record)
if self._conn is None:
self._unsent_msgs.append(msg)
else:
dcm_events.register_callback(
send_log_to_dcm_callback, kwargs={"conn": self._conn,
"token": "",
"message": msg,
"level": record.levelname})
def set_conn(self, conf, conn):
self._conn = conn
self._conf = conf
if conn is None:
return
for msg in self._unsent_msgs:
dcm_events.register_callback(
send_log_to_dcm_callback, kwargs={"conn": self._conn,
"message": msg})
self._unsent_msgs = []
def set_dcm_connection(conf, conn):
for key in logging.Logger.manager.loggerDict:
logger = logging.Logger.manager.loggerDict[key]
if type(logger) == logging.Logger:
for h in logger.handlers:
if type(h) == dcmLogger:
h.set_conn(conf, conn)
def clear_dcm_logging():
# effectively just for tests
for key in logging.Logger.manager.loggerDict:
logger = logging.Logger.manager.loggerDict[key]
if type(logger) == logging.Logger:
for h in logger.handlers:
if type(h) == dcmLogger:
h.set_conn(None, None)
def delete_logs():
# effectively just for tests
for key in logging.Logger.manager.loggerDict:
logger = logging.Logger.manager.loggerDict[key]
if type(logger) == logging.Logger:
for h in logger.handlers:
if isinstance(h, DCMAgentLogger):
h.clear_logs()
class DCMAgentLogger(RotatingFileHandler):
def __init__(self, filename, owner=None, mode='a', maxBytes=0,
backupCount=0, encoding=None, delay=False):
self._uid = pwd.getpwnam(owner).pw_uid
self._gid = grp.getgrnam(owner).gr_gid
super(DCMAgentLogger, self).__init__(
filename, mode=mode, maxBytes=maxBytes, backupCount=backupCount,
encoding=encoding, delay=delay)
self.log_perms()
def _open(self):
s = super(DCMAgentLogger, self)._open()
self.log_perms()
return s
def log_perms(self):
for l in glob.glob("%s*" % os.path.abspath(self.baseFilename)):
try:
os.chown(l, self._uid, self._gid)
except Exception:
logging.exception("We could not set the log file ownership.")
def clear_logs(self):
with open(self.baseFilename, "w"):
pass
for l in glob.glob("%s.*" % self.baseFilename):
try:
os.remove(l)
except:
logging.exception("Failed to remove a rotated file.")
# Events to log to DCM
def log_to_dcm_console(level, base_message, msg=None, **kwargs):
if not kwargs:
out_message = base_message
else:
out_message = base_message % kwargs
if msg:
out_message = out_message + " : " + msg
l_logger = logging.getLogger("dcm.agent.log.to.agent.manager")
l_logger.log(level, out_message)
log_to_dcm_console_agent_started_log = functools.partial(
log_to_dcm_console,
logging.CRITICAL,
"The agent has started. Version %(version)s.")
log_to_dcm_console_successful_first_handshake = functools.partial(
log_to_dcm_console,
logging.CRITICAL,
"The agent has connected.")
log_to_dcm_console_critical_error = functools.partial(
log_to_dcm_console,
logging.CRITICAL,
"The agent experienced a critical error.")
log_to_dcm_console_overloaded = functools.partial(
log_to_dcm_console,
logging.CRITICAL,
"The agent is overloaded.")
log_to_dcm_console_shutting_down = functools.partial(
log_to_dcm_console,
logging.CRITICAL,
"The agent is shutting down.")
log_to_dcm_console_job_failed = functools.partial(
log_to_dcm_console,
logging.ERROR,
"The job %(job_name)s failed with request_id %(request_id)s.")
log_to_dcm_console_unknown_job = functools.partial(
log_to_dcm_console,
logging.ERROR,
"The job %(job_name)s is unknown.")
log_to_dcm_console_messaging_error = functools.partial(
log_to_dcm_console,
logging.ERROR,
"The agent experienced a problem when communicating with DCM.")
log_to_dcm_console_unknown_job_parameter = functools.partial(
log_to_dcm_console,
logging.WARN,
"The job %(job_name)s received the unknown parameter %(parameter_name)s. The parameter will be ignored.")
log_to_dcm_console_successful_reconnect = functools.partial(
log_to_dcm_console,
logging.INFO,
"The agent successfully reconnected.")
log_to_dcm_console_job_succeeded = functools.partial(
log_to_dcm_console,
logging.INFO,
"The job %(job_name)s successfully completed with request_id %(request_id)s.")
log_to_dcm_console_job_started = functools.partial(
log_to_dcm_console,
logging.INFO,
"The job %(job_name)s has started with request_id %(request_id)s.")
log_to_dcm_console_job_details = functools.partial(
log_to_dcm_console,
logging.DEBUG,
"Details from %(job_name)s : %(details)s.")
log_to_dcm_console_incoming_message = functools.partial(
log_to_dcm_console,
logging.DEBUG,
"An incoming message for the command %(job_name)s.")
log_to_dcm_console_configuration_management_error = functools.partial(
log_to_dcm_console,
logging.WARN,
"Configuration management reported the errors: %(stderr)s.")
log_to_dcm_console_configuration_management_output = functools.partial(
log_to_dcm_console,
logging.INFO,
"Configuration management reported: %(stdout)s.")
```
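The `functools.partial` wrappers above fix the log level and message template for each console event, so callers only pass the keyword arguments named in the template. A small sketch, assuming the package is importable; the job and request identifiers are made up:
```python
import dcm.agent.logger as dcm_logger

# keyword names must match the %(...)s placeholders in the template
dcm_logger.log_to_dcm_console_job_started(
    job_name="add_user", request_id="req-123")
dcm_logger.log_to_dcm_console_job_succeeded(
    job_name="add_user", request_id="req-123")

# the optional msg argument is appended after the formatted template
dcm_logger.log_to_dcm_console_job_failed(
    job_name="add_user", request_id="req-456",
    msg="the script returned a non-zero exit code")
```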
#### File: agent/messaging/alert_msg.py
```python
import hashlib
import logging
import threading
import dcm.agent.utils as utils
import dcm.agent.events.state_machine as state_machine
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class States:
NEW = "NEW"
WAITING_FOR_ACK = "WAITING_FOR_ACK"
COMPLETE = "COMPLETE"
class Events:
TIMEOUT = "TIMEOUT"
SEND = "SEND"
ACK_RECEIVED = "ACK_RECEIVED"
STOP = "STOP"
class AlertAckMsg(object):
def __init__(self, doc, conn, timeout=5.0):
self._timeout = timeout
self.doc = doc
self._sm = state_machine.StateMachine(States.NEW)
self.setup_states()
self._timer = None
self._lock = threading.RLock()
self._conn = conn
h = hashlib.sha256()
h.update(str(doc['alert_timestamp']).encode())
h.update(doc['subject'].encode())
h.update(doc['message'].encode())
self.alert_hash = h.hexdigest()
self.doc['alert_hash'] = self.alert_hash
@utils.class_method_sync
def incoming_message(self):
_g_logger.debug("ENTERING INCOMING MESSAGE")
self._sm.event_occurred(Events.ACK_RECEIVED)
_g_logger.debug("EXITING INCOMING MESSAGE")
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
@utils.class_method_sync
def timeout(self):
self._sm.event_occurred(Events.TIMEOUT)
@utils.class_method_sync
def send(self):
self._sm.event_occurred(Events.SEND)
@utils.class_method_sync
def stop(self):
self._sm.event_occurred(Events.STOP)
def _send_timeout(self):
self._timer = dcm_events.register_callback(
self.timeout, delay=self._timeout)
_g_logger.debug("Sending the alert message " + str(self.doc))
self._conn.send(self.doc)
def _sm_send_message(self):
self._send_timeout()
def _sm_ack_received(self):
# the timer must be active
_g_logger.debug("ENTERING SM_ACK_RECEIVED")
dcm_events.cancel_callback(self._timer)
self._timer = None
_g_logger.debug("EXITING SM_ACK_RECEIVED")
def _sm_resend_message(self):
self._send_timeout()
def _sm_stopping_early(self):
dcm_events.cancel_callback(self._timer)
self._timer = None
def setup_states(self):
self._sm.add_transition(States.NEW,
Events.SEND,
States.WAITING_FOR_ACK,
self._sm_send_message)
self._sm.add_transition(States.NEW,
Events.STOP,
States.COMPLETE,
None)
self._sm.add_transition(States.WAITING_FOR_ACK,
Events.TIMEOUT,
States.WAITING_FOR_ACK,
self._sm_resend_message)
self._sm.add_transition(States.WAITING_FOR_ACK,
Events.ACK_RECEIVED,
States.COMPLETE,
self._sm_ack_received)
self._sm.add_transition(States.WAITING_FOR_ACK,
Events.STOP,
States.COMPLETE,
self._sm_stopping_early)
self._sm.add_transition(States.COMPLETE,
Events.ACK_RECEIVED,
States.COMPLETE,
None)
self._sm.add_transition(States.COMPLETE,
Events.TIMEOUT,
States.COMPLETE,
None)
self._sm.add_transition(States.COMPLETE,
Events.STOP,
States.COMPLETE,
None)
```
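A sketch of the alert document that `AlertAckMsg` hashes before sending; the three hashed keys come from `__init__` above, and the hashing below mirrors that code. The field values are illustrative:
```python
import hashlib

alert_doc = {
    "alert_timestamp": 1460000000,
    "subject": "disk usage",
    "message": "root volume is above 90% utilization",
}

# same digest construction as AlertAckMsg.__init__
h = hashlib.sha256()
h.update(str(alert_doc["alert_timestamp"]).encode())
h.update(alert_doc["subject"].encode())
h.update(alert_doc["message"].encode())
alert_doc["alert_hash"] = h.hexdigest()
print(alert_doc["alert_hash"])
```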
#### File: agent/messaging/reply.py
```python
import logging
import os
import threading
import signal
import sys
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.messaging.states as states
import dcm.agent.messaging.types as message_types
import dcm.agent.messaging.utils as utils
import dcm.agent.events.state_machine as state_machine
import dcm.agent.utils as agent_util
import dcm.eventlog.tracer as tracer
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class ReplyRPC(object):
MISSING_VALUE_STRING = "DEADBEEF"
def __init__(self,
reply_listener,
agent_id,
connection,
request_id,
request_document,
db,
timeout=1.0,
reply_doc=None,
start_state=states.ReplyStates.REQUESTING):
self._agent_id = agent_id
self._request_id = request_id
self._request_document = request_document
self._cancel_callback = None
self._cancel_callback_args = None
self._cancel_callback_kwargs = None
self._reply_message_timer = None
self._reply_listener = reply_listener
self._timeout = timeout
self._conn = connection
self._resend_reply_cnt = 0
self._resend_reply_cnt_threshold = 5
self._lock = threading.RLock()
self._response_doc = reply_doc
self._sm = state_machine.StateMachine(start_state)
self._setup_states()
self._db = db
def get_request_id(self):
return self._request_id
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
def get_message_payload(self):
return self._request_document["payload"]
def shutdown(self):
with tracer.RequestTracer(self._request_id):
try:
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_listener.message_done(self)
except Exception as ex:
_g_logger.warn("Error shutting down the request", ex)
def kill(self):
with tracer.RequestTracer(self._request_id):
if self._reply_message_timer:
try:
self._reply_message_timer.cancel()
except Exception as ex:
_g_logger.info("an exception occurred when trying to "
"cancel the timer: " + str(ex))
@agent_util.class_method_sync
def ack(self,
cancel_callback, cancel_callback_args, cancel_callback_kwargs):
"""
Indicate to the messaging system that you have successfully received
this message and stored it for processing.
"""
with tracer.RequestTracer(self._request_id):
self._cancel_callback = cancel_callback
self._cancel_callback_args = cancel_callback_args
if self._cancel_callback_args is None:
self._cancel_callback_args = []
self._cancel_callback_args.insert(0, self)
self._cancel_callback_kwargs = cancel_callback_kwargs
self._sm.event_occurred(states.ReplyEvents.USER_ACCEPTS_REQUEST,
message={})
@agent_util.class_method_sync
def nak(self, response_document):
"""
        This function is called to outright reject the message. The user
is signifying that this message will not be processed at all.
A call to this function signifies that this object will no longer be
referenced by the user.
"""
with tracer.RequestTracer(self._request_id):
self._sm.event_occurred(states.ReplyEvents.USER_REJECTS_REQUEST,
message=response_document)
@agent_util.class_method_sync
def reply(self, response_document):
"""
Send a reply to this request. This signifies that the user is
done with this object.
"""
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply() has been called")
self._sm.event_occurred(states.ReplyEvents.USER_REPLIES,
message=response_document)
@agent_util.class_method_sync
def reply_timeout(self, message_timer):
with tracer.RequestTracer(self._request_id):
_g_logger.debug("reply timeout occurred, resending.")
self._sm.event_occurred(states.RequesterEvents.TIMEOUT,
message_timer=message_timer)
@agent_util.class_method_sync
def incoming_message(self, json_doc):
with tracer.RequestTracer(self._request_id):
type_to_event = {
message_types.MessageTypes.ACK:
states.ReplyEvents.REPLY_ACK_RECEIVED,
message_types.MessageTypes.NACK:
states.ReplyEvents.REPLY_NACK_RECEIVED,
message_types.MessageTypes.CANCEL:
states.ReplyEvents.CANCEL_RECEIVED,
message_types.MessageTypes.STATUS:
states.ReplyEvents.STATUS_RECEIVED,
message_types.MessageTypes.REQUEST:
states.ReplyEvents.REQUEST_RECEIVED
}
if 'type' not in json_doc:
raise exceptions.MissingMessageParameterException('type')
if json_doc['type'] not in type_to_event:
raise exceptions.InvalidMessageParameterValueException(
'type', json_doc['type'])
# this next call drives the state machine
self._sm.event_occurred(type_to_event[json_doc['type']],
message=json_doc)
def _send_reply_message(self, message_timer):
self._reply_message_timer = message_timer
message_timer.send(self._conn)
###################################################################
# state machine event handlers
    # every method that starts with _sm_ is called under the same lock.
###################################################################
def _sm_initial_request_received(self, **kwargs):
"""
This is the initial request, we simply set this to the requesting
state.
"""
pass
def _sm_requesting_retransmission_received(self, **kwargs):
"""
After receiving an initial request we receive a retransmission of it.
The user has not yet acked the message but they have been notified
that the message exists. In this case we do nothing but wait for
the user to ack the message
"""
pass
def _sm_requesting_cancel_received(self, **kwargs):
"""
A cancel message flows over the wire after the request is received
but before it is acknowledged. Here we will tell the user about the
cancel. It is important that the cancel notification comes after
the message received notification.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_requesting_user_accepts(self, **kwargs):
"""
The user decided to accept the message. Here we will send the ack
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_accepts",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_requesting_user_replies(self, **kwargs):
"""
The user decides to reply before acknowledging the message. Therefore
we just send the reply and it acts as the ack and the reply
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "user_replies",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_requesting_user_rejects(self, **kwargs):
"""
The user decides to reject the incoming request so we must send
a nack to the remote side.
"""
self._db.new_record(self._request_id,
self._request_document,
None,
states.ReplyStates.ACKED,
self._agent_id)
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "user_rejects",
'error_message': "The agent rejected the request.",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_acked_request_received(self, **kwargs):
"""
In this case a retransmission of the request comes in after the user
acknowledged the message. Here we resend the ack.
"""
# reply using the latest message id
ack_doc = {'type': message_types.MessageTypes.ACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'agent_id': self._agent_id}
self._conn.send(ack_doc)
def _sm_acked_cancel_received(self, **kwargs):
"""
A cancel is received from the remote end. We simply notify the user
of the request and allow the user to act upon it.
"""
dcm_events.register_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_acked_reply(self, **kwargs):
"""
This is the standard case. A user has accepted the message and is
now replying to it. We send the reply.
"""
self._response_doc = kwargs['message']
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_acked_re_reply(self, **kwargs):
self._db.update_record(self._request_id,
states.ReplyStates.REPLY,
reply_doc=self._response_doc)
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "acked_reply",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_request_retrans(self, **kwargs):
"""
After replying to a message we receive a retransmission of the
original request. This can happen if the remote end never receives
an ack and the reply message is either lost or delayed. Here we
retransmit the reply.
"""
reply_doc = {'type': message_types.MessageTypes.REPLY,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'payload': self._response_doc,
'entity': "request_retrans",
'agent_id': self._agent_id}
message_timer = utils.MessageTimer(self._timeout,
self.reply_timeout,
reply_doc)
self._send_reply_message(message_timer)
def _sm_reply_cancel_received(self, **kwargs):
"""
This occurs when a cancel is received after a reply is sent. It can
happen if the remote end sends a cancel before the reply is received.
Because we have already finished with this request we simply ignore
this message.
"""
pass
def _sm_reply_ack_received(self, **kwargs):
"""
This is the standard case. A reply is sent and the ack to that
reply is received. At this point we know that the RPC was
successful.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_ACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Messaging complete. State event transition: "
+ str(self._sm.get_event_list()))
def _sm_reply_nack_received(self, **kwargs):
"""
        The reply was nacked. This is probably a result of a
        retransmission that was not needed.
"""
self._db.update_record(self._request_id,
states.ReplyStates.REPLY_NACKED)
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
_g_logger.debug("Reply NACKed, messaging complete. State event "
"transition: " + str(self._sm.get_event_list()))
def _sm_reply_ack_timeout(self, **kwargs):
"""
This happens when after a given amount of time an ack has still not
been received. We thus must re-send the reply.
"""
message_timer = kwargs['message_timer']
# The time out did occur before the message could be acked so we must
# resend it
_g_logger.info("Resending reply")
self._resend_reply_cnt += 1
if self._resend_reply_cnt > self._resend_reply_cnt_threshold:
# TODO punt at some point ?
pass
self._send_reply_message(message_timer)
def _sm_nacked_request_received(self, **kwargs):
"""
This happens when a request is received after it has been nacked.
This will occur if the first nack is lost or delayed. We retransmit
the nack
"""
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "request_received",
'error_message': "The agent already rejected this request",
'agent_id': self._agent_id}
self._conn.send(nack_doc)
def _sm_cancel_waiting_ack(self, **kwargs):
"""
If a cancel is received while in the requesting state we must make sure
that the user does not get the cancel callback until after they have
acked the message. This handler occurs when the user calls ack()
after a cancel has arrived. Here we just register a cancel callback
and let the user react to it how they will.
"""
dcm_events.register_user_callback(
self._cancel_callback,
args=self._cancel_callback_args,
kwargs=self._cancel_callback_kwargs)
def _sm_send_status(self):
status_doc = {'type': message_types.MessageTypes.STATUS,
'message_id': utils.new_message_id(),
'request_id': self._request_id,
'entity': "status send",
'agent_id': self._agent_id,
'state': self._sm._current_state,
'reply': self._response_doc}
self._conn.send(status_doc)
def _sm_reinflated_reply_ack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_ACK state")
def _sm_reinflated_reply_nack(self):
_g_logger.warn("The agent manager sent a message for this request "
"after it was in the REPLY_NACK state")
def _reinflate_done(self):
if self._reply_message_timer:
self._reply_message_timer.cancel()
self._reply_message_timer = None
self._reply_listener.message_done(self)
def _sm_reply_ack_re_acked(self, message=None):
"""
This is called when a re-inflated state had already been reply acked,
and is now acked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_ack_now_nacked(self, message=None):
"""
        This is called whenever a re-inflated command that had already been
        reply acked is now nacked. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_nack_re_nacked(self, message=None):
"""
This is called when a re-inflated state had already been reply nacked,
and is now nacked again. We just take it out of memory.
"""
self._reinflate_done()
def _sm_reply_nack_now_acked(self, message=None):
"""
This is called whenever a re-inflated command reaches acked state but
it was previously nacked
"""
self._reinflate_done()
def _sm_ack_reply_nack_received(self, message=None):
_g_logger.warn("A NACK was received when in the ACK state "
+ str(message))
# this will be cleaned up when the command replies, which it is
# required to do
def _sm_replied_nacked_reply(self, message=None):
"""
This is called when a request was received but the ACK for that
request received a NACK. However the command finished running
and a reply was sent back. Here we cancel the message and log the
event
"""
_g_logger.warn("A command that was already finished ended "
+ str(message))
self.shutdown()
def _setup_states(self):
self._sm.add_transition(states.ReplyStates.NEW,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_initial_request_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_cancel_received)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_requesting_user_accepts)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_requesting_retransmission_received)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
None)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_ACCEPTS_REQUEST,
states.ReplyStates.ACKED,
self._sm_cancel_waiting_ack)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_requesting_user_replies)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.USER_REJECTS_REQUEST,
states.ReplyStates.NACKED,
self._sm_requesting_user_rejects)
self._sm.add_transition(states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.CANCEL_RECEIVED_REQUESTING,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_request_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.ACKED,
self._sm_acked_cancel_received)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.ACKED,
self._sm_send_status)
        # If the AM receives an ACK but has never heard of the request ID
        # it will send a NACK. This should not happen in a normal course
        # of events. At this point we should just kill the request, log a
        # scary message, and kill anything running for that request.
        # This will happen when the agent manager quits on a request before
        # the agent sends the ACK. When the AM receives the ACK it has already
        # canceled the request and thus NACKs the ACK.
self._sm.add_transition(states.ReplyStates.ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_ack_reply_nack_received)
        # note, eventually we will want the reply retrans logic to just punt
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY,
self._sm_acked_reply)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY,
self._sm_reply_cancel_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY,
self._sm_reply_ack_timeout)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_received)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY,
self._sm_acked_re_reply)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.NACKED,
self._sm_nacked_request_received)
self._sm.add_transition(states.ReplyStates.NACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.NACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_re_acked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_reply_ack_now_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_ACKED,
None)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.STATUS_RECEIVED,
states.ReplyStates.REPLY_ACKED,
self._sm_send_status)
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.TIMEOUT,
states.ReplyStates.REPLY_ACKED,
None)
# this transition should only occur when the AM makes a mistake
# or messages are received out of order.
self._sm.add_transition(states.ReplyStates.REPLY_ACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_ACKED,
self._sm_reinflated_reply_ack)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REQUEST_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_request_retrans)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_ACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_re_nacked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.REPLY_NACK_RECEIVED,
states.ReplyStates.REPLY_NACKED,
self._sm_reply_nack_now_acked)
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.CANCEL_RECEIVED,
states.ReplyStates.REPLY_NACKED,
None)
# this will happen when the plugin finishes and thus replies
# to a request that had its ACK NACKed. In this case we
# just cancel the messaging and log a message
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.USER_REPLIES,
states.ReplyStates.REPLY_NACKED,
self._sm_replied_nacked_reply)
# this next state should only occur when a message is out
# of order or the agent manager made a mistake
self._sm.add_transition(states.ReplyStates.REPLY_NACKED,
states.ReplyEvents.DB_INFLATE,
states.ReplyStates.REPLY_NACKED,
self._sm_reinflated_reply_ack)
class RequestListener(object):
def __init__(self, conf, sender_connection, dispatcher,
db, id_system=None):
self._conn = sender_connection
self._dispatcher = dispatcher
self._requests = {}
self._messages_processed = 0
self._reply_observers = []
self._timeout = conf.messaging_retransmission_timeout
self._shutdown = False
self._conf = conf
self._db = db
self._id_system = id_system
self._lock = threading.RLock()
self._db.starting_agent()
def get_reply_observers(self):
# get the whole list so that the user can add and remove themselves.
        # This sort of thing should be done only with carefully written code
        # using carefully written observers that do very lightweight,
        # nonblocking operations.
return self._reply_observers
def _call_reply_observers(self, func_name, argument):
for o in self._reply_observers:
try:
func = getattr(o, func_name)
func(argument)
except:
_g_logger.exception("A bad observer threw an exception.")
                # don't let a misbehaving observer ruin everything
pass
def _process_doc(self, incoming_doc):
if incoming_doc is None:
return
with tracer.RequestTracer(incoming_doc['request_id']):
self._call_reply_observers("incoming_message", incoming_doc)
_g_logger.debug("New message type %s" % incoming_doc['type'])
# if the agent is misbehaving the AM might tell it to kill itself.
# cold.
if incoming_doc["type"] == message_types.MessageTypes.HEMLOCK:
_g_logger.error("HEMLOCK: DCM told me to kill myself.")
os.killpg(0, signal.SIGKILL)
sys.exit(10)
            # if it is an alert message, short circuit
if incoming_doc["type"] == message_types.MessageTypes.ALERT_ACK:
if self._id_system:
self._id_system.incoming_message(incoming_doc)
return
request_id = incoming_doc["request_id"]
# is this request already in memory?
if request_id in self._requests:
_g_logger.debug("The message was found in the requests.")
# send it through, state machine will deal with it
req = self._requests[request_id]
req.incoming_message(incoming_doc)
return
# if the request id has already been seen by the database
db_record = self._db.lookup_req(request_id)
if db_record:
_g_logger.info("Inflating the record from the DB."
+ request_id)
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout,
reply_doc=db_record.reply_doc,
start_state=db_record.state)
# this will probably be used in the near future so get it
# on the memory list
self._requests[request_id] = req
req.incoming_message(incoming_doc)
return
if incoming_doc["type"] == message_types.MessageTypes.REQUEST:
if len(list(self._requests.keys())) >=\
self._conf.messaging_max_at_once > -1:
# short circuit the case where the agent is too busy
dcm_logger.log_to_dcm_console_overloaded(
msg="The new request was rejected because the agent has too many outstanding requests.")
nack_doc = {
'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message': ("The agent can only handle %d "
"commands at once"
% self._conf.messaging_max_at_once)}
self._conn.send(nack_doc)
return
_g_logger.debug("A new request has come in.")
req = ReplyRPC(
self,
self._conf.agent_id,
self._conn,
request_id,
incoming_doc,
self._db,
timeout=self._timeout)
self._call_reply_observers("new_message", req)
# only add the message if processing was successful
self._requests[request_id] = req
try:
self._dispatcher.incoming_request(req)
except Exception:
_g_logger.exception("The dispatcher could not handle a "
"message.")
del self._requests[request_id]
dcm_logger.log_to_dcm_console_messaging_error(
msg="The dispatcher could not handle the message.")
raise
else:
# if we have never heard of the ID and this is not a new
# request we return a courtesy error
_g_logger.debug("Unknown message ID sending a NACK")
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'agent_id': self._conf.agent_id,
'error_message':
"%s is an unknown ID" % request_id}
self._conn.send(nack_doc)
def _validate_doc(self, incoming_doc):
pass
def _send_bad_message_reply(self, incoming_doc, message):
_g_logger.debug("Sending the bad message %s" % message)
        # we want to send a NACK to the message; however, it may be an error
        # because it was not formed with message_id or request_id. In this
# case we will send values in that place indicating that *a* message
# was bad. There will be almost no way for the sender to know which
# one
try:
request_id = incoming_doc['request_id']
except KeyError:
request_id = ReplyRPC.MISSING_VALUE_STRING
nack_doc = {'type': message_types.MessageTypes.NACK,
'message_id': utils.new_message_id(),
'request_id': request_id,
'error_message': message,
'agent_id': self._conf.agent_id}
self._conn.send(nack_doc)
def message_done(self, reply_message):
self._lock.acquire()
try:
request_id = reply_message.get_request_id()
del self._requests[request_id]
_g_logger.debug("The message %s has completed and is being "
"removed" % request_id)
self._messages_processed += 1
finally:
self._lock.release()
self._call_reply_observers("message_done", reply_message)
def get_messages_processed(self):
return self._messages_processed
def is_busy(self):
return len(self._requests) != 0
def shutdown(self):
"""
Stop accepting new requests but allow for outstanding messages to
complete.
"""
self._shutdown = True
for req in list(self._requests.values()):
req.kill()
def wait_for_all_nicely(self):
# XXX TODO how long should this block? do we need this?
# looks like just for tests
while self._requests:
dcm_events.poll()
def reply(self, request_id, reply_doc):
reply_req = self._requests[request_id]
reply_req.reply(reply_doc)
def incoming_parent_q_message(self, incoming_doc):
_g_logger.debug("Received message %s" % str(incoming_doc))
try:
self._validate_doc(incoming_doc)
self._process_doc(incoming_doc)
except Exception as ex:
_g_logger.exception(
"Error processing the message: %s" % str(incoming_doc))
self._send_bad_message_reply(incoming_doc, str(ex))
class ReplyObserverInterface(object):
@agent_util.not_implemented_decorator
def new_message(self, reply):
pass
@agent_util.not_implemented_decorator
def message_done(self, reply):
pass
@agent_util.not_implemented_decorator
def incoming_message(self, incoming_doc):
pass
```
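A minimal observer sketch for `RequestListener.get_reply_observers()`; as noted above, observer methods must be lightweight and non-blocking. The class name and print statements are illustrative only:
```python
class PrintingReplyObserver(object):
    """Implements the three hooks declared by ReplyObserverInterface."""

    def new_message(self, reply):
        # called when a brand new request is accepted into memory
        print("new request %s" % reply.get_request_id())

    def message_done(self, reply):
        # called after the request completes and is removed from memory
        print("finished %s" % reply.get_request_id())

    def incoming_message(self, incoming_doc):
        # called for every raw document before it is processed
        print("incoming type %s" % incoming_doc.get("type"))


# usage sketch: request_listener.get_reply_observers().append(
#     PrintingReplyObserver())
```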
#### File: plugins/api/base.py
```python
import logging
import os
import dcm.agent.logger as dcm_logger
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_api
import dcm.agent.utils as agent_util
_g_logger = logging.getLogger(__name__)
class PluginReply(object):
"""An instance of this class must be returned from the run() method of
all extension plugins. It is serialized into a JSON object and sent
back to DCM as a reply to the associated command.
"""
def __init__(self, return_code, reply_type="void", reply_object=None,
message="", error_message=""):
"""
:param return_code: 0 for success, non-0 for failure
:param reply_type: A string which defines the reply_object layout. For
example "void"
:param reply_object: A module defined reply payload. The reply_type
                             argument is used to determine this value's layout.
:param message: A string describing a successful action
:param error_message: A string describing any error that occurred while
processing this action.
"""
self._reply_doc = {
'return_code': return_code,
'reply_type': reply_type,
'reply_object': reply_object,
'message': message,
'error_message': error_message
}
def get_reply_doc(self):
return self._reply_doc
def get_message(self):
return self._reply_doc['message']
def set_message(self, msg):
self._reply_doc['message'] = msg
def get_return_code(self):
return self._reply_doc['return_code']
class _ArgHolder(object):
pass
class Plugin(object):
"""This is the base class that should be used for all plugins. It handles
the processing needed to validate and parse the protocol. When defining
a new plugin two class level variables should be defined.
:var protocol_arguments: This is a dictionary of arguments that the
command will expect/accept from DCM. It has the
following format.
.. code-block:: python
{ <argument name> :
            (<human readable description string>,
<True | False bool that states if the argument is mandatory>,
<argument type conversion function. This converts a byte string
into the needed python type. Some base functions can be found
in utils>,
<Default value>),
}
:var command_name: The name of this command. This must be globally
unique for all the commands in a given agent. It can
be defined by the module in order to tell the
dcm-agent-add-plugin program the desired name, however
ultimately it will be set by the agent and the
value may be different than the desired name.
:var long_runner: The variable long_runner can be set on the class
to instruct the dcm-agent-add-plugin that this plugin
will be run for a long time and should be set up for
polling with get_job_description.
"""
protocol_arguments = {}
command_name = None
def __init__(self, conf, request_id, items_map, name, arguments):
"""If the plugin overrides the constructor it must call super on
the parent constructor and pass in the same values it was passed.
:param conf: The DCM agent configuration object. This can be used
as a way to discover information about the agent
deployment. As an example conf.platform_name will tell
the plugin the linux distribution name (eg: ubuntu).
:param request_id: This is the request ID for this specific request
of the command. This will be different every time.
The plugin will rarely need this information.
:param items_map: This is an opaque structure that is threaded through
the module. Plugins should only use this when
calling super()
:param name: The name of this command. This will match
cls.command_name
:param arguments: The arguments that DCM passed into this command.
                          After the parent constructor is called these
arguments will be attributes of the self.args object.
"""
logname = __name__ + "." + name
log = logging.getLogger(logname)
self.logger = logging.LoggerAdapter(log, {'job_id': request_id})
self.job_id = request_id
self.name = name
self.conf = conf
self.items_map = items_map
self.arguments = arguments
self.args = _ArgHolder()
try:
self._validate_arguments()
except plugin_exceptions.AgentPluginParameterBadValueException:
raise
except Exception as ex:
raise plugin_exceptions.AgentPluginParameterBadValueException(
self.name, "general", str(ex))
def _validate_arguments(self):
# validate that all of the required arguments were sent
for arg in self.protocol_arguments:
h, mandatory, t, default = self.protocol_arguments[arg]
if mandatory and arg not in self.arguments:
raise plugin_exceptions.AgentPluginParameterNotSentException(
self.name, arg)
setattr(self.args, arg, default)
# validate that nothing extra was sent
for arg in self.arguments:
if arg not in self.protocol_arguments:
dcm_logger.log_to_dcm_console_unknown_job_parameter(
job_name=self.name,
parameter_name=arg)
else:
h, mandatory, t, default = self.protocol_arguments[arg]
a = self.arguments[arg]
if a is not None:
try:
a = t(a)
except Exception as ex:
_g_logger.exception(str(ex))
raise plugin_exceptions.AgentPluginParameterBadValueException(
self.name,
"Parameter %s has an invalid value %s" % (arg, a))
setattr(self.args, arg, a)
def __str__(self):
return self.name + ":" + self.job_id
def get_name(self):
"""This is called by DCM to get the name of the plugin. This should
not be overridden.
:return A string representing the command name:
"""
return self.name
def cancel(self, *args, **kwargs):
"""This method is called by the agent when an outstanding command needs
to be canceled. The plug in should treat it like a signal to cancel.
Then it is received the plugin should start canceling its work, however
it should return from cancel immediately. Cancel should not block
until the work is complete.
"""
pass
@agent_util.not_implemented_decorator
def run(self):
"""This method is called by the agent to give the plugin a thread that
it can use to do its work. When the plugin is finished it should
return a PluginReply.
If the plugin experiences an error while processing it can throw an
exception from the dcm.agent.plugins.api.exceptions module.
"""
pass
class ScriptPlugin(Plugin):
"""This base plugin class can be used for plugins that call out to
scripts. The ordered_param_list member variable must be set with the
parameters that the called script needs. The script name is
    pulled from the plugin's configuration section, ex:
[plugin:add_user]
type: python_module
module_name: dcm.agent.plugins.builtin.add_user
script_name: addUser
That name is used to locate the absolute path to a script under
<base location>/bin
"""
def __init__(self, conf, job_id, items_map, name, arguments):
super(ScriptPlugin, self).__init__(
conf, job_id, items_map, name, arguments)
self.ordered_param_list = []
self.cwd = None
try:
script_name = items_map["script_name"]
self.exe_path = conf.get_script_location(script_name)
if not os.path.exists(self.exe_path):
raise plugin_exceptions.AgentPluginConfigException(
"The plugin %s points an add_user_exe_path that does not "
"exist." % name)
except KeyError as ke:
raise plugin_exceptions.AgentPluginConfigException(
"The plugin %s requires the option %s" % (name, str(ke)))
def run(self):
command_list = [self.exe_path]
command_list.extend(self.ordered_param_list)
_g_logger.debug("Plugin running the command %s" % str(command_list))
_g_logger.debug("Running the remote %s" % self.exe_path)
(stdout, stderr, rc) = plugin_api.run_command(
self.conf, command_list, cwd=self.cwd)
_g_logger.debug("Command %s: stdout %s. stderr: %s" %
(str(command_list), stdout, stderr))
return PluginReply(rc, message=stdout, error_message=stderr)
```
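A minimal sketch of a plugin built directly on this base class, showing the `(help text, mandatory, type, default)` tuple layout of `protocol_arguments`, the converted values exposed on `self.args`, and the `PluginReply` return value. The `Echo` plugin and its module path are invented for illustration; only the base-class API shown above is assumed.
```python
# Hypothetical module, e.g. dcm/agent/plugins/builtin/echo.py -- illustration only.
import dcm.agent.plugins.api.base as plugin_base


class Echo(plugin_base.Plugin):

    protocol_arguments = {
        "message": ("The string to echo back", True, str, None),
        "repeat": ("How many times to repeat it", False, int, 1),
    }

    def run(self):
        # Validated arguments are available as attributes of self.args.
        text = " ".join([self.args.message] * self.args.repeat)
        return plugin_base.PluginReply(0, message=text)


def load_plugin(conf, job_id, items_map, name, arguments):
    return Echo(conf, job_id, items_map, name, arguments)
```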
#### File: plugins/api/pages.py
```python
import datetime
import json
import threading
import dcm.agent.exceptions as exceptions
import dcm.agent.utils as utils
from dcm.agent.events.globals import global_space as dcm_events
class BasePage(object):
def __init__(self, page_size):
self.creation_time = datetime.datetime.now()
self._page_size = page_size
self._lock = threading.RLock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
class JsonPage(BasePage):
def __init__(self, page_size, obj_list):
super(JsonPage, self).__init__(page_size)
self._obj_list = obj_list
@utils.class_method_sync
def get_next_page(self):
page_list = []
size_so_far = 0
for json_obj in self._obj_list:
line_size = len(json.dumps(json_obj))
if size_so_far + line_size > self._page_size:
break
page_list.append(json_obj)
size_so_far += line_size
self._obj_list = self._obj_list[len(page_list):]
return (page_list, len(self._obj_list))
class StringPage(BasePage):
def __init__(self, page_size, string_data):
super(StringPage, self).__init__(page_size)
self._string_data = string_data
@utils.class_method_sync
def get_next_page(self):
this_page = self._string_data[:self._page_size]
self._string_data = self._string_data[self._page_size:]
return (this_page, len(self._string_data))
class PageMonitor(object):
def __init__(self, page_size=12*1024, life_span=60*60*2, sweep_time=10):
self._pages = {}
self._page_size = page_size
self._lock = threading.RLock()
self._life_span = life_span
self._timer = None
self._stopped = False
self._sweep_time = sweep_time
@utils.class_method_sync
def start(self):
if self._stopped:
return
self._timer = dcm_events.register_callback(
self.clean_sweep, delay=self._sweep_time)
@utils.class_method_sync
def stop(self):
self._stopped = True
if self._timer is not None:
dcm_events.cancel_callback(self._timer)
self._timer = None
@utils.class_method_sync
def get_next_page(self, token):
if token not in self._pages:
raise exceptions.AgentPageNotFoundException(token)
pager = self._pages[token]
(page, remaining) = pager.get_next_page()
if remaining < 1:
del self._pages[token]
token = None
return page, token
@utils.class_method_sync
def new_pager(self, pager, token):
self._pages[token] = pager
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
@utils.class_method_sync
def clean_sweep(self):
too_old = datetime.datetime.now() - \
datetime.timedelta(seconds=self._life_span)
kill_keys = []
for k in self._pages:
pager = self._pages[k]
if pager.creation_time < too_old:
kill_keys.append(k)
for k in kill_keys:
del self._pages[k]
self.start()
```
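A short sketch of how `JsonPage` and `PageMonitor` fit together, following the same calls exercised later in `test_pager.py`; the token string and record contents are made up, and the background sweeper is not started here.
```python
# Page a list of JSON-serializable records through the monitor.
import dcm.agent.plugins.api.pages as pages

page_size = 1024                      # maximum serialized bytes per page
records = [{"index": i} for i in range(100)]

monitor = pages.PageMonitor(page_size=page_size)
token = "example-token"               # opaque token chosen by the caller
monitor.new_pager(pages.JsonPage(page_size, records), token)

# Keep requesting pages until the monitor hands back a None token.
while token is not None:
    page, token = monitor.get_next_page(token)
    print(len(page), "records on this page")
```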
#### File: plugins/builtin/add_user.py
```python
import os
import dcm.agent.messaging.persistence as persistence
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.utils as plugin_utils
class AddUser(plugin_base.ScriptPlugin):
protocol_arguments = {
"userId": ("The new unix account name to be created", True,
plugin_utils.user_name, None),
"firstName": ("The user's first name", True, str, None),
"lastName": ("The user's last name", True, str, None),
"authentication": ("The user's ssh public key", True, str, None),
"administrator": ("A string that is either 'true' or 'false' "
"which indicates if the new user should have "
"ssh access", True, str, None)
}
def __init__(self, conf, job_id, items_map, name, arguments):
super(AddUser, self).__init__(
conf, job_id, items_map, name, arguments)
self.ordered_param_list = [self.args.userId,
self.args.userId,
self.args.firstName,
self.args.lastName,
self.args.administrator.lower()]
self.ssh_public_key = self.args.authentication
self._db = persistence.SQLiteAgentDB(conf.storage_dbfile)
def run(self):
key_file = self.conf.get_temp_file(self.args.userId + ".pub")
try:
if self.ssh_public_key:
with open(key_file, "w") as f:
f.write(self.ssh_public_key)
self.ordered_param_list.append(key_file)
plugin_utils.log_to_dcm_console_job_details(
job_name=self.name,
details="Attempting to add the user %s." % self.args.userId)
rc = super(AddUser, self).run()
admin_bool = self.args.administrator.lower() == "true"
self._db.add_user(
self.conf.agent_id, self.args.userId, self.ssh_public_key,
admin_bool)
plugin_utils.log_to_dcm_console_job_details(
job_name=self.name,
details="The user %s was added." % self.args.userId)
return rc
finally:
if os.path.exists(key_file):
plugin_utils.secure_delete(self.conf, key_file)
def load_plugin(conf, job_id, items_map, name, arguments):
return AddUser(conf, job_id, items_map, name, arguments)
```
#### File: plugins/builtin/clean_image.py
```python
import logging
import os
import sys
import threading
import dcm.agent.events.globals as events
import dcm.agent.logger as dcm_logger
from dcm.agent.messaging import persistence
import dcm.agent.utils as utils
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.builtin.remove_user as remove_user
_g_logger = logging.getLogger(__name__)
class CleanImage(plugin_base.Plugin):
protocol_arguments = {
"delUser":
("List of accounts to remove",
False, list, None),
"delHistory":
("Flag to delete all history files in all accounts",
False, bool, None),
"recovery":
("Create a recovery tar file of all the files that are deleted and encrypt it with the owners public key.",
False, bool, None),
"delKeys":
("Flag to delete private keys in users home directories",
False, bool, False)
}
def __init__(self, conf, job_id, items_map, name, arguments):
super(CleanImage, self).__init__(
conf, job_id, items_map, name, arguments)
self._done_event = threading.Event()
self._topic_error = None
self._db = persistence.SQLiteAgentDB(conf.storage_dbfile)
def run_scrubber(self, opts):
exe = os.path.join(os.path.dirname(sys.executable),
"dcm-agent-scrubber")
cmd = [
self.conf.system_sudo,
'-E',
exe
]
if opts:
cmd.extend(opts)
(stdout, stderr, rc) = utils.run_command(self.conf, cmd)
if rc != 0:
return plugin_base.PluginReply(
rc, message=stdout, error_message=stderr)
return plugin_base.PluginReply(
0, message="The image was scrubbed successfully")
def _clean_topic_done(self, topic_error):
self._topic_error = topic_error
self._done_event.set()
def run(self):
try:
events.global_pubsub.publish(
events.DCMAgentTopics.CLEANUP,
topic_kwargs={'request_id': self.job_id},
done_cb=self._clean_topic_done)
if self.args.delUser:
dcm_logger.log_to_dcm_console_job_details(
job_name=self.name,
details='Deleting users.')
for user in self.args.delUser:
rdoc = remove_user.RemoveUser(
self.conf,
self.job_id,
{'script_name': 'removeUser'},
'remove_user',
{'userId': user}).run()
if rdoc.get_return_code() != 0:
rdoc.set_message(rdoc.get_message() +
" : Delete users failed on %s" % user)
return rdoc
scrub_opts = ["-X", "-b", "-A"]
if self.args.delHistory:
scrub_opts.append("-H")
if self.args.delKeys:
dcm_logger.log_to_dcm_console_job_details(
job_name=self.name, details='Deleting private keys.')
scrub_opts.append("-k")
if self.args.recovery:
# find the public key, if not there abort
try:
username, public_key = self._db.get_owner()
except:
_g_logger.exception("Could not get the owning user")
raise Exception(
"The agent could not encrypt the rescue image")
if public_key is None:
raise Exception(
"The agent could not encrypt the rescue image")
tar_file = "/tmp/dcm_agent_recovery.tar.gz"
                scrub_opts.extend(["-r", tar_file, "-e", public_key])
self.run_scrubber(scrub_opts)
self._done_event.wait()
if self._topic_error is not None:
return plugin_base.PluginReply(
1, error_message=str(self._topic_error))
return plugin_base.PluginReply(
0, message="Clean image command ran successfully")
except Exception as ex:
_g_logger.exception("clean_image failed: " + str(ex))
return plugin_base.PluginReply(
1, message=str(ex), error_message=str(ex))
def load_plugin(conf, job_id, items_map, name, arguments):
return CleanImage(conf, job_id, items_map, name, arguments)
```
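A standalone restatement of the flag mapping performed in `run()` above, pulled out of the plugin class so it can be run and inspected on its own; the flag names come from the code above, while the sample values are invented.
```python
# Map CleanImage-style options onto dcm-agent-scrubber flags.
def scrubber_options(del_history=False, del_keys=False,
                     recovery_tar=None, public_key=None):
    opts = ["-X", "-b", "-A"]          # always passed by CleanImage
    if del_history:
        opts.append("-H")
    if del_keys:
        opts.append("-k")
    if recovery_tar is not None:
        # the recovery tar is encrypted with the owner's public key
        opts.extend(["-r", recovery_tar, "-e", public_key])
    return opts


print(scrubber_options(del_history=True, del_keys=True,
                       recovery_tar="/tmp/dcm_agent_recovery.tar.gz",
                       public_key="ssh-rsa AAAA..."))
```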
#### File: plugins/builtin/configure_server.py
```python
import configparser
import json
import logging
import os
import urllib.parse
import dcm.agent.exceptions as exceptions
import dcm.agent.logger as dcm_logger
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_utils
import dcm.agent.utils as utils
_g_logger = logging.getLogger(__name__)
class ConfigureServer(plugin_base.Plugin):
protocol_arguments = {
"configType":
("Which configuration management software to use (chef or puppet)",
True, str, None),
"authId":
("", False, str, None),
"configurationData":
("", False, plugin_utils.base64type_convertor, None),
"encryptedConfigToken":
("", False, plugin_utils.base64type_convertor, None),
"encryptedAuthSecret":
("", False, plugin_utils.base64type_convertor, None),
"endpoint":
("", False, str, None),
"providerRegionId":
("", False, str, None),
"runAsUser":
("", False, str, None),
"storageDelegate":
("", False, str, None),
"storageEndpoint":
("", False, str, None),
"storageAccount":
("", False, str, None),
"scriptFiles":
("", False, list, None),
"storagePublicKey":
("", False, plugin_utils.base64type_convertor, None),
"storagePrivateKey":
("", False, plugin_utils.base64type_convertor, None),
"environmentId":
("", False, str, None),
"personalityFiles":
("", False, list, None),
"configClientName":
("", False, str, None),
"configCert":
("", False, plugin_utils.base64type_convertor, None),
"configKey":
("", False, plugin_utils.base64type_convertor, None),
"runListIds":
("", False, list, None),
"parameterList":
("", False, plugin_utils.base64type_convertor, None),
}
def __init__(self, conf, job_id, items_map, name, arguments):
super(ConfigureServer, self).__init__(
conf, job_id, items_map, name, arguments)
if not self.args.runAsUser:
self.args.runAsUser = self.conf.system_user
def configure_server_with_chef(self):
chef_dir = self.conf.get_temp_file("chefconf", isdir=True)
run_list_file_name = os.path.join(chef_dir, "runList.cfg")
token_file_path = self.conf.get_temp_file("token.pem")
try:
if self.args.encryptedAuthSecret:
token = self.args.encryptedAuthSecret
else:
token = "NULL"
authId = self.args.authId
if authId is None:
authId = "NULL"
endpoint = self.args.endpoint
if endpoint is None:
endpoint = "NULL"
environmentId = self.args.environmentId
if environmentId is None:
environmentId = "NULL"
chef_json = {"run_list": self.args.runListIds}
with open(run_list_file_name, "w") as fptr:
fptr.write(json.dumps(chef_json))
with open(token_file_path, "w") as fptr:
fptr.write(token)
fptr.write(os.linesep)
exe = self.conf.get_script_location(
"runConfigurationManagement-CHEF")
cmd_list = [exe,
self.args.runAsUser,
self.args.configClientName,
token_file_path,
run_list_file_name,
authId,
endpoint,
environmentId,
self.conf.configuration_management_chef_client_version]
return plugin_utils.run_command(self.conf, cmd_list)
finally:
plugin_utils.safe_delete(run_list_file_name)
plugin_utils.safe_delete(token_file_path)
def _edit_puppet_conf(self, template_path, new_location, endpoint):
parser = configparser.SafeConfigParser()
parser.read(template_path)
if not parser.has_section("agent"):
parser.add_section("agent")
parser.set("agent", "certname", self.args.configClientName)
parser.set("agent", "server", endpoint)
with open(new_location, "w") as fptr:
parser.write(fptr)
def configure_server_with_puppet(self):
if self.args.endpoint is None:
raise exceptions.AgentOptionValueNotSetException("endpoint")
# XXX it will only work with the default port. There is no way for
# the user to configure anything else in the console
endpoint = urllib.parse.urlparse(self.args.endpoint).hostname
puppet_extras_base_path = os.path.join(self.conf.extra_base_path,
"puppetconf")
puppet_extras_bin = os.path.join(self.conf.extra_base_path,
"bin/puppet")
try:
utils.install_extras(
self.conf, package=self.conf.extra_package_name)
except exceptions.AgentExtrasNotInstalledException as ex:
_g_logger.exception("An error occurred trying to install puppet. "
"Exception message is %s" % str(ex))
raise
template_puppet_conf_path = os.path.join(puppet_extras_base_path,
"puppet.conf.template")
if not os.path.exists(template_puppet_conf_path):
raise exceptions.AgentExtrasNotInstalledException(
"The puppet.conf template did not install properly.")
if not os.path.exists(puppet_extras_bin):
raise exceptions.AgentExtrasNotInstalledException(
"The puppet binary did not install properly.")
puppet_conf_path = self.conf.get_temp_file("puppet.conf")
self._edit_puppet_conf(template_puppet_conf_path,
puppet_conf_path,
endpoint)
cert_file_path = self.conf.get_temp_file("cert.pem")
key_file_path = self.conf.get_temp_file("key.pem")
try:
with open(cert_file_path, "w") as fptr:
fptr.write(self.args.configCert)
with open(key_file_path, "w") as fptr:
fptr.write(self.args.configKey)
exe = self.conf.get_script_location(
"runConfigurationManagement-PUPPET")
cmd = [exe,
endpoint,
cert_file_path,
key_file_path,
self.args.configClientName,
self.conf.extra_base_path,
puppet_conf_path]
return plugin_utils.run_command(self.conf, cmd)
finally:
plugin_utils.safe_delete(cert_file_path)
plugin_utils.safe_delete(key_file_path)
plugin_utils.safe_delete(puppet_conf_path)
def run(self):
_g_logger.info("Running configuration management of type " +
self.args.configType)
if self.args.configType.upper() == "CHEF":
(stdout, stderr, rc) = self.configure_server_with_chef()
elif self.args.configType.upper() == "PUPPET":
(stdout, stderr, rc) = self.configure_server_with_puppet()
else:
raise plugin_exceptions.AgentPluginParameterBadValueException(
"configType", "CHEF or PUPPET")
if stderr:
dcm_logger.log_to_dcm_console_configuration_management_error(
stderr=stderr)
if stdout:
dcm_logger.log_to_dcm_console_configuration_management_output(
stdout=stdout)
if rc != 0:
return plugin_base.PluginReply(rc, message=stderr)
else:
return plugin_base.PluginReply(
rc, reply_type="string", reply_object=stdout)
def load_plugin(conf, job_id, items_map, name, arguments):
return ConfigureServer(conf, job_id, items_map, name, arguments)
```
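A standalone sketch of the `puppet.conf` edit performed by `_edit_puppet_conf()`; `ConfigParser` stands in for the `SafeConfigParser` used above, and the file names and host names are made up.
```python
# Rewrite a puppet.conf template with the agent's certname and server.
import configparser

parser = configparser.ConfigParser()
parser.read("puppet.conf.template")   # a missing file simply yields an empty config
if not parser.has_section("agent"):
    parser.add_section("agent")
parser.set("agent", "certname", "node-certname.example.com")
parser.set("agent", "server", "puppetmaster.example.com")
with open("puppet.conf", "w") as fptr:
    parser.write(fptr)
```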
#### File: plugins/builtin/rename.py
```python
import logging
import re
import dcm.agent.plugins.api.base as plugin_base
import dcm.agent.plugins.api.exceptions as plugin_exceptions
import dcm.agent.plugins.api.utils as plugin_utils
_g_logger = logging.getLogger(__name__)
def _is_legal(proposed_name):
if len(proposed_name) > 255:
raise plugin_exceptions.AgentPluginParameterBadValueException(
"rename", "serverName", "less than 255")
regex = ("^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)"
"*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
allowed = re.compile(regex)
if allowed is None:
raise plugin_exceptions.AgentPluginParameterBadValueException(
"rename", "serverName", "a legal hostname")
class Rename(plugin_base.ScriptPlugin):
protocol_arguments = {
"serverName":
("The host name to which this server will be set.",
True, str, None)
}
def __init__(self, conf, job_id, items_map, name, arguments):
super(Rename, self).__init__(
conf, job_id, items_map, name, arguments)
_is_legal(arguments["serverName"])
self.ordered_param_list = [arguments["serverName"]]
def run(self):
private_ips = self.conf.meta_data_object.get_ipv4_addresses()
if not private_ips:
return plugin_base.PluginReply(
1, error_message="No IP Address was found")
self.ordered_param_list.append(private_ips[0])
plugin_utils.log_to_dcm_console_job_details(
job_name=self.name,
details="Renaming the server to %s with the local IP %s"
% (self.args.serverName, private_ips[0]))
return super(Rename, self).run()
def load_plugin(conf, job_id, items_map, name, arguments):
return Rename(conf, job_id, items_map, name, arguments)
```
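A quick, runnable check of the hostname pattern used by `_is_legal()`; the candidate names are invented.
```python
# Exercise the hostname regex against a few sample names.
import re

regex = (r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)"
         r"*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$")
allowed = re.compile(regex)

for candidate in ("web-01.example.com", "ok-name", "-bad-start", "bad..dots"):
    print(candidate, bool(allowed.match(candidate)))
```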
#### File: plugins/testplugins/__init__.py
```python
import os
import subprocess
import dcm.agent.plugins.api.base as plugin_base
from dcm.agent.plugins.api.exceptions import AgentPluginConfigException
import dcm.agent.plugins.loader as plugin_loader
# This plugin directly forks out a script and passes in the arguments it
# received. This is only used for testing.
class ExePlugin(plugin_base.Plugin):
def __init__(self, conf, request_id, items_map, name, arguments):
super(ExePlugin, self).__init__(
conf, request_id, items_map, name, arguments)
if 'path' not in items_map:
raise AgentPluginConfigException(
"The configuration for the %s plugin does not have "
"an path entry." % name)
exe_path = items_map['path']
if not os.path.exists(exe_path):
raise AgentPluginConfigException(
"Module %s is misconfigured. The path %s "
"does not exists" % (name, exe_path))
self.exe = os.path.abspath(exe_path)
self.cwd = os.path.dirname(self.exe)
def run(self):
try:
return self._exec()
except Exception as ex:
self.logger.error("Error running the subprocess", ex)
def _exec(self):
args = [self.exe]
args.extend(self.arguments)
self.logger.info("Forking the command " + str(args))
args = ' '.join(args) # for some reason i cannot just pass the array
# at least should do a shell join
process = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.cwd)
# TODO iterate over the output so that it does not all come just at
# the end
stdout, stderr = process.communicate()
if stdout is not None:
stdout = stdout.decode()
else:
stdout = ""
if stderr is not None:
stderr = stderr.decode()
else:
stderr = ""
self.logger.info("STDOUT: " + stdout)
self.logger.info("STDERR: " + stderr)
self.logger.info("Return code: " + str(process.returncode))
return plugin_base.PluginReply(
process.returncode, message=stdout, error_message=stderr)
def cancel(self, *args, **kwargs):
pass
def _load_exe(conf, request_id, items_map, name, arguments):
return ExePlugin(conf, request_id, items_map, name, arguments)
def register_test_loader():
plugin_loader.register_plugin_loader("exe", _load_exe)
```
#### File: tests/integration/test_agent_status.py
```python
import getpass
import os
import shutil
import tempfile
import unittest
import dcm.agent.cmd.service as service
import dcm.agent.cmd.configure as configure
import dcm.agent.logger as logger
import dcm.agent.tests.utils.general as test_utils
# does not inherit from unittest because of the python generators for
# testing storage clouds
class TestAgentStatus(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.run_as_user = getpass.getuser()
test_utils.connect_to_debugger()
cls.test_base_path = tempfile.mkdtemp()
cls.test_conf_path = os.path.join(
cls.test_base_path, "etc", "agent.conf")
conf_args = ["-c", "Amazon",
"-u", "http://doesntmatter.org/ws",
"-p", cls.test_base_path,
"-t", os.path.join(cls.test_base_path, "tmp"),
"-C", "ws",
"-U", cls.run_as_user,
"-l", "/tmp/agent_status_test.log"]
rc = configure.main(conf_args)
if rc != 0:
raise Exception("We could not configure the test env")
@classmethod
def tearDownClass(cls):
logger.clear_dcm_logging()
shutil.rmtree(cls.test_base_path)
def test_agent_status(self):
# we need a way to parse the output to verify tests
rc = service.main(
args=["dcm-agent", "-c", self.test_conf_path, "status"])
self.assertEqual(rc, 1)
```
#### File: tests/integration/test_cloud_metadata.py
```python
import getpass
import os
import platform
import shutil
import socket
import tempfile
import unittest
from mock import patch
import dcm.agent.cloudmetadata as cloudmetadata
import dcm.agent.config as config
import dcm.agent.cmd.configure as configure
import dcm.agent.tests.utils.general as test_utils
class TestCloudMetadata(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
cls.run_as_user = getpass.getuser()
cls.test_base_path = tempfile.mkdtemp()
conf_args = ["-c", "Amazon",
"-u", "http://doesntmatter.org/ws",
"-p", cls.test_base_path,
"-t", os.path.join(cls.test_base_path, "tmp"),
"-C", "success_tester",
"-U", cls.run_as_user,
"-l", "/tmp/agent_test_log.log"]
rc = configure.main(conf_args)
if rc != 0:
raise Exception("We could not configure the test env")
cls.test_conf_path = \
os.path.join(cls.test_base_path, "etc", "agent.conf")
cls.conf = config.AgentConfig([cls.test_conf_path])
cls.conf.start_job_runner()
@classmethod
def tearDownClass(cls):
cls.conf.stop_job_runner()
shutil.rmtree(cls.test_base_path)
def setUp(self):
self.clouds = {
1: cloudmetadata.AWSMetaData,
2: cloudmetadata.JoyentMetaData,
3: cloudmetadata.GCEMetaData,
4: cloudmetadata.AzureMetaData
}
self.cloud_types = {
1: 'Amazon',
2: 'Joyent',
3: 'Google',
4: 'Azure'
}
def tearDown(self):
self.clouds = None
self.cloud_types = None
@test_utils.skip_docker
def test_dhcp(self):
ipaddr = cloudmetadata.get_dhcp_ip_address(self.conf)
if platform.system().lower() == "linux":
if not ipaddr:
self.fail("We could not find the DHCP server address. "
"This will cause CloudStack to fail.")
try:
socket.inet_aton(ipaddr)
self.assertTrue(True)
except socket.error:
self.fail('You passed an invalid ip address')
def _get_instance_data_cloud_none(self, cloud):
self.conf.cloud_type = cloud
inst_id = self.conf.meta_data_object.get_instance_id()
self.assertIsNone(inst_id)
def test_get_instance_data_amazon_none(self):
if 'DCM_AGENT_ON_AMAZON' in os.environ:
raise unittest.SkipTest("We are actually on amazon")
self._get_instance_data_cloud_none(cloudmetadata.CLOUD_TYPES.Amazon)
def test_get_instance_data_google_none(self):
self.conf.meta_data_object = cloudmetadata.GCEMetaData(
self.conf, base_url=self.conf.cloud_metadata_url)
self._get_instance_data_cloud_none(cloudmetadata.CLOUD_TYPES.Google)
def test_get_instance_data_joyent_none(self):
self.conf.meta_data_object = cloudmetadata.JoyentMetaData(self.conf)
self._get_instance_data_cloud_none(cloudmetadata.CLOUD_TYPES.Joyent)
def test_get_instance_data_azure_none(self):
self.conf.cloud_type = cloudmetadata.CLOUD_TYPES.Azure
self.conf.meta_data_object = cloudmetadata.AzureMetaData(self.conf)
inst_id = self.conf.meta_data_object.get_instance_id()
# this will likely change in the future
hostname = socket.gethostname()
ha = hostname.split(".")
should_be = "%s:%s:%s" % (ha[0], ha[0], ha[0])
self.assertEqual(should_be, inst_id)
@patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_get_aws_instance_id(self, mock_server):
self.conf.meta_data_object = cloudmetadata.AWSMetaData(
self.conf, base_url=self.conf.cloud_metadata_url)
mock_server.return_value = 'fake_instance_id'
instance_id = self.conf.meta_data_object.get_instance_id()
self.assertEqual(instance_id, 'fake_instance_id')
@patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_get_gce_instance_id(self, mock_server):
self.conf.meta_data_object = cloudmetadata.GCEMetaData(
self.conf, base_url=self.conf.cloud_metadata_url)
mock_server.side_effect = ['fake_instance_id', 'servername.hello.com']
instance_id = self.conf.meta_data_object.get_instance_id()
self.assertEqual(instance_id, 'servername_fake_instance_id')
@patch('dcm.agent.cloudmetadata.JoyentMetaData.get_cloud_metadata')
def test_get_joyent_instance_id(self, mock_joyent_meta):
self.conf.meta_data_object = cloudmetadata.JoyentMetaData(self.conf)
mock_joyent_meta.return_value = 'fake_instance_id'
instance_id = self.conf.meta_data_object.get_instance_id()
self.assertEqual(instance_id, 'fake_instance_id')
@patch('dcm.agent.cloudmetadata.AzureMetaData.get_instance_id')
def test_get_azure_instance_id(self, mock_instance_id):
self.conf.meta_data_object = cloudmetadata.AzureMetaData(self.conf)
mock_instance_id.return_value =\
'fake_instance_id:fake_instance_id:fake_instance_id'
instance_id = self.conf.meta_data_object.get_instance_id()
self.assertEqual(instance_id,
'fake_instance_id:fake_instance_id:fake_instance_id')
@patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_get_aws_startup_script(self, mock_server):
self.conf.meta_data_object = cloudmetadata.AWSMetaData(
self.conf, base_url=self.conf.cloud_metadata_url)
mock_server.return_value = 'fake_startup_script'
script = self.conf.meta_data_object.get_startup_script()
self.assertEqual(script, 'fake_startup_script')
@patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_get_gce_startup_script(self, mock_server):
self.conf.meta_data_object = cloudmetadata.GCEMetaData(
self.conf, base_url=self.conf.cloud_metadata_url)
mock_server.return_value = 'fake_startup_script'
script = self.conf.meta_data_object.get_startup_script()
self.assertEqual(script, 'fake_startup_script')
@patch('dcm.agent.cloudmetadata.JoyentMetaData.get_cloud_metadata')
def test_get_joyent_startup_script(self, mock_joyent_meta):
self.conf.meta_data_object = cloudmetadata.JoyentMetaData(self.conf)
mock_joyent_meta.return_value = 'fake_startup_script'
script = self.conf.meta_data_object.get_startup_script()
self.assertEqual(script, 'fake_startup_script')
@patch('dcm.agent.cloudmetadata._get_metadata_server_url_data')
def test_get_openstack_startup_script(self, mock_cloud_meta):
self.conf.meta_data_object = cloudmetadata.OpenStackMetaData(self.conf)
mock_cloud_meta.return_value = 'fake_startup_script'
script = self.conf.meta_data_object.get_startup_script()
self.assertEqual(script, 'fake_startup_script')
def test_set_metadata_object(self):
for cloud in self.clouds:
self.conf.cloud_type = self.cloud_types[cloud]
self.conf.meta_data_object = None
cloudmetadata.set_metadata_object(self.conf)
self.assertIsInstance(self.conf.meta_data_object,
self.clouds[cloud])
```
#### File: unit/eventtracer/test_tracer.py
```python
import logging
import unittest
import dcm.eventlog.tracer as tracer
class TestEventTracer(unittest.TestCase):
def test_basic_event_log(self):
logger = logging.getLogger(__name__)
filter = tracer.RequestFilter()
logger.addFilter(filter)
with tracer.RequestTracer("12345"):
logger.error("A log record")
```
#### File: unit/messaging/test_request.py
```python
import unittest
import mock
import dcm.agent.logger as logger
import dcm.agent.messaging.request as request
import dcm.agent.messaging.states as states
import dcm.agent.messaging.types as types
from dcm.agent.events.globals import global_space as dcm_events
class TestRequesterStandardPath(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.clear_dcm_logging()
def _validate_request_message(self, send_doc, doc):
self.assertEqual(send_doc['payload'], doc)
self.assertTrue('message_id' in send_doc)
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.REQUEST)
def test_request_call_writes_request_message(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self._validate_request_message(send_doc, doc)
self.assertEqual(conn.send.call_count, 1)
def test_request_ack(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
def test_requesting_reply(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id']}
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
def test_standard_path(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
reply = requester.get_reply()
requester.got_reply()
self.assertEqual(reply, reply_doc)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self.assertEqual(reply_doc['message_id'], send_doc['message_id'])
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.ACK)
requester.ack_sent_timeout()
def test_standard_with_callback_path(self):
self.called = False
def reply_called(*args, **kwargs):
self.called = True
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(
doc, conn, "XYZ", reply_callback=reply_called)
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
while requester._sm._current_state !=\
states.RequesterStates.ACK_SENT:
dcm_events.poll()
self.assertEqual(states.RequesterStates.ACK_SENT,
requester._sm._current_state)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self.assertEqual(reply_doc['message_id'], send_doc['message_id'])
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.ACK)
requester.ack_sent_timeout()
class TestRequesterRetransmissionCases(unittest.TestCase):
@classmethod
def setUpClass(cls):
logger.clear_dcm_logging()
def _validate_request_message(self, send_doc, doc):
self.assertEqual(send_doc['payload'], doc)
self.assertTrue('message_id' in send_doc)
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.REQUEST)
def test_request_no_ack_timeout(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ", timeout=1)
requester.send()
dcm_events.poll(timeblock=1.5)
self.assertGreater(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
self._validate_request_message(param_list[0], doc)
requester.cleanup()
def test_double_reply(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
reply = requester.get_reply()
requester.got_reply()
self.assertEqual(reply, reply_doc)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self.assertEqual(reply_doc['message_id'], send_doc['message_id'])
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.ACK)
requester.ack_sent_timeout()
def test_reply_after_ack(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
requester.incoming_message(reply_doc)
reply = requester.get_reply()
requester.got_reply()
self.assertEqual(reply, reply_doc)
requester.incoming_message(reply_doc)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self.assertEqual(reply_doc['message_id'], send_doc['message_id'])
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.ACK)
requester.ack_sent_timeout()
requester.cleanup()
def test_double_requested_ack(self):
conn = mock.Mock()
doc = {'amessage': 'foru'}
requester = request.RequestRPC(doc, conn, "XYZ")
requester.send()
self.assertEqual(conn.send.call_count, 1)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
reply_doc = {'type': types.MessageTypes.ACK,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual('REQUESTED', requester._sm._current_state)
requester.incoming_message(reply_doc)
reply_doc = {'type': types.MessageTypes.REPLY,
'message_id': send_doc['message_id'],
'request_id': send_doc['request_id']}
requester.incoming_message(reply_doc)
self.assertEqual(states.RequesterStates.USER_CALLBACK,
requester._sm._current_state)
reply = requester.get_reply()
requester.got_reply()
self.assertEqual(reply, reply_doc)
(param_list, keywords) = conn.send.call_args
send_doc = param_list[0]
self.assertEqual(reply_doc['message_id'], send_doc['message_id'])
self.assertTrue('request_id' in send_doc)
self.assertTrue('type' in send_doc)
self.assertEqual(send_doc['type'], types.MessageTypes.ACK)
requester.ack_sent_timeout()
```
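The happy path exercised by the tests above, condensed into one runnable sequence: send a request through a mocked connection, feed the ACK and REPLY back in, then acknowledge the reply. Only calls already used in the tests are assumed.
```python
# REQUEST -> ACK -> REPLY -> ACK, driven by hand through a mock connection.
import mock
import dcm.agent.messaging.request as request
import dcm.agent.messaging.types as types

conn = mock.Mock()
requester = request.RequestRPC({"amessage": "foru"}, conn, "XYZ")
requester.send()

(param_list, keywords) = conn.send.call_args
sent = param_list[0]
requester.incoming_message({"type": types.MessageTypes.ACK,
                            "message_id": sent["message_id"],
                            "request_id": sent["request_id"]})
requester.incoming_message({"type": types.MessageTypes.REPLY,
                            "message_id": sent["message_id"],
                            "request_id": sent["request_id"]})
print(requester.get_reply())
requester.got_reply()          # sends the ACK for the reply
requester.ack_sent_timeout()
```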
#### File: tests/unit/test_gen_docs.py
```python
from types import ModuleType
import unittest
import dcm.agent.cmd.gen_docs as gen_docs
class TestGenDocs(unittest.TestCase):
def setUp(self):
"""
:return: a list of the files in src/dcm/agent/plugins/builtin
"""
self.files = gen_docs.filelist
def tearDown(self):
self.files = None
def test_files(self):
"""
:return: assert that list does not contain .pyc files
and that it does contain add_user.py
"""
assert ("__init__.py" not in self.files)
assert ("__init__.pyc" not in self.files)
assert ("add_user.py" in self.files)
assert ("add_user.pyc" not in self.files)
def test_dynamic_import(self):
"""
:return: call dynamic_import and assert that it returns a module
"""
for file in self.files:
x = gen_docs.dynamic_import(file)
# it is a module
assert (isinstance(x, ModuleType))
def test_get_protocol_argument_dict(self):
"""
:return: call get_protocol_argument_dict and assert
that it returns a dict
"""
for file in self.files:
x = gen_docs.dynamic_import(file)
y = gen_docs.get_protocol_argument_dict(x)
# it is a dict
assert (isinstance(y, dict))
def test_gen_md_output(self):
"""
:return: assertion that expected_output is legit
when remove_user.py and add_user.py are
run through gen_docs.py
"""
fileone = 'remove_user.py'
expected_output_fileone = """## remove_user.py parameters:
- userId: The unix account name of the user to remove
- optional: True
- type: str
- default: None
"""
filetwo = 'add_user.py'
expected_output_filetwo = """## add_user.py parameters:
- administrator: A string that is either 'true' or 'false' which indicates if the new user should have ssh access
- optional: True
- type: str
- default: None
- authentication: The user's ssh public key
- optional: True
- type: str
- default: None
- firstName: The user's first name
- optional: True
- type: str
- default: None
- lastName: The user's last name
- optional: True
- type: str
- default: None
- userId: The new unix account name to be created
- optional: True
- type: Safe user name
- default: None
"""
# check remove_user.py
x = gen_docs.dynamic_import(fileone)
y = gen_docs.get_protocol_argument_dict(x)
z = gen_docs.output_markdown(fileone, y)
assert (z == expected_output_fileone)
# check add_user.py
a = gen_docs.dynamic_import(filetwo)
b = gen_docs.get_protocol_argument_dict(a)
c = gen_docs.output_markdown(filetwo, b)
assert (c == expected_output_filetwo)
```
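The same helpers the test uses, run directly to regenerate the markdown for a single plugin; `add_user.py` refers to the builtin plugin shown earlier.
```python
# Produce the markdown parameter listing for one builtin plugin.
import dcm.agent.cmd.gen_docs as gen_docs

mod = gen_docs.dynamic_import("add_user.py")
args = gen_docs.get_protocol_argument_dict(mod)
print(gen_docs.output_markdown("add_user.py", args))
```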
#### File: tests/unit/test_handshake.py
```python
import os
import tempfile
import unittest
import mock
import dcm.agent
import dcm.agent.config as config
import dcm.agent.handshake as handshake
class TestHandshake(unittest.TestCase):
def setUp(self):
self.tmp_d = tempfile.mkdtemp()
root_dir = dcm.agent.get_root_location()
pluggin_path = os.path.join(root_dir, "etc", "plugin.conf")
test_conf = """
[workers]
count=1
[connection]
type=ws
[plugin]
configfile=%s
[storage]
base_dir=%s
[features]
hello=world
test=2
""" % (pluggin_path, self.tmp_d)
os.mkdir(os.path.join(self.tmp_d, "secure"))
self.conf_path = os.path.join(self.tmp_d, "agent.conf")
with open(self.conf_path, "w") as fptr:
fptr.write(test_conf)
def tearDown(self):
try:
os.removedirs(self.tmp_d)
except:
pass
def test_get_conf_files(self):
conf = config.AgentConfig([self.conf_path])
hs = handshake.HandshakeManager(conf, mock.Mock())
handshake_doc = hs.get_send_document()
features = handshake_doc['features']
self.assertIsNotNone(features['plugins'])
self.assertIn("add_user", features['plugins'])
self.assertIn("hello", features)
self.assertIn("test", features)
self.assertEqual(features["hello"], "world")
self.assertEqual(features["test"], '2')
```
#### File: tests/unit/test_pager.py
```python
import json
import unittest
import dcm.agent.exceptions as exceptions
import dcm.agent.plugins.api.pages as pages
from dcm.agent.events.globals import global_space as dcm_events
class TestPager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_walk_pager_uniform_sizes(self):
json_entry = {"12345": "6789"}
j_size = len(json.dumps(json_entry))
per_page = 4
page_size = per_page * j_size
page_count = 5
total_entries = per_page * page_count
json_list = []
for i in range(total_entries):
json_list.append(json_entry)
page_monitor = pages.PageMonitor(page_size=page_size)
token = "<PASSWORD>"
pager = pages.JsonPage(page_size, json_list)
page_monitor.new_pager(pager, token)
page_1, new_token = page_monitor.get_next_page(token)
self.assertEqual(token, new_token)
self.assertEqual(len(page_1), per_page)
count = 1
while new_token is not None:
page, new_token = page_monitor.get_next_page(token)
self.assertEqual(len(page), per_page)
count += 1
self.assertEqual(page_count, count)
def tests_sweeper(self):
json_entry = {"12345": "6789"}
j_size = len(json.dumps(json_entry))
per_page = 3
page_size = per_page * j_size
page_monitor = pages.PageMonitor(
page_size=page_size, life_span=2, sweep_time=1)
page_monitor.start()
try:
json_list = [json_entry, json_entry]
token = "pu<PASSWORD>"
pager = pages.JsonPage(page_size, json_list)
page_monitor.new_pager(pager, token)
dcm_events.poll(timeblock=3.0)
self.assertRaises(exceptions.AgentPageNotFoundException,
page_monitor.get_next_page,
token)
finally:
page_monitor.stop()
```
#### File: tests/unit/test_persist.py
```python
import datetime
import json
import os
import tempfile
import time
import threading
import unittest
import uuid
import dcm.agent.exceptions as exceptions
import dcm.agent.messaging.persistence as persistence
import dcm.agent.messaging.states as messaging_states
class TestPersistMemory(unittest.TestCase):
def setUp(self):
self.db = persistence.SQLiteAgentDB(":memory:")
def test_complete_empty(self):
res = self.db.get_all_complete()
self.assertEqual(res, [])
def test_rejected_empty(self):
res = self.db.get_all_rejected()
self.assertEqual(res, [])
def test_nacked_empty(self):
res = self.db.get_all_reply_nacked()
self.assertEqual(res, [])
def test_acked_empty(self):
res = self.db.get_all_ack()
self.assertEqual(res, [])
def test_reply_empty(self):
res = self.db.get_all_reply()
self.assertEqual(res, [])
def test_lookup_empty(self):
res = self.db.lookup_req("NotTThere")
self.assertIsNone(res)
def test_update_not_there(self):
passed = False
try:
self.db.update_record("Nope", "ASTATE")
except exceptions.PersistenceException:
passed = True
self.assertTrue(passed, "An exception did not happen")
def test_new_record_ack_search(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.ACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
res = self.db.get_all_ack()
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].agent_id, agent_id)
self.assertEqual(res[0].request_id, request_id)
def test_new_record_reply_search(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY
self.db.new_record(request_id, request_doc, None, state, agent_id)
res = self.db.get_all_reply()
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].agent_id, agent_id)
self.assertEqual(res[0].request_id, request_id)
def test_new_record_reply_nacked_search(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
res = self.db.get_all_reply_nacked()
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].agent_id, agent_id)
self.assertEqual(res[0].request_id, request_id)
def test_new_record_nacked_search(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.NACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
res = self.db.get_all_rejected()
self.assertTrue(res)
self.assertEqual(len(res), 1)
self.assertEqual(res[0].agent_id, agent_id)
self.assertEqual(res[0].request_id, request_id)
def test_new_record_lookup(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
reply_doc = {"akey": "andstuff"}
self.db.new_record(request_id, request_doc, reply_doc, state, agent_id)
res = self.db.lookup_req(request_id)
self.assertEqual(res.agent_id, agent_id)
self.assertEqual(res.request_id, request_id)
self.assertEqual(json.loads(res.reply_doc), reply_doc)
def test_new_record_update_lookup(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
reply_doc = {"akey": "andstuff"}
self.db.new_record(request_id, request_doc, None, state, agent_id)
state = messaging_states.ReplyStates.ACKED
self.db.update_record(request_id, state, reply_doc=reply_doc)
res = self.db.lookup_req(request_id)
self.assertEqual(res.agent_id, agent_id)
self.assertEqual(res.request_id, request_id)
self.assertEqual(json.loads(res.reply_doc), reply_doc)
self.assertEqual(res.state, state)
def test_clear_all_lost(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.ACKED
reply_doc = {"akey": "andstuff"}
self.db.new_record(request_id, request_doc, reply_doc, state, agent_id)
self.db.starting_agent()
res = self.db.lookup_req(request_id)
self.assertEqual(res.agent_id, agent_id)
self.assertEqual(res.request_id, request_id)
r = json.loads(res.reply_doc)
self.assertEqual(r["return_code"], 1)
self.assertEqual(res.state, messaging_states.ReplyStates.REPLY)
def test_clear_empty(self):
cut_off_time = datetime.datetime.now()
self.db.clean_all_expired(cut_off_time)
def test_clear_lost(self):
request_id1 = str(uuid.uuid4())
request_id2 = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id1}
state = messaging_states.ReplyStates.ACKED
reply_doc = {"request": "one"}
self.db.new_record(
request_id1, request_doc, reply_doc, state, agent_id)
time.sleep(0.1)
cut_off_time = datetime.datetime.now()
reply_doc = {"request": "two"}
request_doc = {"request_id": request_id2}
self.db.new_record(
request_id2, request_doc, reply_doc, state, agent_id)
self.db.clean_all_expired(cut_off_time)
res = self.db.lookup_req(request_id1)
self.assertTrue(res is None)
res = self.db.lookup_req(request_id2)
self.assertTrue(res is not None)
class TestPersistDisk(unittest.TestCase):
def setUp(self):
_, self.db_file = tempfile.mkstemp("test_db")
self.db = persistence.SQLiteAgentDB(self.db_file)
def tearDown(self):
os.remove(self.db_file)
def test_record_sweeper(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
request_id2 = str(uuid.uuid4())
request_doc = {"request_id": request_id2}
self.db.new_record(request_id2, request_doc, None, state, agent_id)
cleaner = persistence.DBCleaner(self.db, 10, 10, 0.05)
cleaner.start()
time.sleep(0.5)
cleaner.done()
res = self.db.lookup_req(request_id)
self.assertTrue(not res)
res = self.db.lookup_req(request_id2)
self.assertTrue(not res)
def test_add_alert_db(self):
alert_time1 = int(time.time() * 1000)
time_received = int(time.time() * 1000)
level = 1
rule = 5000
subject = "Test alert"
message = "test message"
alert_hash1 = "madeup1"
alert_hash2 = "madeup2"
alert_time2 = int(time.time() * 1000)
x = self.db.add_alert(alert_time1, time_received,
alert_hash1, level, rule, subject, message)
y = self.db.add_alert(alert_time2, time_received,
alert_hash2, level, rule, subject, message)
latest_time = self.db.get_latest_alert_time()
self.assertEqual(latest_time, alert_time2)
class TestPersistMultiThread(unittest.TestCase):
def setUp(self):
_, self.db_file = tempfile.mkstemp("test_db")
self.db = persistence.SQLiteAgentDB(self.db_file)
def tearDown(self):
os.remove(self.db_file)
def test_thread_lookup(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
request_id2 = str(uuid.uuid4())
request_doc = {"request_id": request_id2}
self.db.new_record(request_id2, request_doc, None, state, agent_id)
failed = []
def _thread_lookup():
try:
res = self.db.lookup_req(request_id)
print(res)
except Exception as ex:
print(str(ex))
failed.append(True)
t = threading.Thread(target=_thread_lookup)
t.start()
t.join()
self.assertTrue(len(failed) == 0)
def test_thread_update(self):
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id}
state = messaging_states.ReplyStates.REPLY_NACKED
self.db.new_record(request_id, request_doc, None, state, agent_id)
request_id2 = str(uuid.uuid4())
request_doc = {"request_id": request_id2}
self.db.new_record(request_id2, request_doc, None, state, agent_id)
failed = []
def _thread_lookup():
try:
res = self.db.update_record(
request_id, messaging_states.ReplyStates.REPLY)
print(res)
except Exception as ex:
print(str(ex))
failed.append(True)
t = threading.Thread(target=_thread_lookup)
t.start()
t.join()
self.assertTrue(len(failed) == 0)
def test_agent_mismatch(self):
request_id1 = str(uuid.uuid4())
request_id2 = str(uuid.uuid4())
agent_id1 = str(uuid.uuid4())
request_doc = {"request_id": request_id1}
state = messaging_states.ReplyStates.REPLY_NACKED
reply_doc = {"akey": "andstuff"}
self.db.new_record(
request_id1, request_doc, reply_doc, state, agent_id1)
request_doc["request_id"] = request_id2
self.db.new_record(
request_id2, request_doc, reply_doc, state, agent_id1)
res = self.db.lookup_req(request_id1)
self.assertTrue(res is not None)
res = self.db.lookup_req(request_id2)
self.assertTrue(res is not None)
self.db.check_agent_id("differentid")
res = self.db.lookup_req(request_id1)
self.assertTrue(res is None)
res = self.db.lookup_req(request_id2)
self.assertTrue(res is None)
def test_agent_id_cleanup_empty(self):
self.db.check_agent_id("differentid")
def test_agent_id_match(self):
request_id1 = str(uuid.uuid4())
request_id2 = str(uuid.uuid4())
agent_id = str(uuid.uuid4())
request_doc = {"request_id": request_id1}
state = messaging_states.ReplyStates.REPLY_NACKED
reply_doc = {"akey": "andstuff"}
self.db.new_record(
request_id1, request_doc, reply_doc, state, agent_id)
request_doc["request_id"] = request_id2
self.db.new_record(
request_id2, request_doc, reply_doc, state, agent_id)
res = self.db.lookup_req(request_id1)
self.assertTrue(res is not None)
res = self.db.lookup_req(request_id2)
self.assertTrue(res is not None)
self.db.check_agent_id(agent_id)
res = self.db.lookup_req(request_id1)
self.assertTrue(res is not None)
res = self.db.lookup_req(request_id2)
self.assertTrue(res is not None)
```
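A minimal in-memory use of the persistence layer, following the calls made in the tests above; the document contents are made up.
```python
# Create, update, and look up a request record in an in-memory database.
import json
import uuid

import dcm.agent.messaging.persistence as persistence
import dcm.agent.messaging.states as messaging_states

db = persistence.SQLiteAgentDB(":memory:")
request_id = str(uuid.uuid4())
agent_id = str(uuid.uuid4())

db.new_record(request_id, {"request_id": request_id}, None,
              messaging_states.ReplyStates.ACKED, agent_id)
db.update_record(request_id, messaging_states.ReplyStates.REPLY,
                 reply_doc={"return_code": 0})

record = db.lookup_req(request_id)
print(record.state, json.loads(record.reply_doc))
```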
#### File: tests/unit/test_pubsub.py
```python
import unittest
import uuid
import dcm.agent.events.callback as events
import dcm.agent.events.pubsub as pubsub
import dcm.agent.tests.utils.general as test_utils
class TestPubSub(unittest.TestCase):
@classmethod
def setUpClass(cls):
test_utils.connect_to_debugger()
def setUp(self):
self._event_space = events.EventSpace()
self._pub_sub = pubsub.PubSubEvent(self._event_space)
def test_simple_publish(self):
topic = str(uuid.uuid4())
x_val = 1
y_val = []
apple_val = "sauce"
def test_callback(x_param, y_param, apple_param=None):
self.assertEqual(x_param, x_val)
self.assertEqual(y_param, y_val)
self.assertEqual(apple_param, apple_val)
y_val.append("called")
self._pub_sub.subscribe(topic, test_callback)
self._pub_sub.publish(topic,
topic_args=(x_val, y_val),
topic_kwargs={'apple_param': apple_val})
self._event_space.poll(timeblock=0.0)
self.assertEqual(len(y_val), 1)
def test_multiple_subscribe(self):
topic = str(uuid.uuid4())
x_val = []
def test_callback1(x_param):
x_param.append(1)
def test_callback2(x_param):
x_param.append(2)
def test_callback3(x_param):
x_param.append(3)
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic, topic_args=(x_val,))
self._event_space.poll(timeblock=0.0)
self.assertEqual(len(x_val), 3)
self.assertIn(1, x_val)
self.assertIn(2, x_val)
self.assertIn(3, x_val)
def test_public_empty(self):
topic = str(uuid.uuid4())
self._pub_sub.publish(topic)
self._event_space.poll(timeblock=0.0)
def test_unsubscribe(self):
topic = str(uuid.uuid4())
def test_callback():
pass
self._pub_sub.subscribe(topic, test_callback)
self._pub_sub.unsubscribe(topic, test_callback)
try:
self._pub_sub.unsubscribe(topic, test_callback)
passes = False
except KeyError:
passes = True
self.assertTrue(passes)
def test_done_callback(self):
topic = str(uuid.uuid4())
x_val = []
def test_callback1(x_param):
x_param.append(1)
def test_callback2(x_param):
x_param.append(2)
def test_callback3(x_param):
x_param.append(3)
def done_cb(topic_error, x_param=None):
self.assertEqual(len(x_param), 3)
self.assertIn(1, x_param)
self.assertIn(2, x_param)
self.assertIn(3, x_param)
self.assertIsNone(topic_error)
x_param.append("done")
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic,
topic_args=(x_val,),
done_cb=done_cb,
done_kwargs={'x_param': x_val})
self._event_space.poll(timeblock=0.0)
self.assertIn('done', x_val)
def test_done_error_callback(self):
topic = str(uuid.uuid4())
x_val = []
def test_callback1(x_param):
x_param.append(1)
def test_callback2(x_param):
raise Exception("error")
def test_callback3(x_param):
x_param.append(3)
def done_cb(topic_error, x_param=None):
self.assertLess(len(x_param), 3)
self.assertIsNotNone(topic_error)
x_param.append("done")
self._pub_sub.subscribe(topic, test_callback1)
self._pub_sub.subscribe(topic, test_callback2)
self._pub_sub.subscribe(topic, test_callback3)
self._pub_sub.publish(topic,
topic_args=(x_val,),
done_cb=done_cb,
done_kwargs={'x_param': x_val})
self._event_space.poll(timeblock=0.0)
self.assertIn('done', x_val)
```
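A minimal publish/subscribe round trip mirroring the tests above; the topic name, payload, and keyword argument are invented.
```python
# Subscribe a callback, publish once, and poll the event space to deliver it.
import dcm.agent.events.callback as events
import dcm.agent.events.pubsub as pubsub

event_space = events.EventSpace()
pub_sub = pubsub.PubSubEvent(event_space)

def on_message(payload, tag=None):
    print("received", payload, tag)

pub_sub.subscribe("demo-topic", on_message)
pub_sub.publish("demo-topic",
                topic_args=({"hello": "world"},),
                topic_kwargs={"tag": "example"})
event_space.poll(timeblock=0.0)        # callbacks run when the space is polled
```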
#### File: tests/unit/test_system_stat.py
```python
import time
import unittest
import uuid
import dcm.agent.exceptions as exceptions
import dcm.agent.systemstats as systemstats
class TestSystemStats(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
systemstats.clean_up_all()
def test_get_system_stat_not_exists(self):
self.assertRaises(
exceptions.AgentOptionValueNotSetException,
systemstats.get_stats,
"somename")
def test_stop_system_stat_not_exists(self):
self.assertRaises(
exceptions.AgentOptionValueNotSetException,
systemstats.stop_stats,
"somename")
def test_start_system_stat_bad_type(self):
name = str(uuid.uuid4())
self.assertRaises(
exceptions.AgentOptionValueException,
systemstats.start_new_system_stat,
name,
"no_good",
10,
10.0)
def test_system_stat_happy_path_cpu_idle(self):
hold_count = 10
interval = 0.1
name = str(uuid.uuid4())
systemstats.start_new_system_stat(
name,
"cpu-idle",
hold_count,
interval)
time.sleep((hold_count + 2) * interval)
stats_d = systemstats.get_stats(name)
self.assertEqual(len(stats_d['status']), hold_count)
systemstats.stop_stats(name)
def test_system_stat_two_cpu_idle(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
hold_count2 = int(hold_count1 / 2)
interval2 = interval1 * 2
name2 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"cpu-idle",
hold_count1,
interval1)
systemstats.start_new_system_stat(
name2,
"cpu-idle",
hold_count2,
interval2)
time.sleep((hold_count1 + 2) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
time.sleep((hold_count2 + 2) * interval2)
stats_d = systemstats.get_stats(name2)
self.assertEqual(len(stats_d['status']), hold_count2)
systemstats.stop_stats(name1)
systemstats.stop_stats(name2)
def test_system_stat_stop_twice(self):
hold_count = 10
interval = 0.1
name = str(uuid.uuid4())
systemstats.start_new_system_stat(
name,
"cpu-idle",
hold_count,
interval)
systemstats.stop_stats(name)
self.assertRaises(
exceptions.AgentOptionValueNotSetException,
systemstats.stop_stats,
name)
class TestAgentVerboseSystemStats(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
systemstats.clean_up_all()
def test_basic_verbose_stat(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"system-stats",
hold_count1,
interval1)
time.sleep((hold_count1 + 2) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('cpu-load', ent_1)
self.assertIn('net-bytes-in', ent_1)
self.assertIn('net-bytes-out', ent_1)
self.assertIn('disk-read-bytes', ent_1)
self.assertIn('disk-write-bytes', ent_1)
self.assertIn('disk-read-ops', ent_1)
self.assertIn('disk-write-ops', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_read_ops(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"disk-read-ops",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('disk-read-ops', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_write_ops(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"disk-write-ops",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('disk-write-ops', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_read_bytes(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"disk-read-bytes",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('disk-read-bytes', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_write_bytes(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"disk-write-bytes",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('disk-write-bytes', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_net_in(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"net-bytes-in",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('net-bytes-in', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
def test_basic_net_out(self):
hold_count1 = 10
interval1 = 0.1
name1 = str(uuid.uuid4())
systemstats.start_new_system_stat(
name1,
"net-bytes-out",
hold_count1,
interval1)
time.sleep((hold_count1 + 3) * interval1)
stats_d = systemstats.get_stats(name1)
self.assertEqual(len(stats_d['status']), hold_count1)
ent_1 = stats_d['status'][0]
self.assertIn('net-bytes-out', ent_1)
self.assertIn('timestamp', ent_1)
systemstats.stop_stats(name1)
```
#### File: tests/unit/test_utils.py
```python
import logging
import os
import tempfile
import unittest
import dcm.agent.config as config
from dcm.agent.plugins.api.utils import json_param_type
import dcm.agent.utils as utils
class TestUtils(unittest.TestCase):
def test_safe_delete_no_exists(self):
# test non existent file
rc = utils.safe_delete("no/such/file")
self.assertTrue(rc)
def test_get_conf_files(self):
osf, path = tempfile.mkstemp()
osf, path2 = tempfile.mkstemp()
os.environ["DCM_AGENT_CONF"] = path2
try:
file_list = config.get_config_files(conffile=path)
self.assertIn(path, file_list)
self.assertIn(path2, file_list)
finally:
utils.safe_delete(path)
utils.safe_delete(path2)
def test_stack_trace(self):
utils.build_assertion_exception(logging, "a message")
def test_json_params(self):
res = json_param_type(None)
self.assertIsNone(res)
res = json_param_type("null")
self.assertIsNone(res)
res = json_param_type('{"x": 1}')
self.assertTrue('x' in res.keys())
self.assertEqual(res['x'], 1)
res = json_param_type({"x": 1})
self.assertTrue('x' in res.keys())
self.assertEqual(res['x'], 1)
```
#### File: tests/utils/test_connection.py
```python
import json
import logging
import threading
import dcm.agent.connection.connection_interface as conniface
import dcm.agent.messaging.utils as utils
import dcm.agent.messaging.types as message_types
import dcm.agent.tests.utils.test_exceptions as test_exceptions
from dcm.agent.events.globals import global_space as dcm_events
_g_logger = logging.getLogger(__name__)
class RequestRetransmission(object):
def __init__(self):
self.request_doc = None
self._event_retrans_map = {
message_types.MessageTypes.REQUEST: 0,
message_types.MessageTypes.ACK: 0,
message_types.MessageTypes.NACK: 0,
message_types.MessageTypes.REPLY: 0}
def set_retrans_event(self, event, count):
if event not in self._event_retrans_map:
raise Exception("This event doesnt exist")
self._event_retrans_map[event] = count
def should_retrans(self, event):
if event not in self._event_retrans_map:
raise Exception("This event doesnt exist")
if self._event_retrans_map[event] < 1:
return False
self._event_retrans_map[event] -= 1
return True
def set_request_doc(self, doc):
self.request_doc = doc
class TestConnection(conniface.ConnectionInterface):
def __init__(self, reader, writer, reply_ignore_count=0,
retrans_requests=None):
# a file like object that is full of command arguments. space separated
self._reader = reader
self._writer = writer
self._reply_ignore_count = reply_ignore_count
self._retrans = retrans_requests
if self._retrans is None:
self._retrans = []
self._request_number = 0
self._retrans_map = {}
self._lock = threading.Lock()
def _read_from_file(self):
buf = self._reader.readline().strip()
if not buf:
return
_g_logger.debug("read message " + buf)
ba = buf.split()
command = ba.pop(0)
arguments = ba
message_id = utils.new_message_id()
request_id = utils.new_message_id()
request_doc = {
'type': message_types.MessageTypes.REQUEST,
'request_id': request_id,
'message_id': message_id,
'payload': {'command': command, 'arguments': arguments}
}
# check for any retrans requests of this message
if len(self._retrans) > self._request_number:
rt = self._retrans[self._request_number]
self._retrans_map[request_id] = rt
rt.set_request_doc(request_doc)
self._request_number += 1
self._check_retrans(request_id, message_types.MessageTypes.REQUEST)
dcm_events.register_callback(
self.recv_obj.incoming_parent_q_message, args=[request_doc])
def set_receiver(self, receive_object):
"""
Read 1 packet from the connection. 1 complete json doc.
"""
self.recv_obj = receive_object
self._read_from_file()
def incoming_parent_q_message(self, request_id, msg):
self._read_from_file()
self.recv_obj.incoming_parent_q_message(msg)
def _check_retrans(self, request_id, event):
if request_id in self._retrans_map:
retrans = self._retrans_map[request_id]
if retrans.should_retrans(event):
dcm_events.register_callback(
self.incoming_parent_q_message,
args=[request_id, retrans.request_doc])
def connect(self, receive_callback, handshake_manager):
pass
def close(self):
pass
def send(self, doc):
with self._lock:
t = doc['type']
request_id = doc['request_id']
_g_logger.debug("Fake conn sending " + t)
self._check_retrans(request_id, t)
if t == message_types.MessageTypes.ACK:
# no reply required here
return
elif t == message_types.MessageTypes.NACK:
# no reply required here
return
elif t == message_types.MessageTypes.REPLY:
payload = doc['payload']
self._writer.write(json.dumps(payload) + '\n')
self._writer.flush()
if self._reply_ignore_count == 0:
# we must ACK the reply
reply_ack = {
"type": message_types.MessageTypes.ACK,
"request_id": doc["request_id"],
"message_id": doc["message_id"],
}
dcm_events.register_callback(
self.incoming_parent_q_message,
args=[doc["request_id"], reply_ack])
else:
self._reply_ignore_count -= 1
else:
raise test_exceptions.AgentTestException(
"type %s should never happen" % t)
class ReplyConnection(object):
def __init__(self):
pass
def send(self, doc):
dcm_events.register_callback(
self._request.incoming_message, args=[doc])
def set_request_side(self, request):
self._request = request
def close(self):
pass
class RequestConnection(object):
def __init__(self):
pass
def send(self, doc):
self._rl.incoming_parent_q_message(doc)
def set_request_listener(self, rl):
self._rl = rl
def close(self):
pass
``` |
{
"source": "jpwoeltjen/hfhd",
"score": 3
} |
#### File: hfhd/hfhd/hf.py
```python
import numpy as np
import pandas as pd
from hfhd import hd
import numba
from numba import prange
import warnings
def refresh_time(tick_series_list):
r"""
The all-refresh time scheme of Barndorff-Nielsen et al. (2011).
If this function is applied to two assets at a time, it becomes the
pairwise-refresh time. The function is accelerated via JIT compilation
with Numba.
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick prices of one asset with datetime index.
Returns
-------
out : pd.DataFrame
Synchronized previous ticks according to the refresh-time scheme.
Notes
-----
Multivariate estimators require synchronization of the time series.
This can be achieved via a grid. A grid is a subset of $[0, T]$ and it is
defined as
\begin{equation}
\mathcal{V}=\left\{v_{0}, v_{1}, \ldots, v_{\tilde{n}}\right\}\subset[0, T]
\end{equation}
with $v_{0}=0$ and $v_{\tilde{n}}=T,$ where $\tilde{n}$ is the sampling
frequency, i.e., the number of grid intervals. Two prominent ways to
specify the grid are (i) a regular grid, where $v_{m}-v_{m-1}=\Delta v,
\text{ for } m=1, \ldots, \tilde{n}$, and (ii) a grid based on
    'refresh times' of Barndorff-Nielsen et al. (2011), where the grid spacing is
dependent on the observation times. If more than two assets are considered,
refresh times can be further classified into 'all-refresh-times' and
'pairwise-refresh times'. Estimators based on pairwise-refresh times use
the data more efficiently but the integrated covariance matrix estimate
might not be positive definite. The pairwise-refresh time
$\mathcal{V}_p=\left\{v_{0}, v_{1}, \ldots, v_{\tilde{n}}\right\}$ can be
obtained by setting $v_{0}=0,$ and
\begin{equation}
v_{m}=\max \left\{\min \left\{t^{(k)}_i
\in t^{(k)}: t^{(k)}_i > v_{m-1}\right\},\min \left\{t^{(l)}_i
\in t^{(l)}: t^{(l)}_i > v_{m-1}\right\}\right\}
\end{equation}
where $\tilde{n}$ is the total number of refresh times in the interval
$(0,1].$ This scheme is illustrated in the figure. The
procedure has to be repeated for every asset pair. In contrast, the
all-refresh time scheme uses a single grid for all assets, which is
determined based on the trade time of the slowest asset of each grid
interval. Hence, the spacing of grid elements can be much wider. This
implies that estimators based on the latter scheme may discard a large
proportion of the data, especially if there is a very slowly trading asset.
In any case,
there has to be at least one observation time of each asset between any two
grid elements. With that condition in mind, the 'previous tick time' of
asset $j$ is defined as
\begin{equation}
\tau^{(j)}_m=\max \left\{ t^{(j)}_i \in t^{(j)}:
t^{(j)}_i \leq v_{m}\right\}
\end{equation}
The following diagram illustrates the scheme for two assets, $k$ and $l$.
.. tikz::
\draw
(0,1.75) -- (11,1.75)
(0,-0.75) -- (11,-0.75)
(0,1.5) -- (0,2)
(1.9,1.5) -- (1.9,2)
(3.5,1.5) -- (3.5,2)
(5,1.5) -- (5,2)
(6.5,1.5) -- (6.5,2)
(8,1.5) -- (8,2)
(10.8,1.5) -- (10.8,2)
(0,-0.5) -- (0,-1)
(1.9,-0.5) -- (1.9,-1)
(5.7,-0.5) -- (5.7,-1)
(10.3,-0.5) -- (10.3,-1);
\draw[dashed,gray]
(0,3.75) -- (0,-2.75) node[below] {$\nu_0=0$}
(1.9,3.75) -- (1.9,-2.75) node[below] {$\nu_1$}
(5.7,3.75) -- (5.7,-2.75) node[below] {$\nu_2$}
(9.5,3.75) -- (9.5,-2.75) node[below] {$t_{3}^{(l)}=
\tau_{3}^{(l)}=\nu_3 = T$};
\draw[dashed] (11,1.75) -- (12,1.75)
(11,-0.75) -- (12,-0.75);
\draw[very thick] (9.5,-1.4) -- (9.5,0.25)
(9.5,0.8) -- (9.5,2.4);
\draw
(0,1) node{$t_{0}^{(k)} = \tau_{0}^{(k)}$}
(1.9,1) node{$t_{1}^{(k)} = \tau_{1}^{(k)}$}
(3.5,1) node{$t_{2}^{(k)} $}
(5,1) node{$t_{3}^{(k)} = \tau_{2}^{(k)}$}
(6.5,1) node{$t_{4}^{(k)}$}
(8,1) node{$t_{5}^{(k)}= \tau_{3}^{(k)}$}
(11,1) node{$t_{6}^{(k)}$}
(9.5,0.5) node{\textbf{$T$}}
(0,0) node{$t_{0}^{(l)} = \tau_{0}^{(l)}$}
(1.9,0) node{$t_{1}^{(l)} = \tau_{1}^{(l)}$}
(5.7,0) node{$t_{2}^{(l)}= \tau_{2}^{(l)}$}
(10.3,0) node{$t_{4}^{(l)}$};
\draw
(0,1.75) node[left,xshift=-0pt]{$X^{(k)}$}
(0,-0.75) node[left,xshift=-0pt]{$X^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(0,2)--(1.9,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{\tau^{(k)}_1}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(1.9,2)--(5,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{\tau^{(k)}_2}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(5,2)--(8,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{\tau^{(k)}_3}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(9.5,-1)--(5.7,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{\tau^{(l)}_3}^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(5.7,-1)--(1.9,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{\tau^{(l)}_2}^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(1.9,-1)--(0,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{\tau^{(l)}_1}^{(l)}$};
References
----------
<NAME>., <NAME>., <NAME> <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite
estimators of the covariation of equity prices with noise and
non-synchronous trading, Journal of Econometrics 162(2): 149–169.
Examples
--------
>>> np.random.seed(0)
>>> n = 20
>>> returns = np.random.multivariate_normal([0, 0], [[1,0.5],[0.5,1]], n)/n**0.5
>>> prices = np.exp(returns.cumsum(axis=0))
>>> # sample n/2 (non-synchronous) observations of each tick series
>>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index().rename('a')
>>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index().rename('b')
>>> previous_ticks = refresh_time([series_a, series_b])
>>> np.round(previous_ticks.values,4)
array([[0.34 , 0.4309],
[0.2317, 0.4313],
[0.1744, 0.4109],
[0.1336, 0.3007],
[0.1383, 0.4537],
[0.1292, 0.1665],
[0.0936, 0.162 ]])
"""
if (len(tick_series_list) < 2):
raise ValueError(
'tick_series_list should be a list containing at least two pd.Series.')
indeces = tuple([np.array(x.dropna().index, dtype='uint64') for x in tick_series_list])
values = tuple([x.dropna().to_numpy(dtype='float64') for x in tick_series_list])
rt_data, index = _refresh_time(indeces, values)
index = pd.to_datetime(index)
return pd.DataFrame(rt_data, index=index).dropna()
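# Illustrative sketch (not part of the original module): the Notes above
# describe the pairwise-refresh scheme, which applies the refresh-time rule to
# each asset pair separately. The helper name below is an assumption chosen
# for demonstration; it simply loops over all pairs and reuses refresh_time.
def _example_pairwise_refresh(tick_series_list):
    """Return a dict mapping asset-pair indices (i, j) to their
    pairwise-refresh previous-tick DataFrames."""
    out = {}
    p = len(tick_series_list)
    for i in range(p):
        for j in range(i + 1, p):
            out[(i, j)] = refresh_time([tick_series_list[i],
                                        tick_series_list[j]])
    return out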
@numba.njit
def _refresh_time(indeces, values):
"""
The computationally expensive iteration of :func:`~refresh_time`
is accelerated with Numba.
Parameters
----------
indeces : a tuple or list of numpy.ndarrays, int64
The length is equal to the number of assets. Each numpy.ndarray contains
the unix time of ticks of one asset.
values : a tuple or list of numpy.ndarrays, float64
Each numpy.ndarray contains the prices of ticks of one asset.
Returns
-------
merged_values : numpy.ndarray
Synchronized previous ticks according to the refresh-time scheme.
merged_index
The refresh times.
"""
# get a sorted main index with all unique trade times
merged_index = indeces[0]
for index in indeces[1:]:
merged_index = np.append(merged_index, index)
merged_index = np.sort(np.unique(merged_index))
# Initialize the merged_values array.
merged_values = np.empty((merged_index.shape[0], len(values)))
merged_values[:, :] = np.nan
# Initialize the values array. These are the previous ticks.
last_values = np.empty(merged_values.shape[1])
last_values[:] = np.nan
for i in range(merged_values.shape[0]):
for j in range(merged_values.shape[1]):
index = indeces[j]
loc = np.searchsorted(index, merged_index[i])
# if there was a trade of asset j update the last_value
# make sure that loc < values[j].shape[0] since numba
# will not raise an out-of-bounds error but will put some
# random value currently in memory.
            if loc < values[j].shape[0] and index[loc] == merged_index[i]:
last_values[j] = values[j][loc]
# if all assets traded at least once since the last refresh
# time, a new grid point is formed and the clock starts anew.
if not np.isnan(last_values).any():
merged_values[i, :] = last_values
last_values[:] = np.full_like(last_values, np.nan)
return merged_values, merged_index
def preaverage(data, K=None, g=None, return_K=False):
r"""
The preaveraging scheme of Podolskij and Vetter (2009). It uses the fact
that if the noise is i.i.d with zero mean, then averaging a rolling window
of (weighted) returns diminishes the effect of microstructure noise on the
variance estimate.
Parameters
----------
data : pd.Series or pd.DataFrame
A time series of log-returns. If multivariate, the time series
has to be synchronized (e.g. with :func:`~refresh_time`).
K : int, default = ``None``
The preaveraging window length. ``None``
implies :math:`K=0.4 n^{1/2}` is chosen as recommended in
Hautsch & Podolskij (2013).
g : function, default = ``None``
A weighting function. ``None`` implies
        :math:`g(x) = min(x, 1-x)` is chosen.
    return_K : bool, default = ``False``
        If ``True``, the chosen preaveraging window length ``K`` is also
        returned along with the preaveraged series.
Returns
-------
data_pa : pd.Series
The preaveraged log-returns.
Notes
-----
The preaveraged log-returns using the window-length :math:`K` are given by
.. math::
\begin{equation}
\begin{aligned}
\bar{\mathbf{Y}}_{i}=\sum_{j=1}^{K-1} g\left(\frac{j}{K}\right)
\Delta_{i-j+1}\mathbf{Y}, \quad \text { for } i=K, \ldots, n,
\end{aligned}
\end{equation}
where :math:`\mathbf{Y}_i` have been synchronized beforehand, for example
with :func:`~refresh_time`. Note that the direction of the moving window
has been reversed compared to the definition in Podolskij and Vetter (2009)
to stay consistent within the package. :math:`g` is a weighting function.
A popular choice is
.. math::
\begin{equation}
g(x)=\min (x, 1-x).
\end{equation}
References
----------
<NAME>., <NAME>., 2009.
Estimation of volatility functionals in the simultaneous presence of
microstructure noise and jumps.
Bernoulli 15 (3), 634–658.
"""
index = data.index
# if univariate add axis
if len(data.shape) == 1:
data = data.to_numpy()[:, None]
else:
data = data.to_numpy()
n, p = data.shape
if K is None:
K = int(np.sqrt(n)*0.4)
if g is None:
g = _numba_minimum
weight = g(np.arange(1, K)/K)
data_pa = _preaverage(data, weight)
if p == 1:
data_pa = pd.Series(data_pa.flatten(), index=index)
else:
data_pa = pd.DataFrame(data_pa, index=index)
if return_K:
return data_pa, K
else:
return data_pa
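# Illustrative sketch (not part of the original module): how preaverage might
# be applied to simulated noisy tick returns. The simulation parameters and
# the helper name are assumptions for demonstration only.
def _example_preaverage_usage(seed=0, n=1000):
    """Preaverage simulated noisy log-returns and return them together with
    the automatically chosen window length K."""
    rng = np.random.default_rng(seed)
    # efficient returns plus additive i.i.d. noise increments
    rets = pd.Series(rng.normal(0.0, 1.0, n) / np.sqrt(n)
                     + rng.normal(0.0, 0.001, n))
    rets_pa, K = preaverage(rets, return_K=True)
    return rets_pa.dropna(), K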
@numba.njit(cache=False, parallel=False, fastmath=False)
def _preaverage(data, weight):
"""
Preaverage an observation matrix with shape = (n, p) given a weight vector
with shape = (K-1, p).
Parameters
----------
data : numpy.ndarray, shape = (n, p)
The observation matrix of synchronized log-returns.
weight : numpy.ndarray, shape = (K-1, )
The weight vector, looking back K -2 time steps.
Returns
-------
data_pa : numpy.ndarray, shape = (n, p)
The preaveraged returns.
"""
n, p = data.shape
K = weight.shape[0] + int(1)
data_pa = np.full_like(data, np.nan)
for i in prange(K-1, n):
for j in range(p):
data_pa[i, j] = np.dot(weight, data[i-K+2:i+1, j])
return data_pa
@numba.njit
def _upper_triangular_indeces(p):
"""Get the upper triangular indeces of a square matrix. int16 should
suffice for even the largest ``p`` encountered in practice.
Parameters
----------
p : int
The dimension of the square matrix.
Returns
-------
idx : numpy.ndarray, shape(int((p*(p+1)/2), 2)
The array of indeces. i in zeroth column, j in first column.
"""
s = 0
idx = np.zeros((int((p*(p+1)/2)), 2), dtype=np.int16)
for i in range(p):
for j in range(i, p):
idx[s] = i, j
s += 1
    if idx[-1, 0] < 0:
raise ValueError("Got negative index, ``p`` probably too large for int16")
return idx
def _get_indeces_and_values(tick_series_list):
"""
Get indeces and values each as 2d numpy.ndarray from a list of
pd.Series.
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains ticks of one asset with datetime index.
Returns
-------
indeces : numpy.ndarray, dtype='uint64', shape = (p, n_max)
where p is the number of assets and n_max is the length of the
longest pd.Series.
values : numpy.ndarray, dtype='float64', shape = (p, n_max)
where p is the number of assets and n_max is the length of the
longest pd.Series.
"""
n_max = np.max([len(x) for x in tick_series_list])
indeces = np.empty((len(tick_series_list), n_max), dtype='uint64')
indeces[:, :] = np.nan
values = np.empty((len(tick_series_list), n_max), dtype='float64')
values[:, :] = np.nan
for i, x in enumerate(tick_series_list):
idx = np.array(x.dropna().index, dtype='uint64')
v = np.array(x.dropna().to_numpy(), dtype='float64')
indeces[i, :idx.shape[0]] = idx[:]
values[i, :idx.shape[0]] = v[:]
return indeces, values
def get_cumu_demeaned_resid(price, y_hat=None):
r"""
From a pd.Series of tick prices and predictions get a pd.Series of
tick log-prices with zero-mean returns, i.e. the reconstructed
log-prices from de-meaned log-return residuals. These log-prices are inputs
to the integrated covariance matrix estimators.
Parameters
----------
    price : pd.Series
Tick prices of one asset with datetime index.
y_hat : pd.Series
The predictions.
Returns
-------
out : pd.Series
Log-prices corresponding to zero-mean returns.
"""
y = np.log(price.dropna()).diff()
resid = y - y.mean()
if y_hat is not None:
resid -= y_hat - y_hat.mean()
return resid.cumsum()
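# Illustrative sketch (not part of the original module): a minimal pipeline
# combining get_cumu_demeaned_resid with a downstream covariance estimator.
# The helper name is an assumption; msrc is used here purely as an example.
def _example_residual_pipeline(price_a, price_b):
    """Turn two raw tick-price series into demeaned log-prices and estimate
    their integrated covariance matrix with msrc."""
    logp_a = get_cumu_demeaned_resid(price_a).dropna()
    logp_b = get_cumu_demeaned_resid(price_b).dropna()
    return msrc([logp_a, logp_b])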
def msrc(tick_series_list, M=None, N=None, pairwise=True):
r"""
The multi-scale realized volatility (MSRV) estimator of Zhang (2006).
It is extended to multiple dimensions following Zhang (2011).
If ``pairwise=True`` estimate correlations with pairwise-refresh time
previous ticks and variances with all available ticks for each asset.
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick-log-prices of one asset
with datetime index. Must not contain nans.
M : int, >=1, default=None
The number of scales
If ``M=None`` all scales :math:`i = 1, ..., M` are used, where M is
        chosen :math:`M = n^{1/2}` according to Eqn (34) of Zhang (2006).
N : int, >=0, default=None
The constant $N$ of Tao et al. (2013)
If ``N=None`` :math:`N = n^{1/2}`. Lam and Qian (2019) need
:math:`N = n^{2/3}` for non-sparse integrated covariance matrices,
        in which case the rate of convergence reduces to $n^{-1/6}$.
pairwise : bool, default=True
If ``True`` the estimator is applied to each pair individually. This
increases the data efficiency but may result in an estimate that is
not p.s.d.
Returns
-------
out : numpy.ndarray
        The MSRV estimate of the integrated covariance matrix.
Examples
--------
>>> np.random.seed(0)
>>> n = 200000
>>> returns = np.random.multivariate_normal([0, 0], [[1,0.5],[0.5,1]], n)/n**0.5
>>> prices = 100*np.exp(returns.cumsum(axis=0))
>>> # add Gaussian microstructure noise
>>> noise = 10*np.random.normal(0, 1, n*2).reshape(-1, 2)*np.sqrt(1/n**0.5)
>>> prices +=noise
>>> # sample n/2 (non-synchronous) observations of each tick series
>>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index()
>>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index()
>>> # get log prices
>>> series_a = np.log(series_a)
>>> series_b = np.log(series_b)
>>> icov = msrc([series_a, series_b], M=1, pairwise=False)
>>> icov_c = msrc([series_a, series_b])
>>> # This is the biased, uncorrected integrated covariance matrix estimate.
>>> np.round(icov, 3)
array([[11.553, 0.453],
[ 0.453, 2.173]])
>>> # This is the unbiased, corrected integrated covariance matrix estimate.
>>> np.round(icov_c, 3)
array([[0.985, 0.392],
[0.392, 1.112]])
Notes
-----
Realized variance estimators based on multiple scales exploit the
fact that the proportion of the observed realized variance over a
specified interval due to microstructure noise increases with the sampling
frequency, while the realized variance of the true underlying process stays
constant. The bias can thus be corrected by subtracting a high frequency
estimate, scaled by an optimal weight, from a medium frequency estimate.
The weight is chosen such that the large bias in the high frequency
estimate, when scaled by the weight, is exactly equal to the medium bias,
and they cancel each other out as a result.
By considering $M$ time scales, instead of just two as in :func:`~tsrc`,
    Zhang (2006) improves the rate of convergence to $n^{-1 / 4}$.
This is the best attainable rate of convergence in this setting.
The proposed multi-scale realized volatility (MSRV) estimator is defined as
\begin{equation}
\langle\widehat{X^{(j)}, X^{(j)}}\rangle^{(MSRV)}_T=\sum_{i=1}^{M}
\alpha_{i}[Y^{(j)}, Y^{(j)}]^{\left(K_{i}\right)}_T
\end{equation}
where $\alpha_{i}$ are weights satisfying
\begin{equation}
\begin{aligned}
&\sum \alpha_{i}=1\\
&\sum_{i=1}^{M}\left(\alpha_{i} / K_{i}\right)=0
\end{aligned}
\end{equation}
The optimal weights for the chosen number of scales $M$, i.e.,
the weights that minimize the noise variance contribution, are given by
\begin{equation}
a_{i}=\frac{K_{i}\left(K_{i}-\bar{K}\right)}
{M \operatorname{var}\left(K\right)},
\end{equation}
where
%$\bar{K}$ denotes the mean of $K$.
$$\bar{K}=\frac{1}{M} \sum_{i=1}^{M} K_{i} \quad \text { and } \quad
\operatorname{var}\left(K\right)=\frac{1}{M}
\sum_{i=1}^{M} K_{i}^{2}-\bar{K}^{2}.
$$
If all scales are chosen, i.e., $K_{i}=i$, for $i=1, \ldots, M$, then
$\bar{K}=\left(M+1\right) / 2$ and $\operatorname{var}\left(K\right)=
\left(M^{2}-1\right) / 12$, and hence
\begin{equation}
a_{i}=12 \frac{i}{M^{2}} \frac{i / M-1 / 2-1 /
\left(2 M\right)}{1-1 / M^{2}}.
\end{equation}
In this case, as shown by the author in Theorem 4, when $M$ is chosen
optimally on the order of $M=\mathcal{O}(n^{1/2})$, the estimator is
consistent at rate $n^{-1/4}$.
References
----------
<NAME>. (2006).
Efficient estimation of stochastic volatility using
noisy observations: A multi-scale approach,
Bernoulli 12(6): 1019–1043.
<NAME>. (2011).
Estimating covariation: Epps effect, microstructure noise,
Journal of Econometrics 160.
"""
if pairwise:
indeces, values = _get_indeces_and_values(tick_series_list)
cov = _msrc_pairwise(indeces, values, M, N)
else:
data = refresh_time(tick_series_list)
data = data.to_numpy().T
if data.ndim == 1:
data = data.reshape(1, -1)
cov = _msrc(data, M, N)
return cov
@numba.njit(fastmath=False, parallel=False)
def _get_YY_m(Y, N, m):
Km = N + m
log_rets = Y[:, Km:] - Y[:, :-Km]
return log_rets @ log_rets.T / Km
@numba.njit(fastmath=False, parallel=False)
def _msrc(data, M, N):
r"""
The inner function of :func:`~msrc`, not pairwise. The multi-scale realized
volatility (MSRV) estimator of Zhang (2006). It is extended to multiple
dimensions following Zhang (2011).
Parameters
----------
data : numpy.ndarray, >0, shape = (p, n)
previous tick prices with dimensions p by n, where
p = #assets, n = #number of refresh times, most recent tick on the
right, must be synchronized (e.g. with :func:`~refresh_time`).
M : int, >=1
The number of scales.
If ``M=None`` all scales :math:`i = 1, ..., M` are used, where M is
        chosen :math:`M = n^{1/2}` according to Eqn (34) of Zhang (2006).
N : int, >=0
The constant $N$ of Tao et al. (2013)
If ``N=None`` :math:`N = n^{1/2}`. Lam and Qian (2019) need
:math:`N = n^{2/3}` for non-sparse integrated covariance matrices,
        in which case the rate of convergence reduces to $n^{-1/6}$.
Returns
-------
out : numpy.ndarray
        The MSRV estimate of the integrated covariance matrix.
Examples
--------
# >>> np.random.seed(0)
# >>> n = 200000
# >>> returns = np.random.multivariate_normal([0, 0], [[1,0.5],[0.5,1]], n)/n**0.5
# >>> prices = 100*np.exp(returns.cumsum(axis=0))
# >>> # add Gaussian microstructure noise
# >>> noise = 10*np.random.normal(0, 1, n*2).reshape(-1, 2)*np.sqrt(1/n**0.5)
# >>> prices +=noise
# >>> # sample n/2 (non-synchronous) observations of each tick series
# >>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index()
# >>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index()
# >>> pt = refresh_time([series_a, series_b])
# >>> icov = _msrc(pt.values.T, K=np.array([1]))
# >>> icov_c = _msrc(pt.values.T)
# >>> # This is the biased uncorrected integrated covariance matrix estimate.
# >>> icov
# array([[11.55288112, 0.45281646],
# [ 0.45281646, 2.17269871]])
# >>> # This is the unbiased corrected integrated covariance matrix estimate.
# >>> icov_c
# array([[0.89731589, 0.48705002],
# [0.48705002, 0.9801241 ]])
# >>> # In the univariate case we add an axis
# >>> univariate_ticks = series_a.values[:, None]
# >>> ivar_c = _msrc(univariate_ticks.T)
# >>> ivar_c
# array([[0.90361064]])
"""
p, n = data.shape
if M is None:
# Opt M according to Eqn (34) of Zhang (2006)
M = int(np.ceil(n**(1/2)))
if N is None:
# N according to Fan and Wang (2007)
N = int(np.ceil(n**(1/2)))
# N according to Lam and Wang (2019)
# N = int(np.ceil(n**(2/3)))
s = np.zeros((p, p))
if M > 1:
for m in range(1, M+1):
# optimal weights according to Eqn (18)
a = 12*(m + N)*(m - M/2 - 1/2) / (M*(M**2 - 1))
YY_m = _get_YY_m(data, N, m)
s += a * YY_m
zeta = (M + N)*(N + 1)/((n + 1)*(M - 1))
YY_K1 = _get_YY_m(data, N, 1)
YY_KM = _get_YY_m(data, N, M)
s += zeta * (YY_K1 - YY_KM)
else:
s += _get_YY_m(data, 0, 1)
return s
@numba.njit(cache=False, parallel=True)
def _msrc_pairwise(indeces, values, M=None, N=None):
"""
Accelerated inner function of pairwise :func:`msrc`.
Parameters
----------
indeces : numpy.ndarray, shape(p, n_max), dtype='uint64'
The length is equal to the number of assets. Each 'row' contains
the unix time of ticks of one asset.
values : numpy.ndarray, shape(p, n_max), dtype='float64'>0
Each 'row' contains the log-prices of ticks of one asset.
    M : int, >=1
        The number of scales.
    N : int, >=0
        The constant N of Tao et al. (2013).
Returns
-------
cov : numpy.ndarray, 2d
        The integrated covariance matrix using the pairwise synchronized data.
"""
p = indeces.shape[0]
cov = np.ones((p, p))
# don't loop over ranges but get all indeces in advance
# to improve parallelization.
idx = _upper_triangular_indeces(p)
for t in prange(len(idx)):
i, j = idx[t, :]
# get the number of no nan values for asset i and j.
# This is needed since nan is not defined
# for int64, which are in the indeces. Hence, I use the fact that
# values and indeces have the same shape and nans are only at the
# end of an array.
n_not_nans_i = values[i][~np.isnan(values[i])].shape[0]
        n_not_nans_j = values[j][~np.isnan(values[j])].shape[0]
if i == j:
cov[i, i] = _msrc(values[i, :n_not_nans_i].reshape(1, -1), M, N)[0, 0]
else:
merged_values, _ = _refresh_time(
(indeces[i, :n_not_nans_i],
indeces[j, :n_not_nans_j]),
(values[i, :n_not_nans_i],
values[j, :n_not_nans_j]))
# numba doesn't support boolean indexing of 2d array
merged_values = merged_values.flatten()
merged_values = merged_values[~np.isnan(merged_values)]
merged_values = merged_values.reshape(-1, 2)
cov[i, j] = _msrc(merged_values.T, M, N)[0, 1]
cov[j, i] = cov[i, j]
return cov
def tsrc(tick_series_list, J=1, K=None):
r"""
The two-scales realized volatility (TSRV) of
    Zhang et al. (2005). It is extended to handle multiple dimensions
    according to Zhang (2011). :func:`~msrc` has a better convergence
    rate and is thus preferred.
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick-log-prices of one asset
with datetime index.
K : int, default = ``int(n**(2/3))``
long scale, default = ``int(n**(2/3))`` as per Zhang (2005)
J : int, default = 1
short scale
Returns
-------
out : numpy.ndarray
The TSRV estimate.
Examples
--------
>>> np.random.seed(0)
>>> n = 200000
>>> returns = np.random.multivariate_normal([0, 0], [[1, 0.5],[0.5, 1]], n)/n**0.5
>>> prices = 100*np.exp(returns.cumsum(axis=0))
>>> # add Gaussian microstructure noise
>>> noise = 10*np.random.normal(0, 1, n*2).reshape(-1, 2)*np.sqrt(1/n**0.5)
>>> prices += noise
>>> # sample n/2 (non-synchronous) observations of each tick series
>>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index()
>>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index()
>>> # take logs
>>> series_a = np.log(series_a)
>>> series_b = np.log(series_b)
>>> icov_c = tsrc([series_a, series_b])
>>> # This is the unbiased, corrected integrated covariance matrix estimate.
>>> np.round(icov_c, 3)
array([[0.995, 0.361],
[0.361, 0.977]])
Notes
-----
The two-scales realized volatility (TSRV) estimator is defined as
\begin{equation}
\widehat{\langle X^{(j)}, X^{(j)}\rangle}^{(\mathrm{TSRV})}_{T}=
\left[Y^{(j)}, Y^{(j)}\right]_{T}^{(K)}-\frac{\bar{n}_{K}}{\bar{n}_{J}}
\left[Y^{(j)}, Y^{(j)}\right]_{T}^{(J)},
\end{equation}
where
\begin{equation}
\left[Y^{(j)}, Y^{(j)}\right]_{T}^{(K)}=\frac{1}{K}
\sum_{i=K}^{n}\left(Y_{\tau_{i}^{(j)}}^{(j)}-
Y_{\tau_{i-K}^{(j)}}^{(j)}\right)^2,
\end{equation}
with $K$ being a positive integer usually chosen much larger than 1 and
$\bar{n}_{K}=\left(n-K+1\right)/K$ and $\bar{n}_{J}=\left(n- J+1\right)/J$.
    If $K$ is chosen on the order of $K=\mathcal{O}\left(n^{2 / 3}\right)$ this
estimator is asymptotically unbiased, consistent, asymptotically normal
distributed and converges at rate $n^{-1 / 6}$.
Zhang (2011) proposes the (multivariate) two scales realized covariance
(TSCV) estimator based on previous-tick times of asset $k$ and $l$,
which simultaneously corrects for the bias due to asynchronicity and the
bias due to microstructure noise. Previous-tick times may be computed via
:func:`~refresh_time`.
The TSCV estimator is defined as
\begin{equation}
\widehat{\langle X^{(k)},X^{(l)}\rangle}_{T}^{(TSCV)}=c\left(\left[Y^{(k)},
Y^{(l)}\right]_{T}^{(K)}-\frac{\bar{n}_{K}}{\bar{n}_{J}}\left[Y^{(k)},
Y^{(l)}\right]_{T}^{(J)}\right),
\end{equation}
where
\begin{equation}
\left[Y^{(k)}, Y^{(l)}\right]_{T}^{(K)}=\frac{1}{K}
\sum_{i=K}^{\tilde{n}}\left(Y^{(k)}_{\tau^{(k)}_{i}}-Y^{(k)}_{\tau^{(k)}_{i-K}}
\right)\left(Y^{(l)}_{\tau^{(l)}_{i}}-Y^{(l)}_{\tau^{(l)}_{i-K}}\right)
\end{equation}
$c=1+o_{p}\left(\tilde{n}^{-1 / 6}\right)$ is a small sample correction.
$K$ is again a positive integer usually chosen much larger than 1 and
$\bar{n}_{K}=\left(\tilde{n}- K+1\right) / K$ and $\bar{n}_{J}=
\left(\tilde{n}- J+1\right) / J$.
The author shows that if $K=\mathcal{O}\left((n^{(k)}+n^{(l)})^{2/3}\right)$
this estimator is asymptotically unbiased, consistent, asymptotically
normal distributed and converges at rate $\tilde{n}^{-1 / 6}$.
    .. note:: Use :func:`~msrc` since it has a better convergence rate.
References
----------
<NAME>., <NAME>. and <NAME>. (2005).
A tale of two time scales: Determining integrated
volatility with noisy high-frequency data, Journal of
the American Statistical Association 100(472): 1394–1411.
<NAME>. (2011). Estimating covariation: Epps effect,
microstructure noise, Journal of Econometrics 160.
"""
data = refresh_time(tick_series_list)
M = data.shape[0]
if K is None:
K = int(M ** (2 / 3))
sk = (data - data.shift(K)).dropna()
sk = sk.transpose().dot(sk)
sk = 1 / K * sk
sj = (data - data.shift(J)).dropna()
sj = sj.transpose().dot(sj)
sj = 1 / J * sj
nj = (M - J + 1) / J
nk = (M - K + 1) / K
return (sk - nk/nj * sj).to_numpy()
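# Illustrative sketch (not part of the original module): comparing tsrc with
# the faster-converging msrc on simulated noisy, non-synchronous ticks, as
# suggested in the note above. All simulation parameters are assumptions.
def _example_tsrc_vs_msrc(seed=0, n=20000):
    """Return the TSCV and MSRV covariance estimates for one simulated path."""
    rng = np.random.default_rng(seed)
    rets = rng.multivariate_normal([0, 0], [[1, 0.5], [0.5, 1]], n) / n**0.5
    prices = 100 * np.exp(rets.cumsum(axis=0))
    prices += rng.normal(0, 0.1, (n, 2))  # additive microstructure noise
    series_a = np.log(pd.Series(prices[:, 0]).sample(n // 2, random_state=seed).sort_index())
    series_b = np.log(pd.Series(prices[:, 1]).sample(n // 2, random_state=seed).sort_index())
    return tsrc([series_a, series_b]), msrc([series_a, series_b])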
@numba.njit
def _numba_minimum(x):
"""
The weighting function of Christensen et al. (2010) used in
:func:`~mrc`.
Parameters
----------
x : numpy.ndarray
Returns
-------
out : numpy.ndarray
The output of the function applied to each element of x.
"""
return np.minimum(x, 1-x)
def mrc(tick_series_list, theta=None, g=None, bias_correction=True,
pairwise=True, k=None):
r"""
The modulated realised covariance (MRC) estimator of
Christensen et al. (2010).
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick-log-prices of one asset
with datetime index.
theta : float, optional, default=None
Theta is used to determine the preaveraging window ``k``.
If ``bias_correction`` is True (see below)
then :math:`k = \theta \sqrt{n}`,
else :math:`k = \theta n^{1/2+ 0.1}`.
Hautsch & Podolskij (2013) recommend 0.4 for liquid assets
and 0.6 for less liquid assets. If ``theta=0``, the estimator reduces
to the standard realized covariance estimator. If ``theta=None`` and
``k`` is not specified explicitly, the suggested theta of 0.4 is used.
g : function, optional, default = ``None``
A vectorized weighting function.
If ``g = None``, :math:`g=min(x, 1-x)`
bias_correction : boolean, optional
If ``True`` (default) then the estimator is optimized for convergence
rate but it might not be p.s.d. Alternatively as described in
        Christensen et al. (2010) it can be omitted. Then k should be chosen
larger than otherwise optimal.
pairwise : bool, default=True
If ``True`` the estimator is applied to each pair individually. This
increases the data efficiency but may result in an estimate that is
not p.s.d.
k : int, optional, default=None
The bandwidth parameter with which to preaverage. Alternative to theta.
Useful for non-parametric eigenvalue regularization based on sample
        splitting.
Returns
-------
mrc : numpy.ndarray
The mrc estimate of the integrated covariance.
Examples
--------
>>> np.random.seed(0)
>>> n = 2000
>>> returns = np.random.multivariate_normal([0, 0], [[1, 0.5],[0.5, 1]], n)
>>> returns /= n**0.5
>>> prices = 100 * np.exp(returns.cumsum(axis=0))
>>> # add Gaussian microstructure noise
>>> noise = 10 * np.random.normal(0, 1, n * 2).reshape(-1, 2)
>>> noise *= np.sqrt(1 / n ** 0.5)
>>> prices += noise
>>> # sample n/2 (non-synchronous) observations of each tick series
>>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index()
>>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index()
>>> # take logs
>>> series_a = np.log(series_a)
>>> series_b = np.log(series_b)
>>> icov_c = mrc([series_a, series_b], pairwise=False)
>>> # This is the unbiased, corrected integrated covariance matrix estimate.
>>> np.round(icov_c, 3)
array([[0.882, 0.453],
[0.453, 0.934]])
>>> # This is the unbiased, corrected realized variance estimate.
>>> ivar_c = mrc([series_a], pairwise=False)
>>> np.round(ivar_c, 3)
array([[0.894]])
>>> # Use ticks more efficiently by pairwise estimation
>>> icov_c = mrc([series_a, series_b], pairwise=True)
>>> np.round(icov_c, 3)
array([[0.894, 0.453],
[0.453, 0.916]])
Notes
-----
    The MRC estimator is equivalent to the realized integrated covariance
    estimator using preaveraged returns. It is thus of the form
.. math::
\begin{equation}
\label{eqn:mrc_raw}
\left[\mathbf{Y}\right]^{(\text{MRC})}=\frac{n}{n-K+2}
\frac{1}{\psi_{2} K} \sum_{i=K-1}^{n} \bar{\mathbf{Y}}_{i}
\bar{\mathbf{Y}}_{i}^{\prime},
\end{equation}
where :math:`\frac{n}{n-K+2}` is a finite sample correction, and
.. math::
\begin{equation}
\begin{aligned}
&\psi_{1}^{k}=k \sum_{i=1}^{k}\left(g\left(\frac{i}{k}\right)-g
\left(\frac{i-1}{k}\right)\right)^{2}\\
&\psi_{2}^{k}=\frac{1}{k}
\sum_{i=1}^{k-1} g^{2}\left(\frac{i}{k}\right).
\end{aligned}
\end{equation}
In this form, however, the estimator is biased. The bias corrected
estimator is given by
.. math::
\begin{equation}
\label{eqn:mrc}
\left[\mathbf{Y}\right]^{(\text{MRC})}=\frac{n}{n-K+2}
\frac{1}{\psi_{2} k} \sum_{i=K-1}^{n} \bar{\mathbf{Y}}_{i}
\left(\bar{\mathbf{Y}}_{i}-\frac{\psi_{1}}{\theta^{2} \psi_{2}}
\hat{\mathbf{\Psi}}\right)^{\prime},
\end{equation}
where
.. math::
\begin{equation}
\hat{\mathbf{\Psi}}=\frac{1}{2 n} \sum_{i=1}^{n} \Delta_{i}\mathbf{Y}
\left(\Delta_{i} \mathbf{Y}\right)^{\prime}.
\end{equation}
The rate of convergence of this estimator is determined by the
window-length :math:`K`. Choosing
:math:`K=\mathcal{O}(\sqrt{n})`, delivers the best rate of convergence
of :math:`n^{-1/4}`. It is thus suggested to choose
:math:`K=\theta \sqrt{n}`, where :math:`\theta` can be calibrated from the
data. Hautsch and Podolskij (2013) suggest values between 0.4 (for liquid
stocks) and 0.6 (for less liquid stocks).
.. note::
The bias correction may result in an estimate that is not positive
semi-definite.
If positive semi-definiteness is essential, the bias-correction can be
omitted. In this case, :math:`K` should be chosen larger
than otherwise optimal with respect to the convergence rate. Of course,
the convergence rate is slower then. The optimal rate of convergence
without the bias correction is :math:`n^{-1 / 5}`, which is attained
when :math:`K=\theta n^{1/2+\delta}` with :math:`\delta=0.1`.
``theta`` should be chosen between 0.3 and 0.6. It should be chosen
higher if (i) the sampling frequency declines,
(ii) the trading intensity of the underlying stock is low,
(iii) transaction time sampling (TTS) is used as opposed to calendar time
sampling (CTS). A high ``theta`` value can lead to oversmoothing when
CTS is used. Generally the higher the sampling frequency the better.
Since :func:`~mrc` and :func:`~msrc` are based on different approaches
it might make sense to ensemble them. Monte Carlo results show that the
variance estimate of the ensemble is better than each component
individually. For covariance estimation the preaveraged
:func:`~hayashi_yoshida` estimator has the advantage that even ticks that
don't contribute to the covariance (due to log-summability) are used for
smoothing. It thus uses the data more efficiently.
References
----------
<NAME>., <NAME>. and <NAME>. (2010). Pre-averaging
estimators of the ex-post covariance matrix in noisy diffusion models
with non-synchronous data, Journal of Econometrics 159(1): 116–133.
<NAME>. and <NAME>. (2013). Preaveraging-based estimation of
quadratic variation in the presence of noise and jumps: theory,
implementation, and empirical evidence,
Journal of Business & Economic Statistics 31(2): 165–183.
"""
if g is None:
g = _numba_minimum
p = len(tick_series_list)
if pairwise and p > 1:
indeces, values = _get_indeces_and_values(tick_series_list)
cov = _mrc_pairwise(indeces, values, theta, g, bias_correction, k)
else:
if p > 1:
data = refresh_time(tick_series_list).dropna()
data = np.diff(data.to_numpy(), axis=0)
else:
data = tick_series_list[0]
data = np.diff(data.to_numpy(), axis=0)[:, None]
cov = _mrc(data, theta, g, bias_correction, k)
return cov
@numba.njit(cache=False, fastmath=False, parallel=False)
def _mrc(data, theta, g, bias_correction, k):
r"""
The modulated realised covariance (MRC) estimator of
Christensen et al. (2010).
Parameters
----------
data : numpy.ndarray, shape = (n, p)
An array of univariate log_returns
or synchronized multivariate log-returns
(e.g. with :func:`~refresh_time`).
theta : float, optional, default=0.4
Theta is used to determine the preaveraging window ``k``.
If ``bias_correction`` is True (see below)
then :math:`k = \theta \sqrt{n}`,
else :math:`k = \theta n^{1/2+ 0.1}`.
Hautsch & Podolskij (2013) recommend 0.4 for liquid assets
and 0.6 for less liquid assets. If ``theta=0``, the estimator reduces
to the standard realized covariance estimator.
g : function
A vectorized weighting function.`
bias_correction : boolean
If ``True``, then the estimator is optimized for convergence
rate but it might not be p.s.d. Alternatively, as described in
        Christensen et al. (2010), it can be omitted. Then k should be chosen
larger than otherwise optimal.
k : int
The bandwidth parameter with which to preaverage. Alternative to theta.
Useful for non-parametric eigenvalue regularization based on sample
        splitting.
Returns
-------
mrc : numpy.ndarray
The mrc estimate of the integrated covariance.
"""
n, p = data.shape
# get the bandwidth
if k is not None and theta is not None:
raise ValueError("Either ``theta`` or ``k`` can be specified,"
" but not both! One of them must be ``None``.")
if k is None:
if theta is None:
theta = 0.4
k = _get_k(n, theta, bias_correction)
if theta is None:
if bias_correction:
theta = k / np.sqrt(n)
else:
theta = k / np.power(n, 0.6)
    # If theta is greater than zero compute the preaveraging estimator,
# otherwise the estimator is just the realized covariance matrix.
if theta > 0:
psi2 = np.sum(g(np.arange(1, k)/k)**2)/k
psi1 = np.sum((g(np.arange(1, k)/k)-g((np.arange(1, k)-1)/k))**2)*k
weight = g(np.arange(1, k)/k)
data_pa = _preaverage(data, weight)
data_pa = data_pa.flatten()
data_pa = data_pa[~np.isnan(data_pa)]
data_pa = data_pa.reshape(-1, p)
        # The bias correction term, bc, needs to be initialized as an array to
# have a consistent type for numba.
bc = np.zeros((p, p))
if bias_correction:
bc += psi1 / (theta ** 2 * psi2) * data.T @ data / n / 2
finite_sample_correction = n / (n - k + 2)
mrc = finite_sample_correction / (psi2 * k) * data_pa.T @ data_pa - bc
else:
mrc = data.T @ data
return mrc
@numba.njit(cache=False, parallel=True, fastmath=False)
def _mrc_pairwise(indeces, values, theta, g, bias_correction, k):
r"""
Accelerated inner function of pairwise :func:`~mrc`.
Parameters
----------
indeces : numpy.ndarray, shape(p, n_max), dtype='uint64'
The length is equal to the number of assets. Each 'row' contains
the unix time of ticks of one asset.
values : numpy.ndarray, shape(p, n_max), dtype='float64'>0
Each 'row' contains the log-prices of ticks of one asset.
theta : float, optional, default=0.4
theta is used to determine the preaveraging window ``k``.
If ``bias_correction`` is True (see below)
then :math:`k = \theta \sqrt{n}`,
else :math:`k = \theta n^{1/2+ 0.1}`.
Hautsch & Podolskij (2013) recommend 0.4 for liquid assets
and 0.6 for less liquid assets. If ``theta=0``, the estimator reduces
to the standard realized covariance estimator.
g : function
A vectorized weighting function.`
bias_correction : boolean
If ``True``, then the estimator is optimized for convergence
rate but it might not be p.s.d. Alternatively as described in
        Christensen et al. (2010) it can be omitted. Then k should be chosen
larger than otherwise optimal.
k : int
The bandwidth parameter with which to preaverage. Alternative to theta.
Useful for non-parametric eigenvalue regularization based on sample
        splitting.
Returns
-------
cov : numpy.ndarray, 2d
The integrated covariance matrix using the pairwise synchronized data.
"""
p = indeces.shape[0]
cov = np.ones((p, p))
# don't loop over ranges but get all indeces in advance
# to improve parallelization.
idx = _upper_triangular_indeces(p)
for t in prange(len(idx)):
i, j = idx[t, :]
# get the number of no nan values for asset i and j.
# This is needed since nan is not defined
# for int64, which are in the indeces. Hence, I use the fact that
# values and indeces have the same shape and nans are only at the
# end of an array.
n_not_nans_i = values[i][~np.isnan(values[i])].shape[0]
        n_not_nans_j = values[j][~np.isnan(values[j])].shape[0]
if i == j:
data = values[i, :n_not_nans_i].reshape(-1, 1)
data = data[1:, :] - data[:-1, :]
cov[i, i] = _mrc(data, theta, g, bias_correction, k)[0, 0]
else:
merged_values, _ = _refresh_time((indeces[i, :n_not_nans_i],
indeces[j, :n_not_nans_j]),
(values[i, :n_not_nans_i],
values[j, :n_not_nans_j]))
# numba doesn't support boolean indexing of 2d array
merged_values = merged_values.flatten()
merged_values = merged_values[~np.isnan(merged_values)]
data = merged_values.reshape(-1, 2)
data = data[1:, :] - data[:-1, :]
cov[i, j] = _mrc(data, theta, g, bias_correction, k)[0, 1]
cov[j, i] = cov[i, j]
return cov
@numba.njit
def _get_k(n, theta, bias_correction):
""" Get the optimal bandwidth for preaveraging depending on the sample
size and whether or not to correct for the bias.
"""
if theta > 0:
if bias_correction:
k = np.ceil(np.sqrt(n)*theta)
else:
delta = 0.1
k = np.ceil(np.power(n, 0.5+delta)*theta)
else:
k = 1
return int(k)
@numba.njit
def parzen_kernel(x):
r"""
The Parzen weighting function used in the kernel realized volatility
matrix estimator (:func:`~krvm`) of Barndorff-Nielsen et al. (2011).
Parameters
----------
x : float
Returns
-------
y : float
The weight.
References
----------
<NAME>., <NAME>., <NAME>. and <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite estimators
of the covariation of equity prices with noise and non-synchronous trading,
Journal of Econometrics 162(2): 149– 169.
"""
if x < 0:
raise ValueError("x must be >= 0.")
elif x <= 1/2:
y = 1 - 6 * x**2 + 6 * x**3
elif x <= 1:
y = 2 * (1 - x)**3
else:
y = 0
return y
@numba.njit
def quadratic_spectral_kernel(x):
"""
The Quadratic Spectral weighting function used in the kernel realized
    volatility matrix estimator (:func:`~krvm`) of Barndorff-Nielsen et
    al. (2011).
Parameters
----------
x : float
Returns
-------
y : float
The weight.
References
----------
<NAME>., <NAME>., <NAME>. and <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite estimators
of the covariation of equity prices with noise and non-synchronous trading,
Journal of Econometrics 162(2): 149– 169.
"""
if x < 0:
raise ValueError("x must be >= 0.")
elif x == 0:
y = 1
else:
y = 3 / (x**2) * (np.sin(x) / x - np.cos(x))
return y
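# Illustrative sketch (not part of the original module): evaluating the two
# kernel weighting functions on a small grid to show how the weights decay
# with the scaled lag x. The helper name and the grid are assumptions.
def _example_kernel_weights(num=7):
    """Return (x, Parzen weight, Quadratic Spectral weight) triples on [0, 1.5]."""
    xs = np.linspace(0.0, 1.5, num)
    return [(float(x), parzen_kernel(float(x)), quadratic_spectral_kernel(float(x)))
            for x in xs]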
def get_bandwidth(n, var_ret, var_noise, kernel):
"""
Compute the optimal bandwidth parameter $H$ for :func:`~krvm` according to
Barndorff-Nielsen et al. (2011).
Parameters
----------
n : int >0
The sample size.
var_ret : float > 0
The variance of the efficient return process.
var_noise :float >=0
The variance of the noise process.
Returns
-------
H : int
The bandwidth parameter.
References
----------
Barndorff-Nielsen, <NAME>., <NAME>., <NAME>. and <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite estimators
of the covariation of equity prices with noise and non-synchronous trading,
Journal of Econometrics 162(2): 149– 169.
"""
if kernel == 'parzen':
# Parzen kernel c_star according to Table 1 of
# Barndorff-Nielsen et al. (2011).
c_star = 3.51
elif kernel == 'quadratic_spectral':
# Quadratic Spectral c_star according to Table 1 of
# Barndorff-Nielsen et al. (2011).
c_star = 0.46
else:
raise ValueError("Specified kernel not implemented.")
xi_sq = var_noise / var_ret
H = int(c_star * xi_sq**(2/5) * n**(3/5))
return H
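# Illustrative sketch (not part of the original module): choosing the KRVM
# bandwidth H for an assumed noise-to-signal ratio. The variance inputs are
# assumptions; in practice var_ret would come from a low-frequency estimator
# and var_noise from a high-frequency estimator.
def _example_bandwidth_choice(n=10000, var_ret=1.0, var_noise=0.01):
    """Return the optimal H for the Parzen and Quadratic Spectral kernels."""
    return (get_bandwidth(n, var_ret, var_noise, 'parzen'),
            get_bandwidth(n, var_ret, var_noise, 'quadratic_spectral'))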
@numba.njit
def gamma(data, h):
r"""
The h-th realized autocovariance.
Parameters
----------
data : numpy.ndarray, shape = (p, n)
An array of synchronized and demeaned log_returns.
(e.g. with :func:`~refresh_time`).
h : int
The order of the autocovariance.
Returns
-------
gamma_h : numpy.ndarray, shape = (p, p)
The h-th realized autocovariance matrix.
Notes
-----
The h-th realized autocovariance is given by
\begin{equation}
\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}\right)=
\sum_{s=h+2}^{n+1}\left(\mathbf{Y}(s)-\mathbf{Y}(s-1)\right)
\left(\mathbf{Y}(s-h)-\mathbf{Y}(s-h-1)\right)^{\prime}, \quad h \geq 0
\end{equation}
and
\begin{equation}
\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}\right)=
\boldsymbol{\gamma}^{(-h)}\left(\mathbf{Y}\right)^{\prime}, \quad h < 0,
\end{equation}
where $\mathbf{Y}$ denotes the synchronized zero-return log-price.
"""
if h == 0:
gamma_h = data @ data.T
else:
gamma_h = data[:, abs(h):] @ data[:, :-abs(h)].T
if h < 0:
gamma_h = gamma_h.T
return gamma_h
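# Illustrative sketch (not part of the original module): computing the first
# few realized autocovariance matrices of simulated synchronized returns,
# which are the building blocks of the kernel estimator defined below. The
# helper name and simulation parameters are assumptions.
def _example_autocovariances(seed=0, n=500, p=2, max_h=3):
    """Return gamma(0), ..., gamma(max_h) for simulated (p, n) returns."""
    rng = np.random.default_rng(seed)
    data = rng.normal(0.0, 1.0, (p, n)) / np.sqrt(n)
    return [gamma(data, h) for h in range(max_h + 1)]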
def krvm(tick_series_list, H, pairwise=True, kernel=quadratic_spectral_kernel):
r"""
The kernel realized volatility matrix estimator (KRVM) of Barndorff-Nielsen
et al. (2011).
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick-log-prices of one asset
with datetime index.
H : int, > 0
        The bandwidth parameter for the kernel weighting function.
Should be on the order of $n^{3/5}$.
pairwise : bool, default=True
If ``True`` the estimator is applied to each pair individually. This
increases the data efficiency but may result in an estimate that is
        not p.s.d. even for the p.s.d. version of this estimator.
kernel : function, default=quadratic_spectral_kernel
The kernel weighting function.
Returns
-------
cov : numpy.ndarray
        The integrated covariance matrix estimate.
Notes
-----
The multivariate realized kernel estimator smoothes the autocovariance
operator and thereby achieves the optimal convergence rate in the
multivariate setting with noise and asynchronous observation times.
Incidentally, this estimator is similar in form to the HAC, widely used in
the statistics and econometrics literature to deal with heteroscedastic and
autocorrelated noise. Observations are synchronized with the
    :func:`refresh_time` scheme. In addition, $m$ observations are averaged
the beginning and at the end of the trading day to estimate the efficient
price at these times. The authors call this 'jittering'. In practice the
effect of jittering is negligible but it is needed for proving consistency.
(It is ignored in this implementation.)
    The jittered (with parameter $m$) log-price vectors are denoted as
$\mathbf{Y}^{(m)}(s), s=1, \ldots, n-2 m+1$.
The kernel estimator is defined by
\begin{equation}
\widehat{\mathbf{\Sigma}}^{(KRVM)}=\boldsymbol{\gamma}^{(0)}
\left(\mathbf{Y}^{(m)}\right)+\sum_{h=1}^{n-2 m} k\left(\frac{h-1}{H}
\right)\left[\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}^{(m)}\right)+
\boldsymbol{\gamma}^{(-h)}\left(\mathbf{Y}^{(m)}\right)\right],
\end{equation}
where
\begin{equation}
\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}\right)=
\sum_{s=h+2}^{n+1}\left(\mathbf{Y}(s)-\mathbf{Y}(s-1)\right)
\left(\mathbf{Y}(s-h)-\mathbf{Y}(s-h-1)\right)^{\prime}, \quad h \geq 0
\end{equation}
and
\begin{equation}
\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}\right)=
\boldsymbol{\gamma}^{(-h)}\left(\mathbf{Y}\right)^{\prime}, \quad h < 0,
\end{equation}
with $\mathbf{Y}$ denoting the synchronized zero-return log-price.
$\boldsymbol{\gamma}^{(h)}$ is the $h$th realized autocovariance (:func:`gamma`).
$k(\cdot)$ is the kernel function with its bandwidth parameter $H$. It is
assumed that
(i) $k(0)=1$ and $k^{\prime}(0)=0$,
(ii) $k(\cdot)$ is twice differentiable with continuous
derivatives, and
(iii) $\int_{0}^{\infty} k(x)^{2} d x,
\int_{0}^{\infty} k^{\prime}(x)^{2} d x$ and $\int_{0}^{\infty}
k^{\prime \prime}(x)^{2} d x$ are finite. A slightly adjusted form of this
estimator that is positive semidefinite is given by
\begin{equation}
\widehat{\mathbf{\Sigma}}^{(KRVM_{psd})}=\boldsymbol{\gamma}^{(0)}
\left(\mathbf{Y}^{(m)}\right)+\sum_{h=1}^{n-2 m} k\left(\frac{h}{H}\right)
\left[\boldsymbol{\gamma}^{(h)}\left(\mathbf{Y}^{(m)}\right)+
\boldsymbol{\gamma}^{(-h)}\left(\mathbf{Y}^{(m)}\right)\right].
\end{equation}
This form requires the additional assumption $\int_{-\infty}^{\infty}
k(x) \exp (i x \lambda) d x \geq 0$ for all $\lambda \in \mathbb{R}$.
Choosing the right kernel function is important. The authors show, for
example, that the estimator based on the Bartlett weight function is
inconsistent. Instead, the Parzen kernel (:func:`parzen_kernel`) is
suggested as a weight function that yields a consistent estimator and can
be efficiently implemented. The bandwidth $H$ must be on the order of
$n^{3 / 5}$. The authors choose the scalar $H$ as the average of optimal
individual $H^{(j)}$:
$$\bar{H}=p^{-1} \sum_{j=1}^{p} H^{(j)},$$
where
\begin{equation}
H^{(j)}=c^{*} \xi_{j}^{4 / 5} n^{3 / 5},
\end{equation}
with
\begin{equation}
c^{*}=\left\{k^{\prime \prime}(0)^{2} / k_{\bullet}^{0,0}\right\}^{1 / 5},
\end{equation}
and
\begin{equation}
\xi_{j}^{2}={\Sigma}_{\epsilon, j j} / {\Sigma}_{j j}.
\end{equation}
$\mathbf{\Sigma}_{\epsilon}$ and $\mathbf{\Sigma}$ denote, as previously
defined, the integrated covariance matrix of the noise and the efficient
return process, respectively. Here these quantities are understood over the
interval under consideration. Hence, $\xi_{j}^{2}$ can be interpreted as
the ratio of the noise variance and the return variance.
For the Parzen kernel $c^{*} = 3.51$, as tabulated by the authors. It is a
measure of the relative asymptotic efficiency of the kernel.
${\Sigma}_{j j}$ may be estimated via a low frequency estimator and
${\Sigma}_{\epsilon,j j}$ via a high frequency estimator.
References
----------
<NAME>., <NAME>., <NAME>. and <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite estimators
of the covariation of equity prices with noise and non-synchronous trading,
Journal of Econometrics 162(2): 149– 169."""
p = len(tick_series_list)
if pairwise and p > 1:
indeces, values = _get_indeces_and_values(tick_series_list)
cov = _krvm_pairwise(indeces, values, H, kernel)
else:
if p > 1:
data = refresh_time(tick_series_list).dropna()
data = np.diff(data.to_numpy(), axis=0)
else:
data = tick_series_list[0]
data = np.diff(data.to_numpy(), axis=0)[:, None]
cov = _krvm(data.T, H, kernel)
return cov
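# Illustrative sketch (not part of the original module): estimating the
# integrated covariance with krvm on simulated noisy, non-synchronous ticks.
# The bandwidth comes from get_bandwidth with assumed variance inputs, and the
# Parzen kernel is used so that the kernel and the bandwidth constant match.
# All simulation parameters and the helper name are assumptions.
def _example_krvm_usage(seed=0, n=20000):
    """Return a KRVM covariance estimate for one simulated path."""
    rng = np.random.default_rng(seed)
    rets = rng.multivariate_normal([0, 0], [[1, 0.5], [0.5, 1]], n) / n**0.5
    prices = 100 * np.exp(rets.cumsum(axis=0))
    prices += rng.normal(0, 0.1, (n, 2))  # additive microstructure noise
    series_a = np.log(pd.Series(prices[:, 0]).sample(n // 2, random_state=seed).sort_index())
    series_b = np.log(pd.Series(prices[:, 1]).sample(n // 2, random_state=seed).sort_index())
    H = get_bandwidth(n // 2, var_ret=1.0, var_noise=1e-6, kernel='parzen')
    return krvm([series_a, series_b], H, kernel=parzen_kernel)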
@numba.njit(cache=False, parallel=True, fastmath=False)
def _krvm_pairwise(indeces, values, H, kernel):
r"""
Accelerated inner function of pairwise :func:`~krvm`.
Parameters
----------
indeces : numpy.ndarray, shape(p, n_max), dtype='uint64'
The length is equal to the number of assets. Each 'row' contains
the unix time of ticks of one asset.
values : numpy.ndarray, shape(p, n_max), dtype='float64'>0
Each 'row' contains the log-prices of ticks of one asset.
H : int, > 0
The bandwidth parameter for the Parzen kernel.
Should be on the order of $n^{3/5}$.
kernel : function
The kernel weighting function.
Returns
-------
cov : numpy.ndarray, 2d
The integrated covariance matrix using the pairwise synchronized data.
"""
p = indeces.shape[0]
cov = np.ones((p, p))
# don't loop over ranges but get all indeces in advance
# to improve parallelization.
idx = _upper_triangular_indeces(p)
for t in prange(len(idx)):
i, j = idx[t, :]
# get the number of no nan values for asset i and j.
# This is needed since nan is not defined
# for int64, which are in the indeces. Hence, I use the fact that
# values and indeces have the same shape and nans are only at the
# end of an array.
n_not_nans_i = values[i][~np.isnan(values[i])].shape[0]
        n_not_nans_j = values[j][~np.isnan(values[j])].shape[0]
if i == j:
data = values[i, :n_not_nans_i].reshape(-1, 1)
data = data[1:, :] - data[:-1, :]
cov[i, i] = _krvm(data.T, H, kernel)[0, 0]
else:
merged_values, _ = _refresh_time((indeces[i, :n_not_nans_i],
indeces[j, :n_not_nans_j]),
(values[i, :n_not_nans_i],
values[j, :n_not_nans_j]))
# numba doesn't support boolean indexing of 2d array
merged_values = merged_values.flatten()
merged_values = merged_values[~np.isnan(merged_values)]
data = merged_values.reshape(-1, 2)
data = data[1:, :] - data[:-1, :]
cov[i, j] = _krvm(data.T, H, kernel)[0, 1]
cov[j, i] = cov[i, j]
return cov
@numba.njit(cache=False, parallel=False, fastmath=False)
def _krvm(data, H, kernel):
"""
Parameters
----------
data : numpy.ndarray, shape = (p, n)
An array of (jittered), synchronized and log_returns.
(e.g. with :func:`~refresh_time`).
H : int, > 0
The bandwidth parameter for the Parzen kernel.
Should be on the order of $n^{3/5}$.
kernel : function
The kernel weighting function.
Returns
-------
cov : numpy.ndarray, 2d
The integrated covariance matrix estimate.
References
----------
<NAME>., <NAME>., <NAME>. and <NAME>. (2011).
Multivariate realised kernels: consistent positive semi-definite estimators
of the covariation of equity prices with noise and non-synchronous trading,
    Journal of Econometrics 162(2): 149–169."""
p, n = data.shape
# if p.s.d estimator: c=0, else: c=1, since pairwise estimation and
# subsequent shrinkage is advocated anyway, hard-code to 1.
c = 1
cov = gamma(data, 0)
for h in range(1, n+1):
weight = kernel((h-c) / H)
# The Parzen kernel, for example, needs to compute only
        # H gammas; after that the weight stays 0, hence early stop.
if weight == 0:
return cov
g = gamma(data, h)
cov += weight * (g + g.T)
return cov
def hayashi_yoshida(tick_series_list, theta=None, k=None):
r"""
The (pairwise) Hayashi-Yoshida estimator of Hayashi and Yoshida (2005).
    This estimator sums up all products of time-overlapping returns
between two assets. This makes it possible to compute unbiased
estimates of the integrated covariance between two assets that
are sampled non-synchronously. The standard realized covariance
estimator is biased toward zero in this case. This is known as
the Epps effect. The function is accelerated via JIT compilation with
Numba.
The preaveraged version handles microstructure noise as shown in
Christensen et al. (2010).
Parameters
----------
tick_series_list : list of pd.Series
Each pd.Series contains tick-log-prices of one asset
with datetime index.
theta : float, theta>=0, default=None
If ``theta=None`` and ``k`` is not specified explicitly,
theta will be set to 0.
If theta>0, the log-returns are preaveraged with theta and
:math:`g(x) = min(x, 1-x)`. Hautsch and Podolskij (2013) suggest
values between 0.4 (for liquid stocks) and 0.6 (for less
liquid stocks).
If ``theta=0``, this is the standard HY estimator.
k : int, >=1, default=None
The bandwidth parameter with which to preaverage. Alternative to
``theta``. Useful for non-parametric eigenvalue regularization based
on sample splitting. When ``k=None`` and ``theta=None``, ``k`` will
be set to 1. If ``k=1``, this is the standard HY estimator.
Returns
-------
cov : numpy.ndarray
The pairwise HY estimate of the integrated covariance matrix.
Notes
-----
The estimator is defined as
.. math::
\begin{equation}
\left\langle X^{(k)}, X^{(l)}\right\rangle_{H Y}=
\sum_{i=1}^{n^{(k)}}\sum_{i'=1}^{n^{(l)}}
\Delta X_{t^{(k)}_i}^{(k)}
\Delta X_{t^{(l)}_{i^{\prime}}}^{(l)}
\mathbf{1}_{\left\{\left(t_{i-1}^{(k)},
t_{i}^{(k)}\right] \cap\left(t_{i^{\prime}-1}^{(l)},
t_{i^{\prime}}^{(l)}\right]\neq \emptyset \right\}},
\end{equation}
where
.. math::
\Delta X_{t^{(j)}_i}^{(j)} :=X_{t^{(j)}_i}^{(j)} - X_{t^{(j)}_{i-1}}^{(j)}
denotes the jth asset tick-to-tick log-return over the interval spanned from
.. math::
{t^{(j)}_{i-1}} \text{ to } {t^{(j)}_i}, i = 1, \cdots, n^{(j)}.
and :math:`n^{(j)} = |t^{(j)}| -1` denotes the number of tick-to-tick
returns. The following diagram visualizes the products of returns that are
part of the sum by the dashed lines.
.. tikz::
\draw (0,1.75) -- (11,1.75)
(0,-0.75) -- (11,-0.75)
(0,1.5) -- (0,2)
(1.9,1.5) -- (1.9,2)
(4,1.5) -- (4,2)
(5,1.5) -- (5,2)
(7.3,1.5) -- (7.3,2)
(10.8,1.5) -- (10.8,2)
(0,-0.5) -- (0,-1)
(1.9,-0.5) -- (1.9,-1)
(5.7,-0.5) -- (5.7,-1)
(8,-0.5) -- (8,-1)
(10.3,-0.5) -- (10.3,-1);
\draw[dashed,gray]
(1.1,1.75) -- (1.1,-0.75)
(3,1.75) -- (3.8,-0.75)
(4.5,1.75) -- (3.8,-0.75)
(6.15,1.75) -- (3.8,-0.75)
(6.15,1.75) -- (6.8,-0.75) ;
\draw[dashed] (11,1.75) -- (12,1.75)
(11,-0.75) -- (12,-0.75);
\draw[very thick] (9.5,-1.4) -- (9.5,0.25)
(9.5,0.8) -- (9.5,2.4);
\draw (0,0.5) node{$t_{0}^{(k)}=t_{0}^{(l)}=0$}
(1.9,1) node{$t_{1}^{(k)}$}
(4,1) node{$t_{2}^{(k)}$}
(5,1) node{$t_{3}^{(k)}$}
(7.3,1) node{$t_{4}^{(k)}$}
(11,1) node{$t_{5}^{(k)}$}
(9.5,0.5) node{\textbf{$T$}}
(1.9,0) node{$t_{1}^{(l)}$}
(5.7,0) node{$t_{2}^{(l)}$}
(8,0) node{$t_{3}^{(l)}$}
(10.3,0) node{$t_{4}^{(l)}$};
\draw (0,1.75) node[left,xshift=-0pt]{$X^{(k)}$}
(0,-0.75) node[left,xshift=-0pt]{$X^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(0,2)--(1.9,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{t^{(k)}_1}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(1.9,2)--(4,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{t^{(k)}_2}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(4,2)--(5,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{t^{(k)}_3}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(5,2)--(7.3,2) node[midway, above,yshift=10pt,]
{$ \Delta X_{t^{(k)}_4}^{(k)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(8,-1)--(5.7,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{t^{(l)}_3}^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(5.7,-1)--(1.9,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{t^{(l)}_2}^{(l)}$};
\draw[decorate,decoration={brace,amplitude=12pt}]
(1.9,-1)--(0,-1) node[midway, below,yshift=-10pt,]
{$ \Delta X_{t^{(l)}_1}^{(l)}$};
When returns are preaveraged with :func:`~preaverage`, the HY
    estimator can be made robust to microstructure noise as well.
It is then of the slightly adjusted form
.. math::
\begin{equation}
\left\langle X^{(k)}, X^{(l)}\right
\rangle_{H Y}^{\theta}=\frac{1}{
\left(\psi_{H Y} K \right)^{2}}
\sum_{i=K}^{n^{(k)}}
\sum_{i'=K}^{n^{(l)}}
\bar{Y}_{t^{(k)}_i}^{(k)}\bar{Y}_{t^{(l)}_{i'}}^{(l)}
\mathbf{1}_{\left\{\left(t_{i-K}^{(k)},
t_{i}^{(k)}\right] \cap\left(t_{i'-K}^{(l)},
t_{i'}^{(l)}\right] \neq \emptyset\right)}
\end{equation}
where
:math:`\psi_{HY}=\frac{1}{K} \sum_{i=1}^{K-1} g\left(\frac{i}{K}\right)`
The preaveraged HY estimator has optimal convergence rate
:math:`n^{-1/4}`, where :math:`n=\sum_{j=1}^{p} n^{(j)}`.
    Christensen et al. (2013) subsequently prove a central limit theorem for
this estimator and show that it is robust to some dependence structure of
the noise process. Since preaveraging is performed before synchronization,
the estimator utilizes more data than other methods that cancel noise after
synchronization. In particular, the preaveraged HY estimator even uses the
observation :math:`t^{(j)}_2` in the figure, which does not contribute
    to the covariance due to the log-summability.
References
----------
<NAME>. and <NAME>. (2005).
On covariance estimation of
non-synchronously observed diffusion processes,
Bernoulli 11(2): 359–379.
<NAME>., <NAME>. and <NAME>. (2010).
Pre-averaging
estimators of the ex-post covariance matrix in noisy diffusion models
with non-synchronous data, Journal of Econometrics 159(1): 116–133.
<NAME>. and <NAME>. (2013).
Preaveraging-based estimation of
quadratic variation in the presence of noise and jumps: theory,
implementation, and empirical evidence,
Journal of Business & Economic Statistics 31(2): 165–183.
<NAME>., <NAME>. and <NAME>. (2013).
    On covariation estimation for multivariate continuous Itô
semimartingales with noise in non-synchronous observation schemes,
Journal of Multivariate Analysis 120: 59–84.
Examples
--------
>>> np.random.seed(0)
>>> n = 10000
>>> returns = np.random.multivariate_normal([0, 0], [[1,0.5],[0.5,1]], n)/n**0.5
>>> prices = np.exp(returns.cumsum(axis=0))
>>> # sample n/2 (non-synchronous) observations of each tick series
>>> series_a = pd.Series(prices[:, 0]).sample(int(n/2)).sort_index()
>>> series_b = pd.Series(prices[:, 1]).sample(int(n/2)).sort_index()
>>> # take logs
>>> series_a = np.log(series_a)
>>> series_b = np.log(series_b)
>>> icov = hayashi_yoshida([series_a, series_b])
>>> np.round(icov, 3)
array([[0.983, 0.512],
[0.512, 0.99 ]])
"""
indeces, values = _get_indeces_and_values(tick_series_list)
p = indeces.shape[0]
# get log-returns
values = np.diff(values, axis=1)
# do not drop first nan which results from diff since its index
# is used to determine first interval. Instead put column of zeros.
values = np.column_stack((np.zeros(p), values))
cov = _hayashi_yoshida_pairwise(indeces, values, theta, k)
return cov
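# Illustrative usage sketch, not part of the original module: the preaveraged
# variant discussed in the Notes is selected by passing ``theta``. The series
# names below are assumptions, reusing the noisy tick setup from the Examples:
# icov_robust = hayashi_yoshida([series_a, series_b], theta=0.4)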
@numba.njit(cache=False, parallel=True, fastmath=False)
def _hayashi_yoshida_pairwise(indeces, values, theta, k):
r"""
The pairwise computation of the integrated covariance matrix
in :func:`~hayashi_yoshida` using :func:`~_hayashi_yoshida` is
accelerated and parallelized.
Parameters
----------
indeces : numpy.ndarray, shape(p, n_max), dtype='uint64'
The length is equal to the number of assets. Each 'row' contains
the unix time of ticks of one asset.
values : numpy.ndarray, shape(p, n_max), dtype='float64'>0
Each 'row' contains the log-tick-returns of one asset.
theta : float, theta>=0
If ``theta=None`` and ``k`` is not specified explicitly,
theta will be set to 0.
If theta>0, the log-returns are preaveraged with theta and
:math:`g(x) = min(x, 1-x)`. Hautsch and Podolskij (2013) suggest
values between 0.4 (for liquid stocks) and 0.6 (for less
liquid stocks).
If ``theta=0``, this is the standard HY estimator.
k : int, >=1
The bandwidth parameter with which to preaverage. Alternative to
``theta``. Useful for non-parametric eigenvalue regularization based
on sample splitting.
Returns
-------
cov : numpy.ndarray
The pairwise HY estimate of the integrated covariance matrix.
"""
p = indeces.shape[0]
cov = np.zeros((p, p))
# don't loop over ranges but get all indeces in advance
# to improve parallelization.
idx = _upper_triangular_indeces(p)
for t in prange(len(idx)):
i, j = idx[t, :]
        # get the number of non-nan values for asset i and j.
        # This is needed since nan is not defined
        # for int64, which are in the indeces. Hence, I use the fact that
        # values and indeces have the same shape and nans are only at the
        # end of an array.
        n_not_nans_i = values[i][~np.isnan(values[i])].shape[0]
        n_not_nans_j = values[j][~np.isnan(values[j])].shape[0]
# for efficiency set slower trading asset to ``a``.
if n_not_nans_i <= n_not_nans_j:
a_values = values[i, :n_not_nans_i]
a_index = indeces[i, :n_not_nans_i]
b_values = values[j, :n_not_nans_j]
b_index = indeces[j, :n_not_nans_j]
else:
b_values = values[i, :n_not_nans_i]
b_index = indeces[i, :n_not_nans_i]
a_values = values[j, :n_not_nans_j]
a_index = indeces[j, :n_not_nans_j]
hy = _hayashi_yoshida(a_index, b_index,
a_values, b_values,
k, theta)
cov[i, j] = hy
cov[j, i] = hy
return cov
@numba.njit(cache=False, parallel=False, fastmath=False)
def _hayashi_yoshida(a_index, b_index, a_values, b_values, k=None, theta=None):
"""
The inner function of :func:`~hayashi_yoshida` is accelerated
via JIT compilation with Numba.
Parameters
----------
a_index : numpy.ndarray, 1d, dtype='uint64'
A numpy.ndarray containing indeces of trade times. Must be
uint64 since Numba cannot check nan otherwise. Preferably
a should be the slower trading asset.
b_index : numpy.ndarray, 1d, dtype='uint64'
A numpy.ndarray containing indeces of trade times. Must be
uint64 since Numba cannot check nan otherwise.
a_values : numpy.ndarray, 1d
A numpy.ndarray containing log-returns at times given by `a_index`.
The index is determined by the last price, i.e.,
r_t = log(p_t) - log(p_{t-1})
b_values : numpy.ndarray, 1d
A numpy.ndarray containing log-returns. Similar to a_values.
    k : int, default=None
        k is 1 for the standard HY estimator. When preaveraging
        is used to cancel microstructure noise, the step size has to be
        adjusted according to Eqn (27) of Christensen et al. (2010).
    theta : float, default=None
        The preaveraging scale parameter. If given, ``k`` is derived from
        ``theta`` via ``_get_k()`` as recommended in Christensen et al.
        (2010). Either ``theta`` or ``k`` may be specified, but not both.
Returns
-------
hy : float
The HY estimate of the covariance of returns of asset a and asset b.
"""
assert len(a_index) == len(a_values) and len(b_index) == len(b_values), \
'indeces and values must have same length.'
if k is not None and theta is not None:
raise ValueError("Either ``theta`` or ``k`` can be specified,"
" but not both! One of them must be ``None``.")
if theta is None:
if k is None:
# if no preaveraging
k = 1
else:
# If ``theta`` is specified set k as recommended in
# Christensen et al. (2010)
k = _get_k((a_values.shape[0] + b_values.shape[0])/2, theta, True)
if k > 1:
# Preaverage
weight = _numba_minimum(np.arange(1, k)/k)
a_values = _preaverage(a_values.reshape(-1, 1), weight).flatten()
# and adjust acc. to Eqn (27) of Christensen et al. (2010).
# psi_HY = np.sum(g(np.arange(1, k)/k))/k = 1/4 for weight
# function chosen as def g(x): return np.minimum(x, 1-x)
a_values = a_values[k-1:] / (k / 4)
b_values = _preaverage(b_values.reshape(-1, 1), weight).flatten()
b_values = b_values[k-1:] / (k / 4)
a_index = a_index[k-1:]
b_index = b_index[k-1:]
temp = np.zeros(a_index.shape[0], dtype=np.float64)
for i in prange(k, a_index.shape[0]):
start = a_index[i-k]
end = a_index[i]
start_b = np.searchsorted(b_index, start, 'right')
# TODO limit search space e.g. end only after start. Currently
# insignificant speedup. E.g.:
# end_b = np.searchsorted(b_index[start_b:], end, 'left') + start_b
end_b = np.searchsorted(b_index, end, 'left')
# Don't do:
# hy += np.sum(a_values[i] * b_values[start_b: end_b+k])
# since there are problems in parallelization.
temp[i] = np.sum(a_values[i] * b_values[start_b: end_b+k])
hy = np.sum(temp)
return hy
def ensemble(estimates, var_weights, cov_weights):
"""
Ensemble multiple covariance matrix estimates with weights given
by ``var_weights`` and ``cov_weights`` for the diagonal and
off-diagonal elements, respectively. This function is used in the
ensembled pairwise integrated covariance (EPIC) estimator of Woeltjen
    (2020). The :func:`msrc` estimator, the :func:`mrc` estimator, the
:func:`krvm` estimator and the preaveraged :func:`hayashi_yoshida`
estimator are ensembled to compute an improved finite sample estimate
of the pairwise integrated covariance matrix. The EPIC estimator uses every
    available tick and compares favorably in finite samples to its
    constituents on their own. The preaveraged HY estimates of the off-diagonals
    have better finite sample properties than the other estimators, so it might
be preferable to overweight them by setting the corresponding
``cov_weights`` element to a number >1/4.
Parameters
----------
estimates : list of numpy.ndarrays with shape = (p, p)
The covariance matrix estimates.
var_weights : numpy.ndarray
The weights with which the diagonal elements of the MSRC, MRC, and
the preaveraged HY covariance estimates are weighted, respectively.
The weights must sum to one.
cov_weights : numpy.ndarray
The weights with which the off-diagonal elements of the MSRC, MRC, and
the preaveraged HY covariance estimates are weighted, respectively. The
HY estimator uses the data more efficiently and thus may deserve a
higher weight. The weights must sum to one.
Returns
-------
cov : numpy.ndarray
The ensemble estimate of the integrated covariance matrix.
"""
p, p_prime = estimates[0].shape
assert p == p_prime, "The covariance matrices must be square."
cov = np.zeros((p, p))
V = np.eye(p)
C = np.ones((p, p)) - V
for i, estimate in enumerate(estimates):
assert estimate.shape == (p, p), \
"All estimates must have same dimension."
cov += (var_weights[i] * V + cov_weights[i] * C) * estimate
return cov
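# Illustrative usage sketch, not part of the original module: an EPIC-style
# combination as described in the docstring. The estimate names and the
# particular weights are assumptions for illustration only.
# estimates = [cov_msrc, cov_mrc, cov_krvm, cov_hy]
# var_weights = np.array([0.25, 0.25, 0.25, 0.25])
# cov_weights = np.array([0.2, 0.2, 0.2, 0.4])  # overweight the HY off-diagonals
# cov_epic = ensemble(estimates, var_weights, cov_weights)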
```
#### File: hfhd/hfhd/loss.py
```python
import numpy as np
from hfhd import hd
def prial(S_list, sigma_hat_list, sigma, loss_func=None):
r"""
The percentage relative improvement in average loss (PRIAL)
over the sample covariance matrix.
Parameters
----------
S_list : list of numpy.ndarray
        The sample covariance matrices, one per simulation.
    sigma_hat_list : list of numpy.ndarray
        The covariance matrix estimates using the estimator of interest.
sigma : numpy.ndarray
The (true) population covariance matrix.
    loss_func : function, default = None
The loss function. If ``None`` the minimum variance loss function is
used.
Returns
-------
prial : float
The PRIAL.
Notes
-----
    The percentage relative improvement in average loss (PRIAL)
over the sample covariance matrix is given by:
.. math::
\mathrm{PRIAL}_{n}\left(\widehat{\Sigma}_{n}\right):=
\frac{\mathbb{E}\left[\mathcal{L}_{n}\left(S_{n},
\Sigma_{n}\right)\right]-\mathbb{E}\left[\mathcal{L}_{n}
\left(\widehat{\Sigma}_{n}, \Sigma_{n}\right)\right]}
{\mathbb{E}\left[\mathcal{L}_{n}\left(S_{n},
\Sigma_{n}\right)\right]-\mathbb{E}\left[\mathcal{L}_{n}
\left(S_{n}^{*}, \Sigma_{n}\right)\right]} \times 100 \%
"""
if loss_func is None:
loss_func = loss_mv
mean_loss_S = np.mean([loss_func(S, sigma) for S in S_list], axis=0)
mean_loss_sigma_hat = np.mean([loss_func(sigma_hat, sigma)
for sigma_hat in sigma_hat_list], axis=0)
mean_loss_fsopt = np.mean([loss_func(hd.fsopt(S, sigma), sigma)
for S in S_list], axis=0)
denom = mean_loss_S - mean_loss_fsopt
num = mean_loss_S - mean_loss_sigma_hat
if denom != 0:
prial = num / denom * 100
else:
raise ValueError("""PRIAL not defined: The sample covariance attained
the smallest possible loss.""")
return prial
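# Illustrative usage sketch, not part of the original module: a Monte Carlo
# PRIAL evaluation. Every name below except ``prial`` is an assumption
# (``my_estimator``, ``n_sims``, ``p``, ``n`` and ``sigma`` are placeholders).
# S_list, sigma_hat_list = [], []
# for _ in range(n_sims):
#     X = np.random.multivariate_normal(np.zeros(p), sigma, n)
#     S_list.append(np.cov(X, rowvar=False))
#     sigma_hat_list.append(my_estimator(X))
# print(prial(S_list, sigma_hat_list, sigma))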
def loss_mv(sigma_hat, sigma):
r"""
The minimum variance loss function of Ledoit and Wolf (2018).
Parameters
----------
sigma_hat : numpy.ndarray
The covariance matrix estimate using the estimator of interest.
sigma : numpy.ndarray
The (true) population covariance matrix.
Returns
-------
out : float
The minimum variance loss.
Notes
-----
The minimum variance (MV)-loss function is proposed by
<NAME> al. (2019) as a loss function that is appropriate for covariance
matrix estimator evaluation for the problem of minimum variance portfolio
allocations under a linear constraint and large-dimensional asymptotic
theory.
The loss function is given by:
.. math::
\mathcal{L}_{n}^{\mathrm{MV}}\left(\widehat{\Sigma}_{n},
\Sigma_{n}\right):=\frac{\operatorname{Tr}\left(\widehat{\Sigma}_{n}^{-1}
\Sigma_{n} \widehat{\Sigma}_{n}^{-1}\right) / p}
{\left[\operatorname{Tr}\left(\widehat{\Sigma}_{n}^{-1}\right)
/p\right]^{2}}-\frac{1}{\operatorname{Tr}\left(\Sigma_{n}^{-1}\right)/p}.
It can be interpreted as the true variance of the minimum variance
portfolio constructed from the estimated covariance matrix.
"""
p = sigma.shape[0]
sigma_hat_inv = np.linalg.inv(sigma_hat)
sigma_inv = np.linalg.inv(sigma)
num = np.trace(sigma_hat_inv @ sigma @ sigma_hat_inv) / p
denom = (np.trace(sigma_hat_inv) / p) ** 2
    return num / denom - 1 / (np.trace(sigma_inv) / p)
def loss_fr(sigma_hat, sigma):
r"""Squared Frobenius norm scaled by 1/p.
Same as ``np.linalg.norm(sigma_hat - sigma, 'fro')**2 *1/p``.
Parameters
----------
sigma_hat : numpy.ndarray
The covariance matrix estimate using the estimator of interest.
sigma : numpy.ndarray
The (true) population covariance matrix.
Returns
-------
out : float
        The squared Frobenius loss.
Notes
-----
The loss function is given by:
.. math::
\mathcal{L}_{n}^{\mathrm{FR}}\left(\widehat{\Sigma}_{n},
\Sigma_{n}\right):=\frac{1}{p}
\operatorname{Tr}\left[\left(\widehat{\Sigma}_{n}
-\Sigma_{n}\right)^{2}\right]
"""
p = sigma.shape[0]
delta = sigma_hat - sigma
return np.trace(delta @ delta) / p
def marchenko_pastur(x, c, sigma_sq):
r"""
The Marchenko-Pastur distribution. This is the pdf
of eigenvalues of a sample covariance matrix estimate of
the true covariance matrix, which is a``sigma_sq`` scaled identity matrix.
It depends on the concentration ratio ``c``, which is the ratio of
the dimension divided by the number of observations.
Parameters
----------
x : float
The value of the sample eigenvalue.
c : float
The concentration ratio. $c=p/n$.
sigma_sq : float
The value of population eigenvalues.
Returns
-------
p : float
The value of the Marchenko-Pastur distribution at the sample
eigenvalue ``x``.
Notes
-----
The Marchenko-Pastur law states that the limiting spectrum of the sample
    covariance matrix $S = \mathbf{X}\mathbf{X}'/n$ of independent and identically
distributed $p$-dimensional random vectors
$\mathbf{X}=\left(x_{1}, \ldots, x_{n}\right)$
with mean $\mathbf{0}$ and covariance matrix
$\mathbf{\Sigma}=\sigma^{2} \mathbf{I}_{p}$, has density
\begin{equation}
f_{c}(x)=\left\{\begin{array}{ll}
\frac{1}{2 \pi x c \sigma^{2}} \sqrt{(b-x)(x-a)}, & a \leq x \leq b \\
0, & \text { otherwise, }
\end{array}\right.
\end{equation}
where the smallest and the largest eigenvalues are given by
$a=\sigma^{2}(1-\sqrt{c})^{2}$ and $b=\sigma^{2}(1+\sqrt{c})^{2}$,
respectively, as $p, n \rightarrow \infty$ with $p / n \rightarrow c>0$.
References
----------
<NAME>. and <NAME>. (1967).
Distribution of eigenvalues for some sets of random matrices,
Matematicheskii Sbornik 114(4): 507–536.
"""
a = sigma_sq*(1-np.sqrt(c))**2
b = sigma_sq*(1+np.sqrt(c))**2
if a <= x <= b:
p = 1/(2*np.pi*x*c*sigma_sq)*np.sqrt((b-x)*(x-a))
else:
p = 0
return p
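# Illustrative usage sketch, not part of the original module: evaluate the
# density on a grid. The concentration ratio and variance are arbitrary.
# grid = np.linspace(0.01, 4.0, 200)
# density = [marchenko_pastur(x, c=0.5, sigma_sq=1.0) for x in grid]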
``` |
{
"source": "jpwoodbu/ari-backup",
"score": 2
} |
#### File: ari-backup/ari_backup/lvm.py
```python
import copy
import os
import gflags
import rdiff_backup_wrapper
FLAGS = gflags.FLAGS
gflags.DEFINE_string(
'snapshot_mount_root', '/tmp',
'root path for creating temporary directories for mounting LVM snapshots')
gflags.DEFINE_string('snapshot_suffix', '-ari_backup',
'suffix for LVM snapshots')
class LVMSourceMixIn(object):
"""MixIn class to work with LVM based backup sources.
This class registers pre-job and post-job hooks to create and mount LVM
snapshots before and after a backup job.
This class depends on the source_hostname instance variable which should be
defined by any subclass of workflow.BaseWorkFlow that also uses this mixin.
"""
def __init__(self, *args, **kwargs):
super(LVMSourceMixIn, self).__init__(*args, **kwargs)
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.snapshot_mount_root = FLAGS.snapshot_mount_root
self.snapshot_suffix = FLAGS.snapshot_suffix
# This is a list of 3-tuples, where each inner 3-tuple expresses the LV
# to back up, the mount point for that LV, and any mount options
        # necessary. For example: [('hostname/root', '/', 'noatime'),]
# TODO(jpwoodbu) I wonder if noatime being used all the time makes
# sense to improve read performance and reduce writes to the snapshots.
self._logical_volumes = list()
# A list of dicts with the snapshot paths and where they should be
# mounted.
self._lv_snapshots = list()
# Mount the snapshots in a directory named for this job's label.
self._snapshot_mount_point_base_path = os.path.join(
self.snapshot_mount_root, self.label)
# Set up pre and post job hooks to manage snapshot workflow.
self.add_pre_hook(self._create_snapshots)
self.add_pre_hook(self._mount_snapshots)
self.add_post_hook(self._umount_snapshots)
self.add_post_hook(self._delete_snapshots)
# Maintain backward compatibility with old hooks interface.
@property
def lv_list(self):
self.logger.warning(
'lv_list is deprecated. Please use add_volume() instead.')
return self._logical_volumes
@lv_list.setter
def lv_list(self, value):
self.logger.warning(
'lv_list is deprecated. Please use add_volume() instead.')
self._logical_volumes = value
def add_volume(self, name, mount_point, mount_options=None):
"""Adds logical volume to list of volumes to be backed up.
Args:
name: str, full logical volume path (with volume group) in
group/volume_name format.
mount_point: str, path where the volume should be mounted during
the backup. This is normally the same path where the volume is
normally mounted. For example, if the volume is normally
mounted at /var/www, the value passed here should be /var/www
if you want this data to be in the /var/www directory in the
backup.
mount_options: str or None, mount options to be applied when
mounting the snapshot. For example, "noatime,ro". Defaults to
None which applies no mount options.
"""
volume = (name, mount_point, mount_options)
self._logical_volumes.append(volume)
def _create_snapshots(self):
"""Creates snapshots of all the volumns added with add_volume()."""
self.logger.info('Creating LVM snapshots...')
for volume in self._logical_volumes:
            # TODO(jpwoodbu) This try/except won't be necessary when the
# deprecated interface to the self.lv_list is removed.
try:
lv_path, src_mount_path, mount_options = volume
except ValueError:
lv_path, src_mount_path = volume
mount_options = None
vg_name, lv_name = lv_path.split('/')
new_lv_name = lv_name + self.snapshot_suffix
mount_path = (
'{snapshot_mp_bp}{src_mount_path}'.format(
snapshot_mp_bp=self._snapshot_mount_point_base_path,
src_mount_path=src_mount_path))
# TODO(jpwoodbu) Is it really OK to always make a 1GB exception
# table?
command = ['lvcreate', '-s', '-L', '1G', lv_path, '-n',
new_lv_name]
self.run_command(command, self.source_hostname)
self._lv_snapshots.append({
'lv_path': vg_name + '/' + new_lv_name,
'mount_path': mount_path,
'mount_options': mount_options,
'created': True,
'mount_point_created': False,
'mounted': False,
})
def _delete_snapshots(self, error_case=None):
"""Deletes tracked snapshots.
Args:
error_case: bool or None, whether an error has occurred during the
backup. Default is None. This method does not use this arg but
must accept it as part of the post hook API.
"""
self.logger.info('Deleting LVM snapshots...')
for snapshot in self._lv_snapshots:
if snapshot['created']:
lv_path = snapshot['lv_path']
# -f makes lvremove not interactive
command = ['lvremove', '-f', lv_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['created'] = False
def _mount_snapshots(self):
"""Creates mountpoints as well as mounts the snapshots.
If the mountpoint directory already has a file system mounted then we
raise Exception. Metadata is updated whenever a snapshot is
successfully mounted so that _umount_snapshots() knows which
snapshots to try to umount.
TODO(jpwoodbu) Add mount_options to documentation for backup config
files.
"""
self.logger.info('Mounting LVM snapshots...')
for snapshot in self._lv_snapshots:
lv_path = snapshot['lv_path']
device_path = '/dev/' + lv_path
mount_path = snapshot['mount_path']
mount_options = snapshot['mount_options']
# mkdir the mount point
command = ['mkdir', '-p', mount_path]
self.run_command(command, self.source_hostname)
snapshot['mount_point_created'] = True
# If where we want to mount our LV is already a mount point then
# let's back out.
if os.path.ismount(mount_path):
raise Exception(
'{mount_path} is already a mount point.'.format(
mount_path=mount_path))
# mount the LV, possibly with mount options
if mount_options:
command = ['mount', '-o', mount_options, device_path,
mount_path]
else:
command = ['mount', device_path, mount_path]
self.run_command(command, self.source_hostname)
snapshot['mounted'] = True
def _umount_snapshots(self, error_case=None):
"""Umounts mounted snapshots in self._lv_snapshots.
Args:
error_case: bool or None, whether an error has occurred during the
backup. Default is None. This method does not use this arg but
must accept it as part of the post hook API.
"""
# TODO(jpwoodbu) If the user doesn't put '/' in their _includes, then
# we'll end up with directories around where the snapshots are mounted
# that will not get cleaned up. We should probably add functionality to
# make sure the "label" directory is recursively removed. Check out
# shutil.rmtree() to help resolve this issue.
self.logger.info('Umounting LVM snapshots...')
# We need a local copy of the _lv_snapshots list to muck with in this
# method.
local_lv_snapshots = copy.copy(self._lv_snapshots)
# We want to umount these logical volumes in reverse order as this
# should ensure that we umount the deepest paths first.
local_lv_snapshots.reverse()
for snapshot in local_lv_snapshots:
mount_path = snapshot['mount_path']
if snapshot['mounted']:
command = ['umount', mount_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['mounted'] = False
if snapshot['mount_point_created']:
command = ['rmdir', mount_path]
self.run_command_with_retries(command, self.source_hostname)
snapshot['mount_point_created'] = False
class RdiffLVMBackup(LVMSourceMixIn, rdiff_backup_wrapper.RdiffBackup):
"""Subclass to add LVM snapshot management to RdiffBackup."""
def __init__(self, *args, **kwargs):
super(RdiffLVMBackup, self).__init__(*args, **kwargs)
def _prefix_mount_point_to_paths(self, paths):
"""Prefixes the snapshot_mount_point_base_path to each path in paths.
Args:
paths: list, list of strings representing paths for the backup
config.
Returns:
List of strings with the given paths prefixed with the base path
where the snapshots are mounted.
"""
new_paths = list()
for path in paths:
new_path = '{snapshot_mp_bp}{path}'.format(
snapshot_mp_bp=self._snapshot_mount_point_base_path,
path=path)
new_paths.append(new_path)
return new_paths
def _run_custom_workflow(self):
"""Run backup of LVM snapshots.
This method overrides the base class's _run_custom_workflow() so that
we can modify the includes and excludes to have the
_snapshot_mount_point_base_path prefixed to their paths. This allows
the user to configure what to backup from the perspective of the file
system on the snapshot itself.
"""
self.logger.debug('RdiffLVMBackup._run_custom_workflow started.')
# Cook the self._includes and self._excludes so that the src paths
# include the mount path for the logical volumes.
self._includes = self._prefix_mount_point_to_paths(self._includes)
self._excludes = self._prefix_mount_point_to_paths(self._excludes)
# After changing the top-level src dir to where the snapshots are
# mounted, have the base class perform an rdiff-backup.
self.top_level_src_dir = self._snapshot_mount_point_base_path
super(RdiffLVMBackup, self)._run_custom_workflow()
self.logger.debug('RdiffLVMBackup._run_custom_workflow completed.')
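# Illustrative configuration sketch, not part of the original module: how a
# backup config might wire up RdiffLVMBackup with add_volume(). The
# constructor keyword arguments and the include()/run() calls are assumptions;
# they come from the rdiff_backup_wrapper/workflow base classes not shown here.
# backup = RdiffLVMBackup(label='webserver', source_hostname='webserver')
# backup.add_volume('vg0/root', '/', 'noatime')
# backup.add_volume('vg0/www', '/var/www')
# backup.include('/')  # hypothetical include API on the rdiff-backup wrapper
# backup.run()         # hypothetical entry point on the workflow base class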
```
#### File: ari-backup/ari_backup/style_test.py
```python
import os
import subprocess
import unittest
REPO_PATH = os.path.dirname(os.path.join(os.path.dirname(__file__), '..'))
class StyleTest(unittest.TestCase):
"""
This test class contains code style enforcement tests.
If this test fails, please make sure you are in compliance with PEP-8[0].
The test should have printed the problems to the screen when you ran
"python setup.py test", but you can also manually invoke this test by
running "flake8 ." at the root of this repository.
[0] https://www.python.org/dev/peps/pep-0008/
"""
def test_pep8(self):
"""This test makes sure the code is PEP-8 compliant."""
flake8_command = ['/usr/bin/flake8', REPO_PATH]
self.assertEqual(subprocess.call(flake8_command), 0)
```
#### File: ari-backup/ari_backup/test_lib.py
```python
import copy
import mock
import gflags
FLAGS = gflags.FLAGS
class FlagSaverMixIn(object):
"""A mix in class to preserve gflags values between tests.
    This class can be subclassed by test classes to permit tests to safely
modify the values of gflags. The original value will be restored after the
test completes.
"""
def setUp(self):
super(FlagSaverMixIn, self).setUp()
self._save_flags()
def tearDown(self):
super(FlagSaverMixIn, self).tearDown()
self._restore_flags()
def _save_flags(self):
self._flag_values = copy.deepcopy(FLAGS.__dict__)
def _restore_flags(self):
FLAGS.__dict__.update(self._flag_values)
def GetMockCommandRunner():
"""Creates a mock version of workflow.CommandRunner helpful for testing.
This mock is useful for testing how the run() method of the CommandRunner
is called. To avoid breaking the flow of code under test, it always returns
an empty string for stdout and stderr and a returncode of 0 (success).
    Returns:
        A mock.MagicMock standing in for a CommandRunner whose run() method
        always returns ('', '', 0): the stdout, the stderr, and the return
        code of the executed command.
"""
mock_command_runner = mock.MagicMock()
mock_command_runner.run = mock.MagicMock()
stdout = str()
stderr = str()
returncode = 0
mock_command_runner.run.return_value = (stdout, stderr, returncode)
# Attach the AssertCallsInOrder function as a function on the returned
# object to make using the AssertCallsInOrder function more convenient (the
# user won't have to pass in the mock object with the recorded calls).
mock_command_runner.AssertCallsInOrder = GetAssertCallsInOrderWrapper(
mock_command_runner.run)
return mock_command_runner
def GetAssertCallsInOrderWrapper(mock_object):
"""Convenience wrapper around AssertCallsInOrder.
This function returns a wrapper around AssertCallsInOrder which already
has a reference to the mock object with the record of calls made on the
mock.
Args:
mock_object: mock.Mock, the mock object which will contain the record
of calls.
Returns:
A callable which acts like AssertCallsInOrder but only requires passing
the calls argument.
"""
def wrapper(calls):
return AssertCallsInOrder(mock_object, calls)
return wrapper
def AssertCallsInOrder(mock_object, calls):
"""Test whether calls on a mock object are called in a particular order.
This test doesn't care whether all the calls recorded on the mock are
present in the given "calls" argument. It does care that all calls in the
"calls" argument are present in the calls recorded on the mock, and that
the order of those calls matches the order in the "calls" argument.
Args:
mock_object: mock.Mock, a mock object with a recorded list of calls.
calls: list, a list of mock.call objects
Raises:
AssertionError: When any single expected call object is missing from
the recorded calls in the mock.
AssertionError: When the expected calls are not in the expected order in
the recorded calls in the mock.
"""
call_indexes = list()
recorded_calls = mock_object.mock_calls
for call in calls:
try:
call_indexes.append(recorded_calls.index(call))
except ValueError:
raise AssertionError('{} missing from {}'.format(call,
recorded_calls))
sorted_call_indexes = copy.copy(call_indexes)
sorted_call_indexes.sort()
if call_indexes != sorted_call_indexes:
raise AssertionError(
'{} are not in the expected order within {}.'.format(
calls, recorded_calls))
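# Illustrative usage sketch, not part of the original module: checking call
# order on the mock command runner. The commands and hostname are assumptions.
# runner = GetMockCommandRunner()
# runner.run(['lvcreate', '-s', '-L', '1G', 'vg0/root', '-n', 'snap'], 'host1')
# runner.run(['mount', '/dev/vg0/snap', '/tmp/label/'], 'host1')
# runner.AssertCallsInOrder([
#     mock.call(['lvcreate', '-s', '-L', '1G', 'vg0/root', '-n', 'snap'], 'host1'),
#     mock.call(['mount', '/dev/vg0/snap', '/tmp/label/'], 'host1'),
# ])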
``` |
{
"source": "jpwoodbu/dontcamp_bf2",
"score": 2
} |
#### File: jpwoodbu/dontcamp_bf2/dc_ass_extensions.py
```python
import bf2
import host
import time
import new # for kickstick
import re
from dc_debug import decho
def sayNextMap(unused=None):
#It works. We'll just leave it at that.
decho( 'The next map is %s' % (host.rcon_invoke('maplist.list').splitlines()[int(host.rcon_invoke('admin.nextLevel').strip())].split()[1].strip('"').replace('_', ' ').title()), 1 )
def getMyKeyhash(admin):
decho( 'dc_ass: %s, your keyhash is %s' % (bf2.PlayerManager.Player(admin.issuer.index).getName(), admin.issuer.keyhash), 1 )
def privGetMyKeyhash(admin):
host.rcon_feedback( admin.issuer.index, '%s, your keyhash is %s' % (bf2.PlayerManager.Player(admin.issuer.index).getName(), admin.issuer.keyhash) )
decho( 'dc_ass: %s, check your console for your keyhash' % bf2.PlayerManager.Player(admin.issuer.index).getName(), 1 )
def getStatus(admin):
# if the issuer is an admin
decho('dc_ass: debug 1', 5)
if admin.issuer.level == admin.adminLevel:
decho('dc_ass: debug 2', 5)
# if no argument was given just print the status of the issuer
if admin.command.arguments == None:
decho('dc_ass: debug 3', 5)
decho( "dc_ass: %s has %d of %d kick points" % (bf2.PlayerManager.Player(admin.issuer.index).getName(), admin.getPointsFromIndex(admin.issuer.index), admin.kickThreshold), 1 )
# get victimIDs
elif admin.getVictimIDs(admin.command.arguments):
decho('dc_ass: debug 4', 5)
for vID in admin.victimID:
decho('dc_ass: debug 5', 5)
decho( 'dc_ass: %s has %d of %d kick points' % (bf2.PlayerManager.Player(vID).getName(), admin.getPointsFromIndex(vID), admin.kickThreshold), 1 )
# if the issuer is NOT an admin
else:
decho('dc_ass: debug 7', 5)
decho( 'dc_ass: %s has %d of %d kick points' % (bf2.PlayerManager.Player(admin.issuer.index).getName(), admin.getPointsFromIndex(admin.issuer.index), admin.kickThreshold), 1 )
def clearPoints(admin):
admin.getVictimIDs(admin.command.arguments)
for vId in admin.victimID:
if admin.victimTracker.has_key(vId):
admin.victimTracker.pop(vId)
        # I figure I can say they're all cleared even if we're not tracking them... yet
decho( "dc_ass: Points for %s have been cleared" % bf2.PlayerManager.Player(vId).getName(), 1 )
def adminKickVote(admin):
admin.getVictimIDs(admin.command.arguments)
if admin.issuer.level == admin.adminLevel:
admin.issuer.level = 4
admin.processPoints()
def forgiveTK(admin):
#Have a victim?
if admin.getVictimIDs(admin.command.arguments):
#Is the victim in victimTracker?
if admin.victimTracker.has_key(admin.victimID[0]):
#If so, is the issuer's keyhash in the victim's tracker?
if admin.victimTracker[admin.victimID[0]].has_key(admin.issuer.keyhash):
#If the reason in the tracker is the same reason we're looking for, adjust points
if admin.command.reason == admin.victimTracker[admin.victimID[0]][admin.issuer.keyhash]['reason']:
#Adjust points, check them, and escape from the function.
if admin.manageTracker(admin.victimID[0], admin.issuer.keyhash, -2, admin.command.reason):
decho('ftk 6', 5)
admin.checkPoints()
return
#If any preceding step failed, we have no TK to forgive.
decho('dc_ass: No teamkills to forgive for '+bf2.PlayerManager.Player(admin.victimID[0]).getName(), 1)
def getCommands(admin):
output = "kick commands:\n"
for cmd in admin.config.sections():
if admin.config.get(cmd, 'type').strip() == 'kick':
output += "%s - %s" % (cmd, admin.config.get(cmd, 'reason').strip()) + "\n"
output += "\nban commands:\n"
for cmd in admin.config.sections():
if admin.config.get(cmd, 'type').strip() == 'ban':
output += "%s - %s" % (cmd, admin.config.get(cmd, 'reason').strip()) + "\n"
output += "\nrcon commands:\n"
for cmd in admin.config.sections():
if admin.config.get(cmd, 'type').strip() == 'rcon':
output += "%s" % cmd + "\n"
output += "\nextensions:\n"
for cmd in admin.config.sections():
if admin.config.get(cmd, 'type').strip() == 'extension':
output += "%s" % cmd + "\n"
host.rcon_feedback( admin.issuer.index, output )
def customKick(admin):
slicePoint = admin.command.arguments.find(' ')
if slicePoint == -1:
decho( 'dc_ass: 2 arguments are required for this command', 1 )
else:
argVictim = admin.command.arguments[:slicePoint]
admin.command.reason = admin.command.arguments[slicePoint + 1:]
if admin.getVictimIDs(argVictim):
admin.processPoints()
def switchTeam(admin):
admin.getVictimIDs(admin.command.arguments)
for vID in admin.victimID:
decho('debug 1', 5)
p = bf2.PlayerManager.Player(vID)
# if they were not in a vehicle
if killPlayerAtIndex(vID):
decho( '%s is on team %d' % ( p.getName(), p.getTeam() ), 5 )
if p.getTeam() == 1:
p.setTeam(2)
decho( 'dc_ass: switched %s to team 2' % p.getName(), 1)
else:
p.setTeam(1)
decho( 'dc_ass: switched %s to team 1' % p.getName(), 1)
else:
decho( 'dc_ass: unable to switch teams for %s' % p.getName(), 1 )
def tacticalSwitchTeam(admin):
splitArgs = admin.splitArguments(admin.command.arguments)
splitArgsLen = len(splitArgs)
if splitArgsLen == 1:
roundNum = int(splitArgs[0])
if roundNum == 1:
host.rcon_invoke('admin.restartmap');
host.rcon_invoke('sv.startDelay 900');
elif roundNum == 2:
host.rcon_invoke('admin.restartmap');
host.rcon_invoke('sv.startDelay 300');
admin.command.arguments = '@'
switchTeam(admin)
elif roundNum == 3:
host.rcon_invoke('admin.restartmap');
host.rcon_invoke('sv.startDelay 600');
elif roundNum == 4:
host.rcon_invoke('admin.restartmap');
host.rcon_invoke('sv.startDelay 300');
admin.command.arguments = '@'
switchTeam(admin)
else:
decho('dc_ass: Argument must be a valid tactical round number', 1)
else:
decho('dc_ass: Number of arguments for !tst is 1', 1)
def kill(admin):
argReason = None
argSpawnTime = None
argVictim = None
splitArgs = admin.splitArguments(admin.command.arguments)
splitArgsLen = len(splitArgs)
if splitArgsLen > 2:
argReason = splitArgs[2]
if splitArgsLen > 1:
argSpawnTime = int(splitArgs[1])
if splitArgsLen > 0:
argVictim = splitArgs[0]
admin.getVictimIDs(argVictim)
for vID in admin.victimID:
if not killPlayerAtIndex(vID):
decho( 'dc_ass: unable to kill %s' % bf2.PlayerManager.Player(vID).getName(), 1 )
else:
if argSpawnTime != None:
bf2.PlayerManager.Player(vID).setTimeToSpawn(argSpawnTime)
if argReason != None:
decho( 'dc_ass: %s was killed \'%s\'' % ( bf2.PlayerManager.Player(vID).getName(), argReason ), 1 )
else:
decho( 'dc_ass: %s was killed via admin system' % bf2.PlayerManager.Player(vID).getName(), 1 )
def killPlayerAtIndex(index):
# we had a dirty function that did this before but the POE2 guy's code was a bit prettier. So, thanks guys!
# set default returnValue
p = bf2.PlayerManager.Player(index)
if p:
# we'll return true if we found someone at index
returnValue = True
# make some vars!
playerVehicle = p.getVehicle()
playerDefaultVehicle = p.getDefaultVehicle()
parent = playerVehicle.getParent()
parentDefault = playerDefaultVehicle.getParent()
# if player is not driving a vehicle or on a vehicle's gun
if playerVehicle == playerDefaultVehicle:
# player using parachute
if parentDefault:
playerDefaultVehicle.setDamage(0.01)
else:
playerDefaultVehicle.setDamage(0.0)
else:
playerDefaultVehicle.setDamage(0.01)
playerVehicle.setDamage(0.01)
else:
returnValue = False
return returnValue
def setTickets(admin):
slicePoint = admin.command.arguments.find(' ')
if slicePoint == -1:
arg = int(admin.command.arguments)
if arg > 999 or arg < 0:
decho( 'dc_ass: First argument must be a valid team number or a ticket value for both teams.', 1)
return
else:
bf2.gameLogic.setTickets(1, arg)
bf2.gameLogic.setTickets(2, arg)
else:
argTeam = int(admin.command.arguments[:slicePoint])
argTickets = int(admin.command.arguments[slicePoint + 1:])
if argTeam != 1 and argTeam != 2:
decho( 'dc_ass: First argument must be a valid team number or a ticket value for both teams. ', 1)
return
if argTickets > 999 or argTickets < 0:
decho( 'dc_ass: Second argument must be a valid ticket value.', 1)
return
bf2.gameLogic.setTickets(argTeam, argTickets)
def showTime(admin):
decho( 'The time is %s' % time.strftime('%H:%M:%S'), 1 )
def setNextMap(admin):
    # default result value
result = False
argSize = None
argGPM = None
argName = None
# split our args
splitArgs = admin.splitArguments(admin.command.arguments)
splitArgsLen = len(splitArgs)
# this is for future use, right now we just use the argName
if splitArgsLen > 2:
argSize = splitArgs[2]
if splitArgsLen > 1:
argGPM = splitArgs[1]
if splitArgsLen > 0:
argName = splitArgs[0]
if argName == None:
decho( 'dc_ass: (ERROR) at least one argument is required', 1 )
else:
        # set a sentinel value for id so we know if we've found a map yet
id = -1
mapList = admin.getMapList()
# search our maplist
for mapID, mapData in mapList.iteritems():
if mapData['name'].lower().find( argName.lower() ) != -1:
decho( 'dc_ass: %s loosely matches %s' % ( argName, mapData['name'] ), 5 )
# if this is the first map we've found...
if id == -1:
decho( 'dc_ass: found %s in %s' % ( argName, mapData['name'] ), 5 )
id = mapID
result = True
# if we've gotten another possible match...
else:
result = False
break
if id != -1:
if result:
decho( 'dc_ass: mapID found @ %d' % id, 5 )
if host.rcon_invoke('admin.nextLevel %d' % id):
decho( 'dc_ass: nextmap will be %s' % mapList[id]['name'], 1 )
else:
decho( 'dc_ass: (ERROR) failed to set nextmap', 1 )
else:
decho( 'dc_ass: %s is ambiguous.' % argName, 1 )
else:
decho( 'dc_ass: no maps can be matched to %s' % argName, 1 )
def showMapList(admin):
mapList = admin.getMapList()
output = "current maplist:\n"
for mapID, mapData in mapList.iteritems():
output += mapData['name'] + ' ' + mapData['gpm'] + ' ' + mapData['size'] + "\n"
host.rcon_feedback( admin.issuer.index, output )
```
#### File: jpwoodbu/dontcamp_bf2/dc_debug.py
```python
import host
import time
try:
debug_level_fh = open('admin/standard_admin/dc_debug.level', 'r')
debug_level = int(debug_level_fh.read(1))
debug_level_fh.close()
except:
debug_level = 5
log_to_file = True
log_filename = 'dc_debug.log'
log_to_stdout = False
def decho(msg, level = 1):
# 1 = info w/ in-game feedback
# 2 = info
# 5 = debug
if debug_level > 0 and debug_level >= level:
string = time.strftime('%Y-%m-%d %H:%M:%S') + ' - ' + msg
if log_to_stdout:
print string
if log_to_file:
fh = open(log_filename, 'a')
fh.write(string + '\n')
fh.close()
if level == 1:
host.rcon_invoke('game.sayall "%s"' % msg)
def showLevel():
decho('dc_debug: The current debug level is %d' % debug_level, 2)
``` |
{
"source": "jpwright/foobot-slack",
"score": 3
} |
#### File: jpwright/foobot-slack/foobot_grapher.py
```python
from pyfoobot import Foobot
import requests
import matplotlib
matplotlib.use('Agg')
import matplotlib.dates
import matplotlib.pyplot
import datetime
from imgurpython import ImgurClient
import ConfigParser
def getSensorReadings(notify):
config = ConfigParser.ConfigParser()
config.read("config.txt")
settings = {
'foobot_api_key': '',
'foobot_email': '',
'foobot_password': '',
'imgur_id': '',
'imgur_secret': '',
'slack_webhook': '',
'averaging_period': 15,
'periods_to_graph': 12,
'threshold_pm': 25.0,
'threshold_temperature': 26.5,
'threshold_humidity': 60.0,
'threshold_co2': 30000.0,
'threshold_tvoc': 500.0
}
for settings_key in settings:
try:
value_to_set = config.get('default', settings_key)
settings[settings_key] = value_to_set
except:
pass
imgur_supported = False
if (len(settings['imgur_id']) > 0 and len(settings['imgur_secret']) > 0):
imgur_supported = True
imgur = ImgurClient(settings['imgur_id'], settings['imgur_secret'])
fb = Foobot(settings['foobot_api_key'], settings['foobot_email'], settings['foobot_password'])
devices = fb.devices()
device = devices[0]
measurement_interval = 60*(int(settings['averaging_period']) * int(settings['periods_to_graph']))
data = device.data_period(measurement_interval, 0)
alerts = []
labels = ["PM2.5", "Temperature", "Humidity", "CO2", "tVOC"]
units = ["ug/m3", "C", "%", "ppm", "ppb"]
max_vals = [0, 0, 0, 0, 0]
sums = [0, 0, 0, 0, 0]
datapoints = [[], [], [], [], []]
timeseries = []
thresholds = [
float(settings['threshold_pm']),
float(settings['threshold_temperature']),
float(settings['threshold_humidity']),
float(settings['threshold_co2']),
float(settings['threshold_tvoc'])
]
num_averaging_samples = int(len(data['datapoints']) / int(settings['periods_to_graph']))
for i in range(0, len(data['datapoints'])):
datapoint = data['datapoints'][i]
time = datapoint[0]
pm = datapoint[1]
tmp = datapoint[2]
hum = datapoint[3]
co2 = datapoint[4]
voc = datapoint[5]
allpollu = datapoint[6]
for j in range(0, 5):
datapoints[j].append(datapoint[j+1])
if (i >= (len(data['datapoints']) - num_averaging_samples)):
sums[j] += datapoint[j+1]
            if datapoint[j+1] > max_vals[j]:
                max_vals[j] = datapoint[j+1]
timeseries.append(datetime.datetime.fromtimestamp(time))
hours = matplotlib.dates.HourLocator()
minutes = matplotlib.dates.MinuteLocator(interval = 10)
hoursFmt = matplotlib.dates.DateFormatter('%-I:%M')
if notify:
for i in range(0, 5):
sums[i] = sums[i] / num_averaging_samples
if sums[i] > thresholds[i]:
print("Sending alert for "+labels[i])
fig, ax = matplotlib.pyplot.subplots()
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
matplotlib.pyplot.xlabel("Time")
matplotlib.pyplot.ylabel(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
matplotlib.pyplot.savefig("figure.png")
if imgur_supported:
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
payload = '{"text": "Warning: '+labels[i]+' levels at '+"{0:.2f}".format(sums[i])+' '+units[i]+'.", "attachments": [{"fallback": "Graph.", "image_url": "'+image["link"]+'"}]}'
r = requests.post("https://hooks.slack.com/services/"+settings['slack_webhook'], data={"payload": payload})
else:
fig, axarr = matplotlib.pyplot.subplots(1,5)
for i in range(0, 5):
ax = axarr[i]
ax.plot(timeseries, datapoints[i])
ax.xaxis.set_major_locator(hours)
ax.xaxis.set_major_formatter(hoursFmt)
ax.grid(True)
ax.set_xlabel("Time")
ax.set_title(labels[i] + " ("+units[i]+")")
fig.autofmt_xdate()
fig.set_size_inches(18, 4)
matplotlib.pyplot.savefig("figure.png", bbox_inches='tight')
if (imgur_supported):
image = imgur.upload_from_path("figure.png", anon=True)
else:
image = {"link": "http://imgur.not.supported.com/alter_your_config.txt"}
return image["link"]
if __name__ == "__main__":
getSensorReadings(True)
``` |
{
"source": "jpwsutton/phat_iotf",
"score": 2
} |
#### File: jpwsutton/phat_iotf/sprites.py
```python
import scrollphat
face_normal_happy = ['00001001000',
'00001001000',
'00000000000',
'00010000100',
'00001111000']
face_normal_tongue = ['00001001000',
'00001001000',
'00000000000',
'00001111000',
'00000011000']
face_normal_wink = ['00000001000',
'00001001000',
'00000000000',
'00010000100',
'00001111000']
face_normal_sad = ['00001001000',
'00001001000',
'00000000000',
'00001111000',
'00010000100']
face_normal_meh = ['00001001000',
'00001001000',
'00000000000',
'00001111000',
'00000000000']
face_normal_shock = ['00001001000',
'00001001000',
'00000000000',
'00000110000',
'00000110000']
face_chibbi_happy = ['00010001000',
'00101010100',
'00000000000',
'00010001000',
'00001110000']
face_chibbi_sad = ['00010001000',
'00101010100',
'00000000000',
'00001110000',
'00010001000']
face_chibbi_shock = ['00000000000',
'01000000100',
'00100001000',
'01001100100',
'00001100000']
face_chibbi_meh = ['00000000000',
'01000000100',
'00100001000',
'01000000100',
'00011110000']
face_chibbi_dead = ['10100001010',
'01000000100',
'10100001010',
'00000000000',
'00011110000']
face_chibbi_ugh = ['01010010100',
'01010010100',
'00100001000',
'00000000000',
'00011110000']
def setCell(row, col, cell):
    # Assumption: scrollphat.set_pixel expects (x, y, value), where x is the
    # column (0-10) and y the row (0-4) on the 11x5 display, so pass col first.
    if cell == '0':
        scrollphat.set_pixel(col, row, 0)
    else:
        scrollphat.set_pixel(col, row, 1)
# Displays a sprite defined in an array of 5
def displaySprite(sprite):
    for rowNum, row in enumerate(sprite):
        for colNum, cell in enumerate(row):
            setCell(rowNum, colNum, cell)
scrollphat.update()
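# Illustrative usage, not part of the original script: render one of the
# predefined 11x5 faces on the Scroll pHAT.
# displaySprite(face_normal_happy)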
``` |
{
"source": "jpxor/Investment-Dashboard-Builder",
"score": 2
} |
#### File: Investment-Dashboard-Builder/src/utils.py
```python
import calendar
import datetime
from openpyxl import load_workbook
#==========================
def add_months(sourcedate, num_months):
month = sourcedate.month - 1 + num_months
year = int(sourcedate.year + month / 12 )
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year,month)[1])
return datetime.date(year,month,day)
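# Illustrative worked example, not part of the original module: month
# arithmetic clamps the day to the end of the target month.
# add_months(datetime.date(2020, 1, 31), 1)  # -> datetime.date(2020, 2, 29)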
#==========================
def dateNotInRange( startDate, testDate, endDate ):
    # compare at month granularity; the range is inclusive of both endpoints
    start = (startDate.year, startDate.month)
    test = (testDate.year, testDate.month)
    end = (endDate.year, endDate.month)
    return not (start <= test <= end)
#==========================
def dateInRange( startDate, testDate, endDate ):
return not dateNotInRange( startDate, testDate, endDate )
#==========================
def loadDataFromExcel(filepath):
wb = load_workbook(filename=filepath, read_only=True, data_only=True)
ws_info = wb["Info"]
data = {}
#get date range
dateRange = []
data["dateRange"] = dateRange
startDate = ws_info["E3"].value
endDate = ws_info["E4"].value
dateRange.append(startDate)
tmpdate = add_months(startDate, 1)
while dateInRange( startDate, tmpdate, endDate ):
dateRange.append( tmpdate )
tmpdate = add_months(tmpdate, 1)
#get account names (and init data structures)
accounts = {}
data["accounts"] = accounts
column = "B{}"
row = 3
while True:
cell = column.format(row)
accountName = ws_info[cell].value
if accountName == None:
break #stop loop
else:
accounts[accountName] = {} #create data structure
accounts[accountName]["row"] = 2*row-3
accounts[accountName]["value"] = []
accounts[accountName]["cw"] = []
row = row + 1
#for each year (worksheets)
for year in range( startDate.year, endDate.year+1 ):
ws_year = wb[ str(year) ]
#for each valid month
for column in range(3, 3+12):
date = ws_year.cell(row=2, column=column).value
if dateNotInRange( startDate, date, endDate ):
continue #next loop iteration (ie: skip this date)
#collect data (account value and contributions/withdrawals) for each account
for accountName,accountData in accounts.items():
row = accountData["row"]
value = ws_year.cell( row=row, column=column ).value
accountData["value"].append( 0 if value==None else value )
cw = ws_year.cell( row=(row+1), column=column ).value
accountData["cw"].append( 0 if cw==None else cw )
return data
#==========================
def builddashboard(filename, plot1, plot2, plot3, plot4, plot5, bgcolor_hex):
htmltext = """
<!DOCTYPE html>
<html>
<head>
<style>
.chart-stage {{
display: inline-block;
margin: 0.3%;
border: 1px solid #444444;
}}
</style>
</head>
<body style="background-color:{};">
<div class="chart-stage" style="width:49%;">
<iframe width="100%" height="525px" frameborder="0" scrolling="no" src="{}"></iframe>
</div>
<div class="chart-stage" style="width:49%;">
<iframe width="100%" height="525px" frameborder="0" scrolling="no" src="{}"></iframe>
</div>
<div class="chart-stage" style="width:99%;">
<iframe width="100%" height="525" frameborder="0" scrolling="no" src="{}"></iframe>
</div>
<div class="chart-stage" style="width:99%;">
<iframe width="100%" height="525" frameborder="0" scrolling="no" src="{}"></iframe>
</div>
<div class="chart-stage" style="width:99%;">
<iframe width="100%" height="525" frameborder="0" scrolling="no" src="{}"></iframe>
</div>
</body>
</html>
"""
htmltext = htmltext.format( bgcolor_hex, plot1, plot2, plot3, plot4, plot5 )
with open(filename, 'w') as file:
file.write(htmltext)
file.close()
return filename
#==========================
``` |
{
"source": "JPYamamoto/PythonDjango-PlasmaCMS",
"score": 2
} |
#### File: PythonDjango-PlasmaCMS/posts/views.py
```python
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.utils import timezone
from django.views.generic import CreateView, UpdateView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import Post, Comment
from .forms import PostForm, CommentForm
import json
# Create your views here.
def index(request):
if request.method == 'GET':
posts = Post.objects.all().order_by('-last_update_datetime')[:12]
slider_posts = posts[:5]
posts = [posts[x:x+3] for x in range(0, len(posts), 3)]
return render(request, 'posts/index.html', {'posts': posts, 'slider': slider_posts})
else:
posts_list = Post.objects.all().order_by('-last_update_datetime')
posts_list = [posts_list[x:x + 3] for x in range(0, len(posts_list), 3)]
page = request.POST.get('page', 1)
paginator = Paginator(posts_list, 4)
try:
posts_list = paginator.page(page)
except PageNotAnInteger:
posts_list = paginator.page(1)
page = 1
except EmptyPage:
posts_list = []
html = render_to_string('posts/index_page.html', {'posts_list': posts_list, 'MEDIA_URL': settings.MEDIA_URL})
more_pages = True
try:
paginator.page(int(page)+1)
except EmptyPage:
more_pages = False
response = {'html': html, 'more_pages': more_pages}
return HttpResponse(json.dumps(response), content_type="application/json")
class NewPost(LoginRequiredMixin, CreateView):
login_url = reverse_lazy('settings:login')
model = Post
form_class = PostForm
template_name = 'posts/form.html'
success_url = reverse_lazy('posts:index')
def form_valid(self, form):
form.instance.author = self.request.user
form.instance.last_update_datetime = timezone.now()
return super(NewPost, self).form_valid(form)
class EditPost(LoginRequiredMixin, UpdateView):
login_url = reverse_lazy('settings:login')
model = Post
form_class = PostForm
template_name = 'posts/form.html'
def form_valid(self, form):
form.instance.last_update_datetime = timezone.now()
return super(EditPost, self).form_valid(form)
def get_success_url(self, **kwargs):
return reverse_lazy('posts:view', args=(self.object.id,))
class RemovePost(LoginRequiredMixin, DeleteView):
login_url = reverse_lazy('settings:login')
model = Post
template_name = 'posts/delete_form.html'
success_url = reverse_lazy('posts:index')
def show_post(request, pk):
if request.method == 'POST':
form_comment = CommentForm(request.POST)
if form_comment.is_valid():
comment = form_comment.save(commit=False)
comment.post = Post.objects.get(pk=pk)
comment.user = request.user
comment.save()
success = True
response = {'comment_id': comment.id,
'user_id': str(reverse_lazy('profiles:view', kwargs={'pk': comment.user.pk})),
'user_name': comment.user.username,
'user_image': comment.user.profile.image.url,
'date': comment.creation_datetime.strftime("%B %d, %Y"),
'title': comment.title,
'content': comment.content
}
else:
success = False
response = form_comment.errors
context = {'success': success, 'response': response}
return HttpResponse(json.dumps(context), content_type='application/json')
else:
post = get_object_or_404(Post, pk=pk)
if post.allow_comments:
form_comment = CommentForm()
else:
form_comment = None
context = {'post': post,
'form_comment': form_comment,
'comments': Comment.objects.filter(post=pk).order_by('-creation_datetime')
}
return render(request, 'posts/view.html', context)
class RemoveComment(DeleteView):
model = Comment
template_name = 'posts/delete_form.html'
success_url = reverse_lazy('posts:index')
```
#### File: PythonDjango-PlasmaCMS/profiles/views.py
```python
from django.shortcuts import render
from django.contrib.auth.models import User
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.db.models import ProtectedError
from .forms import UserProfileForm, UserForm
from .models import Profile
from posts.models import Post
# Create your views here.
class ShowUser(LoginRequiredMixin, DetailView):
login_url = reverse_lazy('settings:login')
model = User
template_name = 'profiles/view.html'
def get_object(self, queryset=None):
if queryset is None:
queryset = self.get_queryset()
pk = self.kwargs.get(self.pk_url_kwarg)
if pk is not None:
queryset = queryset.filter(pk=pk)
if pk is None:
pk = self.request.user.pk
queryset = queryset.filter(pk=pk)
try:
obj = queryset.get()
except queryset.model.DoesNotExist:
raise Http404(("No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_context_data(self, **kwargs):
context = super(ShowUser, self).get_context_data(**kwargs)
try:
posts = Post.objects.filter(author=self.request.user)
except Post.DoesNotExist:
posts = None
context['posts'] = posts
return context
class UserProfileEdit(LoginRequiredMixin, UpdateView):
login_url = reverse_lazy('settings:login')
model = Profile
second_model = User
template_name = 'profiles/form.html'
form_class = UserProfileForm
second_form_class = UserForm
success_url = reverse_lazy('profiles:view_self')
def get_context_data(self, **kwargs):
context = super(UserProfileEdit, self).get_context_data(**kwargs)
profile = self.get_object()
user_account = self.second_model.objects.get(id=profile.user_id)
if 'form' not in context:
context['form'] = self.form_class(instance=profile)
if 'form2' not in context:
context['form2'] = self.second_form_class(instance=user_account)
return context
def get_object(self, queryset=None):
current_user, created = Profile.objects.get_or_create(
user_id=self.request.user.id)
pk = current_user.pk
return get_object_or_404(Profile, pk=pk)
class RemoveUser(LoginRequiredMixin, DeleteView):
login_url = reverse_lazy('settings:login')
model = User
template_name = 'profiles/delete_form.html'
success_url = reverse_lazy('settings:index')
def post(self, request, *args, **kwargs):
try:
return self.delete(request, *args, **kwargs)
except ProtectedError:
error_posts = Post.objects.filter(author=self.object.id)
return render(request, self.template_name, {'message': 'You can\'t delete this user, because it is referenced by the following posts.',
'error_posts': error_posts})
```
#### File: PythonDjango-PlasmaCMS/settings/views.py
```python
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as logout_django
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from django.db import transaction
from django.db.models import Count, Q
from django.contrib.auth.models import User
from django.views.generic import View, ListView, CreateView
from .forms import BlogSettingsForm, LoginForm, RegisterForm
from .models import Blog
from posts.models import Post
from categories.models import Category
from profiles.models import Profile
# Create your views here.
@login_required(login_url=reverse_lazy('settings:login'))
def blog_settings(request):
current_config = Blog.objects.last()
if request.method == 'GET':
form = BlogSettingsForm(instance=current_config)
else:
form = BlogSettingsForm(request.POST, request.FILES,
instance=current_config)
if form.is_valid():
form.save()
return redirect('settings:index')
return render(request, 'settings/blog_settings.html', {'form': form})
class IndexList(ListView):
model = Post
template_name = 'settings/blog_index.html'
context_object_name = 'index_elements'
paginate_by = 6
ordering = ['-creation_datetime']
def get_context_data(self, **kwargs):
context = super(IndexList, self).get_context_data(**kwargs)
context['categories'] = Category.objects.annotate(
times=Count('post__category')).order_by('-times')[:3]
return context
class Login(View):
form = LoginForm()
message = None
template = 'settings/blog_login.html'
def get(self, request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(self.get_next_url())
return render(request, self.template, self.get_context())
def post(self, request, *args, **kwargs):
username_post = request.POST['username']
password_post = request.POST['password']
user = authenticate(username=username_post, password=password_post)
if user is not None:
login(request, user)
return redirect(self.get_next_url())
else:
self.message = 'Wrong username or password.'
return render(request, self.template, self.get_context())
def get_context(self):
return {'form': self.form, 'message': self.message}
def get_next_url(self):
return self.request.GET.get('next', 'settings:index')
class Register(CreateView):
success_url = reverse_lazy('settings:login')
model = User
template_name = 'settings/blog_register.html'
form_class = RegisterForm
@transaction.atomic
def form_valid(self, form):
self.object = form.save(commit=False)
self.object.set_password(form.cleaned_data['<PASSWORD>'])
self.object.save()
profile = Profile(user=self.object)
profile.save()
return HttpResponseRedirect(self.get_success_url())
def logout(request):
logout_django(request)
return redirect('settings:index')
def search(request):
if request.method == 'GET':
string = request.GET.get('search_string')
posts = Post.objects.filter(title__icontains=string)[:20]
categories = Category.objects.filter(Q(name__icontains=string) | Q(description__icontains=string))[:20]
users = User.objects.filter(Q(username__icontains=string) | Q(profile__name__icontains=string))[:20]
context = {'posts_list': posts,
'categories_list': categories,
'users_list': users}
return render(request, 'settings/blog_search.html', context)
``` |
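The `search` view above ORs several case-insensitive lookups together with `Q` objects before slicing the querysets. A minimal sketch of the same pattern, assuming a model with `title` and `content` fields (both field names are illustrative):

```python
from django.db.models import Q

def search_posts(queryset, term):
    # OR two icontains lookups and cap the result size, mirroring
    # the pattern used in the search view above.
    return queryset.filter(
        Q(title__icontains=term) | Q(content__icontains=term)
    )[:20]
```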
{
"source": "JPYamamoto/rpsls",
"score": 3
} |
#### File: rpsls/algorithms/markov.py
```python
import random
from choices import Choice
class MarkovChainMixin:
matrix = {
'rock': {'rock': 0, 'paper': 0, 'scissors': 0, 'lizard': 0, 'spock': 0},
'paper': {'rock': 0, 'paper': 0, 'scissors': 0, 'lizard': 0, 'spock': 0},
'scissors': {'rock': 0, 'paper': 0, 'scissors': 0, 'lizard': 0, 'spock': 0},
'lizard': {'rock': 0, 'paper': 0, 'scissors': 0, 'lizard': 0, 'spock': 0},
'spock': {'rock': 0, 'paper': 0, 'scissors': 0, 'lizard': 0, 'spock': 0}
}
prev_throw = None
last_throw = None
def prepare_round(self, throw):
self.prev_throw = self.last_throw
self.last_throw = throw
if self.prev_throw is not None:
self.matrix[str(self.prev_throw)][str(self.last_throw)] += 1
def likely_next(self):
if self.last_throw is None:
return Choice.random_choice()
return self.implementation()
class MarkovChain(MarkovChainMixin):
def implementation(self):
matrix = self.matrix[str(self.last_throw)]
max_times = matrix[max(matrix, key=lambda val: matrix[val])]
next_options = [k for (k,v) in matrix.items() if v == max_times]
next_str = random.choice(next_options)
return Choice.parse(next_str)
class RandomMarkovChain(MarkovChainMixin):
def implementation(self):
matrix = self.matrix[str(self.last_throw)]
options, weights = zip(*matrix.items())
next_str = random.choices(options, weights=weights)[0]
return Choice.parse(next_str)
```
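A minimal sketch of how these predictors could be driven from a game loop. The import paths are assumptions based on the file layout above; `Choice.parse` and `get_attacker` come from `choices.py` below, and the observed throws are made up:

```python
from choices import Choice           # assumed to be importable, as in markov.py
from markov import MarkovChain       # assumed module name for the file above

ai = MarkovChain()
for observed in ['rock', 'rock', 'paper', 'rock']:
    predicted = ai.likely_next()              # AI's guess at the player's next throw
    counter = predicted.get_attacker()        # a throw that beats the prediction
    ai.prepare_round(Choice.parse(observed))  # record what the player actually threw
```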
#### File: rpsls/algorithms/random.py
```python
import random
from choices import Choice
class Random:
def prepare_round(self, _):
pass
def likely_next(self):
return Choice.random_choice()
```
#### File: rpsls/rpsls/choices.py
```python
import random
class Choice:
@staticmethod
def parse(choice):
choice_lower = choice.lower()
if choice_lower == 'rock':
return Rock()
elif choice_lower == 'paper':
return Paper()
elif choice_lower == 'scissors':
return Scissors()
elif choice_lower == 'lizard':
return Lizard()
elif choice_lower == 'spock':
return Spock()
else:
raise ValueError('Invalid choice')
@staticmethod
def random_choice():
choices = Choice.__subclasses__()
        return random.choice(choices)()  # instantiate so callers get a Choice instance, matching parse()
@classmethod
def is_draw(cls, attacker):
return isinstance(attacker, cls)
@classmethod
def get_attacker(cls):
next_attacker = random.choice(cls.is_beaten_by)
return Choice.parse(next_attacker)
@classmethod
def __eq__(cls, other):
return isinstance(other, cls)
class Rock(Choice):
name = 'rock'
is_beaten_by = ('paper', 'spock')
def beats(self, attacker):
if isinstance(attacker, Scissors):
return True
if isinstance(attacker, Lizard):
return True
return False
def __str__(self):
return self.name
class Paper(Choice):
name = 'paper'
is_beaten_by = ('lizard', 'scissors')
def beats(self, attacker):
if isinstance(attacker, Rock):
return True
if isinstance(attacker, Spock):
return True
return False
def __str__(self):
return self.name
class Scissors(Choice):
name = 'scissors'
is_beaten_by = ('rock', 'spock')
def beats(self, attacker):
if isinstance(attacker, Paper):
return True
if isinstance(attacker, Lizard):
return True
return False
def __str__(self):
return self.name
class Lizard(Choice):
name = 'lizard'
is_beaten_by = ('rock', 'scissors')
def beats(self, attacker):
if isinstance(attacker, Spock):
return True
if isinstance(attacker, Paper):
return True
return False
def __str__(self):
return self.name
class Spock(Choice):
name = 'spock'
is_beaten_by = ('lizard', 'paper')
def beats(self, attacker):
if isinstance(attacker, Rock):
return True
if isinstance(attacker, Scissors):
return True
return False
def __str__(self):
return self.name
``` |
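A short sketch of resolving a single round with the classes above (import path assumed):

```python
from choices import Choice

player = Choice.parse('lizard')
opponent = Choice.parse('spock')

if player.is_draw(opponent):
    result = 'draw'
elif player.beats(opponent):
    result = 'player wins'    # lizard poisons Spock
else:
    result = 'opponent wins'
```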
{
"source": "JPYamamoto/secret_sharing_shamir",
"score": 4
} |
#### File: secret_sharing_shamir/shamir/io.py
```python
import getpass
class IO:
"""Input/Output related utilities.
"""
@staticmethod
def input_secret(msg):
"""Prompt the user for an input using a given message. The input will
not be visible on the terminal.
Args:
msg: The message to display to the user prompting for an input.
"""
return getpass.getpass(msg)
@staticmethod
def read_file(filename, *, binary = False):
"""Read a file whose name was supplied as a parameter. By default
reads its content as a text, unless specified, in which case it will
be read as a byte array.
Args:
filename: The name of the file to be read.
binary: Whether to read the file as binary or not.
"""
mode = 'r'
if binary:
mode += 'b'
with open(filename, mode) as f:
return f.read()
@staticmethod
def write_file(filename, content, *, binary = False):
"""Write a given content to a file. The content can be supplied as
a byte array or as plain text.
Args:
filename: The name of the file to be written.
content: The content to write to the specified file.
            binary: Whether to write the file as binary or not.
"""
mode = 'w'
if binary:
mode += 'b'
with open(filename, mode) as f:
f.write(content)
```
#### File: secret_sharing_shamir/shamir/polynomial.py
```python
class Polynomial:
"""Polynomial related methods.
    A polynomial is described by a list of its coefficients, where
the entry at index i is the coefficient of x^i.
"""
def __init__(self, coefficients):
self.coefficients = coefficients
def evaluate(self, x, prime):
"""Horner's method.
This method allows the evaluation of a polynomial of degree n
        with only n multiplications and n additions. This is optimal, since
        there are degree-n polynomials that cannot be evaluated with fewer
        arithmetic operations.
        Args:
            x: The point at which to evaluate the polynomial.
            prime: The prime number used as the modulus for the arithmetic.
"""
result = 0
for coefficient in reversed(self.coefficients):
result = ((result * x) + coefficient) % prime
return result
@staticmethod
def lagrange(points, x, prime):
"""Lagrange polynomials are used for polynomial interpolation.
        For a given list of points (x_j, y_j) with pairwise distinct x_j values,
        the Lagrange interpolation polynomial is the polynomial of lowest degree
        that takes the value y_j at each x_j, so the interpolant and the original
        function coincide at every given point.
        Args:
            points: List of tuples representing points in the plane which the polynomial passes through.
            x: The value at which to evaluate the interpolating polynomial (0 when recovering a Shamir secret).
            prime: The prime number defining the field Z_p in which the arithmetic is done.
"""
at_zero = 0
for i in range(len(points)):
x_i, y_i = points[i]
at_zero = (prime + at_zero + Polynomial.multiply_points(points, x_i, x, i, y_i, prime)) % prime
return at_zero
@staticmethod
def multiply_points(points, x_i, x, i, y_i, prime):
"""We calculate the interpolation polynomial of Lagrange in x_i.
Args:
points: List of tuples representing points in the plane, which pass through the polynomial.
x_i: The point at which we are making the interpolation of the polynomial.
i: Index
y_i: f(x_i)
prime: The prime number which will apply the module operation.
"""
polynom = []
for j in range(len(points)):
if (j == i):
continue
numerator = (x - points[j][0]) % prime
denominator = (x_i - points[j][0]) % prime
inverse_d = Polynomial.mod_inverse(denominator, prime)
polynom.append(numerator * inverse_d)
result = 1
for k in range(len(polynom)):
result *= polynom[k]
return result * y_i
@staticmethod
def mod_inverse(denominator, prime):
"""We neet to get the multiplicative inverse of denominator
in the field Z_p, for this reason we must implement the Fermat's
little theorem.
Fermat's little theorem states that if p is a prime number, then
for any integer a, the number a^p − a is an integer multiple of p.
Args:
denominator: The number to which we should get your multiplicative inverse in the field Z_p.
prime: The prime number which will help us to get the multiplicative inverse.
"""
return pow(denominator, prime-2, prime)
```
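A minimal end-to-end sketch of how `Polynomial` supports a (t, n) Shamir scheme: the secret is the constant coefficient, the shares are evaluations at x = 1..n, and any t shares recover the secret by interpolating at x = 0. The prime and coefficients here are illustrative; a real application would draw the non-constant coefficients at random below the prime.

```python
from shamir.polynomial import Polynomial

prime = 2**127 - 1          # a Mersenne prime, larger than the secret
secret = 12345
t, n = 3, 5

# Degree t-1 polynomial whose constant term is the secret.
poly = Polynomial([secret, 166, 94])
shares = [(x, poly.evaluate(x, prime)) for x in range(1, n + 1)]

# Any t shares reconstruct the secret by evaluating the interpolant at 0.
recovered = Polynomial.lagrange(shares[:t], 0, prime)
assert recovered == secret
```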
#### File: secret_sharing_shamir/test/test_io.py
```python
from shamir.io import IO
import os
import string
import random
TEST_FILE = './test/test_assets/io_test.txt'
class TestIO:
def test_read_write_text(self):
length = random.getrandbits(8)
content = ''.join(random.choice(string.ascii_letters) for _ in range(length))
IO.write_file(TEST_FILE, content)
written = IO.read_file(TEST_FILE)
print(content)
print(written)
assert (content == written)
def test_read_write_binary(self):
content = os.urandom(2**16)
IO.write_file(TEST_FILE, content, binary=True)
written = IO.read_file(TEST_FILE, binary=True)
assert (content == written)
``` |
{
"source": "JPYamamoto/ultrasonic_tardis",
"score": 3
} |
#### File: src/entities/tardis.py
```python
import pygame
from pathlib import Path
from constants import WINDOWS_HEIGHT
_WIDTH = 80
_OFFSET = 0.1
class Tardis(pygame.sprite.Sprite):
"""Object representing a TARDIS (the main character).
Attributes:
image: An image instance provided by Pygame.
rect: The rect surface that will occupy the object instance.
"""
def __init__(self):
""" Init the TARDIS (player entity). """
super(Tardis, self).__init__()
path = Path.cwd() / 'assets' / 'image' / 'tardis.png'
image = pygame.image.load(str(path))
scale = (_WIDTH, round(image.get_height() * _WIDTH / image.get_width()))
self.image = pygame.transform.scale(image, scale)
self.rect = self.image.get_rect(center=(150, 0))
def update(self, distance):
"""Move the TARDIS according to the info of the sensor.
The TARDIS will move in the y-axis (up and down) given a
position ranging from 0 to 1, as a float.
It will be retrieved by getting the distance measured by the
ultrasonic sensor, and only taking into account a certain RANGE
after a certain OFFSET.
Args:
distance: The distance measured by the ultrasonic sensor.
Returns:
Nothing. This method is only used to modify the internal
state of the object.
"""
if distance < _OFFSET:
distance = _OFFSET
distance = (distance - _OFFSET) * 5
distance = 1 - distance
self.rect.center = (150, WINDOWS_HEIGHT * distance)
if self.rect.top < 0:
self.rect.top = 0
elif self.rect.bottom > WINDOWS_HEIGHT:
self.rect.bottom = WINDOWS_HEIGHT
``` |
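The mapping inside `update` can be read as: clamp the reading to the offset, rescale the next 20 cm of sensor travel onto [0, 1], invert it, and then clamp the sprite to the window. A framework-free sketch of that arithmetic (`WINDOWS_HEIGHT` is assumed here; in the project it lives in `constants.py`):

```python
WINDOWS_HEIGHT = 480   # assumed value; defined in constants.py in the project
_OFFSET = 0.1

def distance_to_y(distance):
    # Readings closer than the offset are treated as being at the offset.
    distance = max(distance, _OFFSET)
    # The 0.1 m .. 0.3 m band of readings spans the full window height.
    fraction = 1 - (distance - _OFFSET) * 5
    # Clamp to the window, mirroring the rect.top / rect.bottom checks above.
    return min(max(WINDOWS_HEIGHT * fraction, 0), WINDOWS_HEIGHT)

for reading in (0.05, 0.15, 0.25, 0.40):
    print(reading, distance_to_y(reading))
```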
{
"source": "jpyankel/Teapot-Wars-2",
"score": 3
} |
#### File: objects/item/BagOfTeaPlusThree.py
```python
from .Item import Item
from .ItemType import ItemType
from panda3d.core import LPoint3f
MODEL_FILE_PATH = "objects/item/models/Teabag.egg"
MODEL_SCALE = LPoint3f(0.1, 0.1, 0.1)
class BagOfTeaPlusThree (Item):
"""
The BagOfTeaPlusThree is the most powerful item in the game. Holding
this means the player wins. Yay.
"""
def __init__ (self, gameManager, cID, **kwargs):
Item.__init__(self, gameManager, cID,
modelPath=MODEL_FILE_PATH, modelScale=MODEL_SCALE,
**kwargs)
def activateItem (self, pickupChar):
""" All item activations are done client side and then synced """
Item.activateItem(self, pickupChar)
self._gameManager.localPlayerWinStateAchieved()
def getItemTypeEnum (self):
return ItemType.BagOfTeaPlusThree
```
#### File: objects/localPlayer/PlayerController.py
```python
from direct.actor.Actor import Actor
from .CameraSystem import CameraSystem
from objects.characters.Teapot import Teapot
from .InputSystem import InputSystem
from objects.gameUI.BarUI import BarUI
from objects.gameUI.RespawnScreen import RespawnScreen
from objects.gameUI.AbilityBar import AbilityBar
from objects.defaultConfig.Consts import *
from direct.task import Task
import time
class PlayerController ():
"""
The Player class lets the user play the game.
Handles client side player systems such as input, camera, and gameplay.
"""
def __init__ (self, gameManager, cID, initialPos, charClass):
self._gameManager = gameManager # Reference to gameManager for callbacks
# Initialize this clients gameObject:
self._character = Teapot(self, gameManager, cID, coords=initialPos)
# Assign class and stats:
self._charClass = charClass
self._character.setMaxEnergy(PLAYER_MAX_ENERGY)
# Initialize Camera Input:
self.cameraSystem = CameraSystem(target=self._character.getNodePath())
# Initialize the player's Input and UI:
self.inputSystem = InputSystem(self, gameManager.getTileMap())
self._energyBar = BarUI(self._character.getNodePath(),
ENERGY_BAR_OFFSET, 1,
ENERGY_BAR_FG_COLOR, ENERGY_BAR_BG_COLOR)
self._abilityBar = AbilityBar(self)
self._respawnScreen = None
# Register object in the tileMap
self._gameManager.getTileMap().spawnObject(self._character, initialPos)
self._lastActionEndTime = 0 # Used for energy recharge delay
self._energyRecharger = taskMgr.add(self._rechargeEnergyTask,
"Player Energy Recharger")
def activateAbility (self, abilityIndex):
""" Called by the AbilityBar on click. Tell the inputSystem to update"""
self.inputSystem.activateAbility(abilityIndex)
def onAbilityActivated (self, abilityIndex):
""" Highlight the activate ability in the AbilityBar """
self._abilityBar.onAbilityActivated(abilityIndex)
def onAbilityUsed (self):
""" Deselects the abilities in the AbilityBar """
self._abilityBar.deactivateAbilities()
def updateEnergyBar (self):
"""
Visually updates the energyBar value.
"""
percentage = self._character.getEnergy() / PLAYER_MAX_ENERGY
self._energyBar.setValue(percentage)
def _rechargeEnergyTask (self, task):
"""
Recharges the player's energy if they haven't acted for a certain
delay.
"""
# If we are currently in an action, simply update the _lastActionEndTime
if self._character.getCurrentActionSequence():
self._lastActionEndTime = time.time()
return task.cont
# If we are already full, skip this function:
if self._character.getEnergy() > self._character.getMaxEnergy():
self._character.setEnergy(self._character.getMaxEnergy())
self.updateEnergyBar()
return task.cont
if time.time() >= self._lastActionEndTime\
+ PLAYER_ENERGY_RECOVERY_DELAY:
deltaTime = globalClock.getDt()
self._character.setEnergy(self._character.getEnergy() + \
PLAYER_ENERGY_RECOVERY_RATE * deltaTime)
self.updateEnergyBar()
return task.cont
def isHostPlayer (self):
""" Returns whether we are a host player. """
return self._gameManager.isHost()
def getClass (self):
return self._charClass
def getClassAbilities (self):
return self._charClass.classAbilities
def getCharacter (self):
return self._character
def onDeath (self):
"""
Notify the player that they cannot act until a respawn timer is set.
Start that respawn timer
"""
# Prevent players from acting:
self.inputSystem.clearAbility()
self.inputSystem.setControllable(False)
taskMgr.remove(self._energyRecharger)
self._energyRecharger = None
self._energyBar.removeNode()
self._energyBar = None
self._respawnScreen = RespawnScreen(self)
def respawnRequest (self):
"""
Sends a respawn request to the server.
"""
self._gameManager.respawnLocalPlayer(self.getCharacter())
def onRespawned (self):
"""
Clientside function called when the controlled creature respawns.
Reinitializes UI and controls.
"""
# Remove blocking respawn screen
self._respawnScreen.close()
self._respawnScreen = None
# Enable energy regen and GUIs:
self._energyBar = BarUI(self._character.getNodePath(),
ENERGY_BAR_OFFSET, 1,
ENERGY_BAR_FG_COLOR, ENERGY_BAR_BG_COLOR)
self._energyRecharger = taskMgr.add(self._rechargeEnergyTask,
"Player Energy Recharger")
# Allow the player to control again:
self.inputSystem.setControllable(True)
def syncAction (self, cID, actionID, **kwargs):
""" Tells gameManager to sync action to the server """
self._gameManager.onLocalPlayerAction(cID, actionID, **kwargs)
def onActionStarted (self):
""" Keeps track of time for energy regen purposes """
self._lastActionEndTime = time.time()
def onActionCanceled (self):
""" Keeps track of time for energy regen purposes """
self._lastActionEndTime = time.time()
#TODO: Warn the user that the action canceled prematurely
def onActionEnded (self):
""" Keeps track of the action ending """
self._lastActionEndTime = time.time()
```
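The `_rechargeEnergyTask` above implements a simple delay-then-regenerate rule: no recovery until `PLAYER_ENERGY_RECOVERY_DELAY` seconds have passed since the last action, then recovery at a fixed rate per second. A framework-free sketch of one tick of that rule (the constant values are placeholders for the ones in `Consts.py`):

```python
PLAYER_MAX_ENERGY = 100              # placeholder values; the real ones
PLAYER_ENERGY_RECOVERY_DELAY = 2.0   # live in objects/defaultConfig/Consts.py
PLAYER_ENERGY_RECOVERY_RATE = 15.0

def recharge(energy, last_action_end, now, dt):
    """Return the energy value after one frame of the recharge rule."""
    if now < last_action_end + PLAYER_ENERGY_RECOVERY_DELAY:
        return energy                # still inside the post-action delay
    return min(energy + PLAYER_ENERGY_RECOVERY_RATE * dt, PLAYER_MAX_ENERGY)
```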
#### File: objects/networking/NetworkClient.py
```python
from panda3d.core import QueuedConnectionManager, QueuedConnectionReader,\
ConnectionWriter
from panda3d.core import ConfigVariableInt, Point2D
from panda3d.core import PointerToConnection, NetAddress, NetDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator
from direct.task import Task
from objects.defaultConfig.DefaultConfig import *
from objects.defaultConfig.Consts import *
from objects.networking.NetworkMessages import *
import sys
from objects.networking.PlayerInfo import PlayerInfo
from objects.characters.CharacterNetworkingUtilities import \
getCharacterTypeAsClass
from direct.interval.FunctionInterval import Func
class NetworkClient ():
"""
All remote clients will have one of these in their GameManager. This
class communicates with a server (NetworkHost) to update game state.
"""
def __init__ (self, gameManager):
self._connManager = QueuedConnectionManager()
self._timeout = CLIENT_TIMEOUT
self._loadConfig()
self._gameManager = gameManager
self._playerInfo = dict() # Party Member Info
        self._creatures = dict() # Creatures by cID.
self._connection = None
def _loadConfig (self):
"""
Loads network configuration defaults.
"""
self._portAddress = ConfigVariableInt("default-port",
DEFAULT_PORT).getValue()
def startClient (self, ipAddress):
"""
Finishes client init and attempts a connection.
"""
# Initialize Reader and Writer:
self._connReader = QueuedConnectionReader(self._connManager, 0)
self._connWriter = ConnectionWriter(self._connManager, 0)
# Initialize connection:
self._connection = self._connManager.openTCPClientConnection(
ipAddress, self._portAddress, self._timeout)
if self._connection:
print ("[Client Connected]")
self._connReader.addConnection(self._connection)
# Begin handling messages (start listening):
taskMgr.add(self._onReaderPoll,"Poll the connection reader",
-40)
self._gameManager.onLocalClientJoinedParty(self._connection\
.this) # GameManager callback
def _onReaderPoll (self, taskdata):
"""
Called on an interval to interpret messages from the reader.
"""
if self._connReader.dataAvailable():
newDatagram = NetDatagram()
# Double check to make sure (Multithreading safety):
if self._connReader.getData(newDatagram):
self._interpretDatagram(newDatagram)
return Task.cont # Repeat this call on an interval
def sendMessage (self, msg, msgType):
"""
Sends a given message to the server.
"""
print("[Client Sending %s message type %s]"%(str(self._connection),
str(msgType)))
self._connWriter.send(msg, self._connection)
def _interpretDatagram (self, datagram):
"""
Interprets a received datagram and performs actions based on
its values.
"""
msg = PyDatagramIterator(datagram)
msgType = msg.getUint8()
if msgType == DEBUG_MESSAGE:
print(msg.getString())
elif msgType == MAP_MESSAGE:
print("[Client Received Map Data]")
if self._gameManager.getTileMap() == None:
data = msg.getString32()
self._gameManager.onClientFirstReceivedMap(data)
elif msgType == UPDATE_PLAYER_INFO:
data = msg.getString()
self._updatePlayerInfoHandler(data)
elif msgType == SPAWN_CHARACTER:
data = msg.getString()
dataDict = json.loads(data)
self._onSpawnHandler(dataDict)
elif msgType == SYNC_ACTION:
data = msg.getString()
dataDict = json.loads(data)
self._onActionSyncHandler(dataDict)
elif msgType == SYNC_HEALTH:
data = msg.getString()
dataDict = json.loads(data)
self._onHealthSyncHandler(dataDict)
elif msgType == SYNC_DEATH:
data = msg.getString()
dataDict = json.loads(data)
self._onDeathSyncHandler(dataDict)
elif msgType == SYNC_RESPAWN:
data = msg.getString()
dataDict = json.loads(data)
self._onRespawnPermissionGranted(dataDict)
elif msgType == SPAWN_ITEM:
data = msg.getString()
dataDict = json.loads(data)
self._onItemSpawned(dataDict)
elif msgType == WIN_STATE:
data = msg.getString()
self._onGameWon(data)
def _onGameWon (self, data):
"""
Show the win state achieved screen with the specified playerinfo as
the winner details.
"""
newPlayerData = PlayerInfo(fromJson=data)
self._gameManager.onWinStateAchieved(newPlayerData)
def _onItemSpawned(self, dataDict):
""" The server spawned an item, handle spawning on this client """
itemType = ITEM_ID_DICT[dataDict['itemType']]
itemID = dataDict['objID']
newPos = Point2D(dataDict['pos'][0], dataDict['pos'][1])
# Create item locally:
newItem = itemType(self._gameManager, itemID, coords=newPos)
self._gameManager.getTileMap().spawnItem(newItem, newPos)
# Track new item:
self._creatures[itemID] = newItem
def _onDeathSyncHandler(self, dataDict):
""" Handles syncing of death for the given creature """
deadCreature = self._creatures[dataDict['objID']]
# Play death sequence on this character:
deadCreature.deathSequence(amClient=True)
def _onHealthSyncHandler (self, dataDict):
""" Handles syncing of health values for creatures """
print ("_onHealthSyncHandler")
newHealth = dataDict['newHealth']
affectedCreature = self._creatures[dataDict['objID']]
affectedCreature.onHPModified(newHealth)
def _onSpawnHandler (self, dataDict):
""" Handles networking spawning characters """
# Spawn object locally if the object at cID doesn't already exist.
if not dataDict['objID'] in self._creatures.keys():
# Spawn object of charType at pos
objectType = getCharacterTypeAsClass(dataDict['charType'])
newPos = Point2D(dataDict['pos'][0], dataDict['pos'][1])
newChar = objectType(parentCtrlr=None, cID=dataDict['objID'],
gameManager=self._gameManager, coords=newPos)
self._creatures[dataDict['objID']] = newChar
self._gameManager.getTileMap().spawnObject(newChar, newPos)
print("[Client Spawned %s]" % dataDict['objID'])
# If we have a player info for this player, use their name for the
# displayName:
print (dataDict['objID'], self._playerInfo)
if dataDict['objID'] in self._playerInfo:
newName = self._playerInfo[dataDict['objID']].cName
newChar.setNameDisplay(newName)
else:
# Ignore Overwrite
pass
def _updatePlayerInfoHandler (self, data):
"""
Update our player list with a player info given by data.
"""
newPlayerData = PlayerInfo(fromJson=data)
self._playerInfo[newPlayerData.cID] = newPlayerData
self._gameManager.updatePartyInfo(self._playerInfo,
self._connection.this)
# Update the creature's floating display name (unless it is ours):
ignoreThisUpdate = False
if self._gameManager._localPlayer:
if newPlayerData.cID == self._gameManager._localPlayer\
.getCharacter().getCID():
ignoreThisUpdate = True
if not ignoreThisUpdate and newPlayerData.cID in self._creatures:
self._creatures[newPlayerData.cID]\
.setNameDisplay(newPlayerData.cName)
def _onRespawnPermissionGranted (self, dataDict):
"""
Respawn the given character at the given location.
"""
targetObj = self._creatures[dataDict['objID']]
newPos = Point2D(dataDict['pos'][0], dataDict['pos'][1])
targetObj.respawn(newPos)
def _onActionSyncHandler (self, dataDict):
"""
Attempts to queue an action for execution on a target denoted by
dataDict['objID']
"""
syncedAction = ACTION_NETWORKING_DICT[dataDict['actionID']]
# Add a few local variables to dataDict:
targetObj = self._creatures[dataDict['objID']] # TODO Maybe make this part of dataDict!
dataDict['tileMap'] = self._gameManager.getTileMap()
if 'targetCID' in dataDict: # If there is another target:
# Assign the target:
dataDict['target'] = self._creatures[dataDict['targetCID']]
dataDict['isServer'] = False # Let sync function know we are remote
# Create the newAction
newAction = syncedAction(targetObj, **dataDict)
targetObj.startAction(newAction) # queue or start the new action
def updateLocalPlayerInfo (self, info=None):
"""
Updates info for this local player and sends it to the server.
"""
if not info:
initData = PlayerInfo(cID=self._connection.this)
self._playerInfo[self._connection.this] = initData
infoMsg = createPlayerInfoMessage(initData)
self.sendMessage(infoMsg, UPDATE_PLAYER_INFO)
else:
infoMsg = createPlayerInfoMessage(info)
self.sendMessage(infoMsg, UPDATE_PLAYER_INFO)
def syncAction (self, actionID, **kwargs):
"""
The local player has performed an action that must be synced across
the network. Send a message to all clients telling them to perform
a related action on that character.
"""
msg = createSyncActionMessage(self._connection.this, actionID, **kwargs)
self.sendMessage(msg, SYNC_ACTION)
def sendPlayerRespawnRequest (self):
msg = createRespawnRequest(self._connection.this)
self.sendMessage(msg, SYNC_RESPAWN)
def localPlayerWins (self):
"""
Tell host we won. Then wait for a response.
"""
msg = createWinMessage(self._playerInfo[self._connection.this])
self.sendMessage(msg, WIN_STATE)
def spawnGameObject (self, gameObject):
"""
Tracks the given gameObject and sends it to the server
"""
# First, track it locally:
self._creatures[self.getCID()] = gameObject
# Tell server to spawn it for them and everyone else:
msg = createSpawnCharacterMessage(gameObject, self.getCID())
self.sendMessage(msg, SPAWN_CHARACTER)
def getCID(self):
return self._connection.this
```
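Every message handled by `_interpretDatagram` uses the same framing: a `Uint8` type code followed by a string payload, usually JSON. A minimal sketch of writing and reading one such frame with Panda3D's datagram classes; the type code and payload here are made up, and the real codes live in `NetworkMessages.py`:

```python
import json
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.PyDatagramIterator import PyDatagramIterator

SYNC_HEALTH = 6  # illustrative value only

# Sending side: type code first, JSON payload second.
dg = PyDatagram()
dg.addUint8(SYNC_HEALTH)
dg.addString(json.dumps({'objID': 3, 'newHealth': 42}))

# Receiving side mirrors _interpretDatagram above.
it = PyDatagramIterator(dg)
msg_type = it.getUint8()
payload = json.loads(it.getString())
```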
#### File: objects/pathfinding/BFS.py
```python
from panda3d.core import Point2D
"""
This file is dedicated to any pathfinding functionality.
"""
PATHFINDING_LEGAL_DIRECTIONS = [(-1, 0), (-1, 1), (0, 1), (1, 1),
(1, 0), (1, -1), (0, -1), (-1, -1)]
def findTilesFromTo (fromPos, toPos, tileMap, includeOcuppied=False):
"""
Returns a list of grid coordinates to get from fromPos to toPos.
All calculations are done and returned in grid space (row, col).
"fromPos" and "toPos" are Point2D inputs.
Returns None if failed.
"""
    # Quick rejection checks before running the search:
if not tileMap.isFloor(toPos): return None
if fromPos == toPos: return None
if not includeOcuppied and tileMap.isTileOccupied(toPos): return None
visitedNodes = list()
queue = [[fromPos]] # list of paths to check
while len(queue) > 0:
# Check each starting node to ensure we haven't already visited:
currentPath = queue.pop(0)
node = currentPath[-1] # Last node in the current Path
if not node in visitedNodes:
visitedNodes.append(node)
# Get list of legal tiles in any legal direction in tileMap
nextNodes = getLegalTilesInDirections(node, tileMap)
# We will visit each of these nodes in the next step:
for nextNode in nextNodes:
newPath = currentPath[:] # Copy currentPath
newPath.append(nextNode)
queue.append(newPath)
if nextNode == toPos: # If we found our destination!
return newPath
return None
def getAreaTiles (originCoords, tileMap, areaRange, includeCenter=False):
"""
Returns a list of positions centered around an originCoords that
stretches out until a given range.
"""
tiles = list()
rangeCount = 0
while rangeCount < areaRange:
rangeCount += 1
# Get all tiles in all directions:
for direction in PATHFINDING_LEGAL_DIRECTIONS:
# Add a vector based on range and direction to the origin position
# for every direction:
newVector = Point2D(direction[0], direction[1]) * rangeCount
newPosition = originCoords + newVector
if tileMap.isFloor(newPosition):
tiles.append(newPosition)
if includeCenter:
tiles.append(originCoords)
return tiles
def getLegalTilesInDirections (originCoords, tileMap, includeOcuppied=False):
"""
Returns a list of legal tiles to check based on a current position
and a tileMap. Takes Point2D inputs.
"""
newTiles = list()
for direction in PATHFINDING_LEGAL_DIRECTIONS:
newTile = Point2D(originCoords.getX()+direction[0],
originCoords.getY()+direction[1])
if includeOcuppied and tileMap.isTileFloor(newTile):
newTiles.append(newTile)
elif not tileMap.isTileOccupied(newTile):
newTiles.append(newTile)
return newTiles
``` |
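A minimal sketch of driving `findTilesFromTo` against a stubbed map. The stub only exposes the predicates the search relies on (`isFloor`, `isTileFloor`, `isTileOccupied`) over a small open grid; the import path and the stub itself are assumptions, and the project's real `TileMap` is far richer:

```python
from panda3d.core import Point2D
from BFS import findTilesFromTo   # assumed import path for the module above

class OpenGrid:
    """A 5x5 grid of empty floor tiles, just enough for the BFS API."""
    def isFloor(self, pos):
        return 0 <= pos.getX() < 5 and 0 <= pos.getY() < 5
    isTileFloor = isFloor
    def isTileOccupied(self, pos):
        return not self.isFloor(pos)   # off-grid tiles count as blocked

path = findTilesFromTo(Point2D(0, 0), Point2D(3, 3), OpenGrid())
print([(p.getX(), p.getY()) for p in path])
```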
{
"source": "jpyan/my-learning-analytics",
"score": 3
} |
#### File: dashboard/common/utils.py
```python
import logging, os
from dashboard.settings import SHA_ABBREV_LENGTH
logger = logging.getLogger(__name__)
def format_github_url_using_https(github_url):
ssh_base = "git@"
https_base = "https://"
# If the URL is formatted for SSH, convert, otherwise, do nothing
if ssh_base == github_url[:len(ssh_base)]:
github_url = github_url.replace(":", "/").replace(".git", "").replace(ssh_base, https_base)
return github_url
def get_git_version_info():
logger.debug(get_git_version_info.__name__)
commit = os.getenv("GIT_COMMIT", "")
if commit != "":
commit_abbrev = commit[:SHA_ABBREV_LENGTH]
else:
commit_abbrev = ""
# Only include the branch name and not remote info
branch = os.getenv("GIT_BRANCH", "").split('/')[-1]
git_version = {
"repo": format_github_url_using_https(os.getenv("GIT_REPO", "")),
"commit": commit,
"commit_abbrev": commit_abbrev,
"branch": branch
}
return git_version
def look_up_key_for_value(myDict, searchFor):
for key, value in myDict.items():
for v in value:
if searchFor in v:
return key
return None
``` |
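A quick illustration of the SSH-to-HTTPS rewrite done by `format_github_url_using_https` (the repository name and import path are made up for the example):

```python
from dashboard.common.utils import format_github_url_using_https

url = format_github_url_using_https("git@github.com:example-org/example-repo.git")
assert url == "https://github.com/example-org/example-repo"
```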
{
"source": "jpycroft/OG-Core",
"score": 2
} |
#### File: OG-Core/ogcore/output_plots.py
```python
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from ogcore.constants import (GROUP_LABELS, VAR_LABELS, ToGDP_LABELS,
CBO_UNITS, DEFAULT_START_YEAR)
import ogcore.utils as utils
from ogcore.utils import Inequality
def plot_aggregates(base_tpi, base_params, reform_tpi=None,
reform_params=None, var_list=['Y', 'C', 'K', 'L'],
plot_type='pct_diff', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR,
vertical_line_years=None,
plot_title=None, path=None):
'''
Create a plot of macro aggregates.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var_list (list): names of variable to plot
plot_type (string): type of plot, can be:
'pct_diff': plots percentage difference between baselien
and reform ((reform-base)/base)
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
'cbo': plots variables in levels relative to CBO baseline
projection (only available for macro variables in CBO
long-term forecasts)
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
vertical_line_years (list): list of integers for years want
vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of macro aggregates
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
year_vec = np.arange(start_year, start_year + num_years_to_plot)
start_index = start_year - base_params.start_year
# Check that reform included if doing pct_diff or diff plot
if plot_type == 'pct_diff' or plot_type == 'diff':
assert (reform_tpi is not None)
fig1, ax1 = plt.subplots()
for i, v in enumerate(var_list):
if plot_type == 'pct_diff':
if v in ['r_gov', 'r', 'r_p']:
# Compute just percentage point changes for rates
plot_var = reform_tpi[v] - base_tpi[v]
else:
plot_var = (reform_tpi[v] - base_tpi[v]) / base_tpi[v]
ylabel = r'Pct. change'
plt.plot(year_vec,
plot_var[start_index: start_index +
num_years_to_plot], label=VAR_LABELS[v])
elif plot_type == 'diff':
plot_var = reform_tpi[v] - base_tpi[v]
ylabel = r'Difference (Model Units)'
plt.plot(year_vec,
plot_var[start_index: start_index +
num_years_to_plot], label=VAR_LABELS[v])
elif plot_type == 'levels':
plt.plot(year_vec,
base_tpi[v][start_index: start_index +
num_years_to_plot],
label='Baseline ' + VAR_LABELS[v])
if reform_tpi:
plt.plot(year_vec,
reform_tpi[v][start_index: start_index +
num_years_to_plot],
label='Reform ' + VAR_LABELS[v])
ylabel = r'Model Units'
elif plot_type == 'cbo':
# Need reform and baseline to ensure CBO plot makes sense
assert (reform_tpi is not None)
# read in CBO forecasts
df_cbo = utils.read_cbo_forecast()
# assert variable in cbo data
assert (v in df_cbo.columns)
# assert cbo data has start year and end year
assert (df_cbo.year.min() <= start_year)
assert (df_cbo.year.max() >= start_year + num_years_to_plot)
cbo_data = df_cbo[
(df_cbo['year'] >= start_year) &
(df_cbo['year'] <= start_year +
num_years_to_plot - 1)][v].values
# Plot CBO baseline
plot_var_base = cbo_data
plt.plot(year_vec, plot_var_base, label='Baseline ' +
VAR_LABELS[v])
# Plot change in CBO baseline
pct_change = ((reform_tpi[v] - base_tpi[v]) /
base_tpi[v])[start_index: start_index +
num_years_to_plot]
plot_var_reform = (1 + pct_change) * cbo_data
plt.plot(year_vec, plot_var_reform, label='Reform ' +
VAR_LABELS[v])
# making units labels will not work if multiple variables
# and they are in different units
ylabel = CBO_UNITS[v]
else:
print('Please enter a valid plot type')
assert(False)
# vertical markers at certain years
if vertical_line_years:
for yr in vertical_line_years:
plt.axvline(x=yr, linewidth=0.5, linestyle='--', color='k')
plt.xlabel(r'Year $t$')
plt.ylabel(ylabel)
if plot_title:
plt.title(plot_title, fontsize=15)
ax1.set_yticks(ax1.get_yticks().tolist())
vals = ax1.get_yticks()
if plot_type == 'pct_diff':
ax1.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
plt.xlim((base_params.start_year - 1, base_params.start_year +
num_years_to_plot))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
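# Example (a minimal sketch, not an official OG-Core recipe): once a baseline
# and a reform run have written TPI_vars.pkl and model_params.pkl to disk,
# the percentage-change plot above could be produced with something like
#
#   base_tpi = utils.safe_read_pickle(os.path.join(base_dir, 'TPI', 'TPI_vars.pkl'))
#   base_params = utils.safe_read_pickle(os.path.join(base_dir, 'model_params.pkl'))
#   reform_tpi = utils.safe_read_pickle(os.path.join(reform_dir, 'TPI', 'TPI_vars.pkl'))
#   reform_params = utils.safe_read_pickle(os.path.join(reform_dir, 'model_params.pkl'))
#   fig = plot_aggregates(base_tpi, base_params, reform_tpi, reform_params,
#                         var_list=['Y', 'C'], plot_type='pct_diff')
#
# where base_dir and reform_dir are hypothetical output directories.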
def ss_3Dplot(base_params, base_ss, reform_params=None, reform_ss=None,
var='bssmat_splus1', plot_type='levels', plot_title=None,
path=None):
'''
Create a 3d plot of household decisions.
Args:
base_params (OG-Core Specifications class): baseline parameters object
base_ss (dictionary): SS output from baseline run
reform_params (OG-Core Specifications class): reform parameters object
reform_ss (dictionary): SS output from reform run
var (string): name of variable to plot
plot_type (string): type of plot, can be:
'pct_diff': plots percentage difference between baselien
and reform ((reform-base)/base)
'diff': plots difference between baseline and reform (reform-base)
'levels': plot variables in model units
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of household decisions
'''
if reform_params:
assert(base_params.J == reform_params.J)
assert(base_params.starting_age == reform_params.starting_age)
assert(base_params.ending_age == reform_params.ending_age)
assert(base_params.S == reform_params.S)
domain = np.linspace(base_params.starting_age,
base_params.ending_age, base_params.S)
Jgrid = np.zeros(base_params.J)
for j in range(base_params.J):
Jgrid[j:] += base_params.lambdas[j]
if plot_type == 'levels':
data = base_ss[var].T
elif plot_type == 'diff':
data = (reform_ss[var] - base_ss[var]).T
elif plot_type == 'pct_diff':
data = ((reform_ss[var] - base_ss[var]) / base_ss[var]).T
cmap1 = matplotlib.cm.get_cmap('jet')
X, Y = np.meshgrid(domain, Jgrid)
fig5 = plt.figure()
ax5 = fig5.gca(projection='3d')
ax5.set_xlabel(r'age-$s$')
ax5.set_ylabel(r'ability type-$j$')
ax5.set_zlabel(r'individual savings $\bar{b}_{j,s}$')
ax5.plot_surface(X, Y, data, rstride=1, cstride=1, cmap=cmap1)
if plot_title:
plt.title(plot_title)
if path:
plt.savefig(path)
else:
return plt
def plot_gdp_ratio(base_tpi, base_params, reform_tpi=None,
reform_params=None, var_list=['D'],
plot_type='levels', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR, vertical_line_years=None,
plot_title=None, path=None):
'''
Create a plot of some variable to GDP.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters object
var_list (list): names of variable to plot
plot_type (string): type of plot, can be:
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
vertical_line_years (list): list of integers for years want
vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of ratio of a variable to GDP
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
if plot_type == 'diff':
assert (reform_tpi is not None)
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
year_vec = np.arange(start_year, start_year + num_years_to_plot)
start_index = start_year - base_params.start_year
fig1, ax1 = plt.subplots()
for i, v in enumerate(var_list):
if plot_type == 'levels':
plot_var_base = (base_tpi[v][:base_params.T] /
base_tpi['Y'][:base_params.T])
if reform_tpi:
plot_var_reform = (reform_tpi[v][:base_params.T] /
reform_tpi['Y'][:base_params.T])
plt.plot(year_vec, plot_var_base[start_index: start_index +
num_years_to_plot],
label='Baseline ' + ToGDP_LABELS[v])
plt.plot(year_vec, plot_var_reform[start_index: start_index +
num_years_to_plot],
label='Reform ' + ToGDP_LABELS[v])
else:
plt.plot(year_vec, plot_var_base[start_index: start_index +
num_years_to_plot],
label=ToGDP_LABELS[v])
else: # if plotting differences in ratios
var_base = (base_tpi[v][:base_params.T] /
base_tpi['Y'][:base_params.T])
var_reform = (reform_tpi[v][:base_params.T] /
reform_tpi['Y'][:base_params.T])
plot_var = var_reform - var_base
plt.plot(year_vec, plot_var[start_index: start_index +
num_years_to_plot],
label=ToGDP_LABELS[v])
ylabel = r'Percent of GDP'
# vertical markers at certain years
if vertical_line_years:
for yr in vertical_line_years:
plt.axvline(x=yr, linewidth=0.5, linestyle='--', color='k')
plt.xlabel(r'Year $t$')
plt.ylabel(ylabel)
if plot_title:
plt.title(plot_title, fontsize=15)
ax1.set_yticks(ax1.get_yticks().tolist())
vals = ax1.get_yticks()
if plot_type == 'levels':
ax1.set_yticklabels(['{:,.0%}'.format(x) for x in vals])
else:
ax1.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
plt.xlim((base_params.start_year - 1, base_params.start_year +
num_years_to_plot))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def ability_bar(base_tpi, base_params, reform_tpi,
reform_params, var='n_mat', num_years=5,
start_year=DEFAULT_START_YEAR, plot_title=None, path=None):
'''
Plots percentage changes from baseline by ability group for a
given variable.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
        num_years (integer): number of years to compute changes over
start_year (integer): year to start plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of results by ability type
'''
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
N = base_params.J
fig, ax = plt.subplots()
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
start_index = start_year - base_params.start_year
omega_to_use = base_params.omega[:base_params.T, :].reshape(
base_params.T, base_params.S, 1)
base_val = (base_tpi[var] * omega_to_use)[
start_index:start_index + num_years, :, :].sum(1).sum(0)
reform_val = (reform_tpi[var] * omega_to_use)[
start_index:start_index + num_years, :, :].sum(1).sum(0)
var_to_plot = (reform_val - base_val) / base_val
ax.bar(ind, var_to_plot * 100, width, bottom=0)
ax.set_xticks(ind + width / 4)
ax.set_xticklabels(list(GROUP_LABELS[base_params.J].values()))
plt.xticks(rotation=45)
plt.ylabel(r'Percentage Change in ' + VAR_LABELS[var])
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig
plt.close()
def ability_bar_ss(base_ss, base_params, reform_ss, reform_params,
var='nssmat', plot_title=None, path=None):
'''
Plots percentage changes from baseline by ability group for a
given variable.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of results by ability type
'''
N = base_params.J
fig, ax = plt.subplots()
ind = np.arange(N) # the x locations for the groups
width = 0.2 # the width of the bars
base_val = (
base_ss[var] *
base_params.omega_SS.reshape(base_params.S, 1)).sum(0)
reform_val = (
reform_ss[var] *
reform_params.omega_SS.reshape(reform_params.S, 1)).sum(0)
var_to_plot = (reform_val - base_val) / base_val
ax.bar(ind, var_to_plot * 100, width, bottom=0)
ax.set_xticks(ind + width / 4)
ax.set_xticklabels(list(GROUP_LABELS[base_params.J].values()))
plt.xticks(rotation=45)
plt.ylabel(r'Percentage Change in ' + VAR_LABELS[var])
if plot_title:
plt.title(plot_title, fontsize=15)
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig
plt.close()
def tpi_profiles(base_tpi, base_params, reform_tpi=None,
reform_params=None, by_j=True, var='n_mat',
num_years=5, start_year=DEFAULT_START_YEAR, plot_title=None,
path=None):
'''
    Plot lifecycle profiles of a given variable over the time path, averaged
    over the specified years.
    Args:
        base_tpi (dictionary): TPI output from baseline run
        base_params (OG-Core Specifications class): baseline parameters
            object
        reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
        num_years (integer): number of years over which to average
start_year (integer): year to start plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of lifecycle profiles
'''
assert isinstance(start_year, (int, np.integer))
assert isinstance(num_years, (int, np.integer))
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
assert (base_params.S == reform_params.S)
assert (base_params.starting_age == reform_params.starting_age)
assert (base_params.ending_age == reform_params.ending_age)
age_vec = np.arange(base_params.starting_age,
base_params.starting_age + base_params.S)
fig1, ax1 = plt.subplots()
start_idx = start_year - base_params.start_year
end_idx = start_idx + num_years
if by_j:
cm = plt.get_cmap('coolwarm')
ax1.set_prop_cycle(color=[cm(1. * i / 7) for i in range(7)])
for j in range(base_params.J):
plt.plot(age_vec,
base_tpi[var][start_idx: end_idx, :,
j].sum(axis=0) / num_years,
label='Baseline, j = ' + str(j))
if reform_tpi:
plt.plot(age_vec,
reform_tpi[var][start_idx: end_idx, :,
j].sum(axis=0) / num_years,
label='Reform, j = ' + str(j), linestyle='--')
else:
base_var = ((
base_tpi[var][start_idx: end_idx, :, :] *
base_params.lambdas.reshape(1, 1, base_params.J)
).sum(axis=2).sum(axis=0) / num_years)
plt.plot(age_vec, base_var, label='Baseline')
if reform_tpi:
reform_var = ((
reform_tpi[var][start_idx: end_idx, :, :] *
reform_params.lambdas.reshape(1, 1, base_params.J)
).sum(axis=2).sum(axis=0) / num_years)
plt.plot(age_vec, reform_var, label='Reform',
linestyle='--')
plt.xlabel(r'Age')
plt.ylabel(VAR_LABELS[var])
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def ss_profiles(base_ss, base_params, reform_ss=None,
reform_params=None, by_j=True, var='nssmat',
plot_data=None,
plot_title=None, path=None):
'''
Plot lifecycle profiles of given variable in the SS.
Args:
base_ss (dictionary): SS output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_ss (dictionary): SS output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
var (string): name of variable to plot
plot_data (array_like): series of data to add to plot
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of lifecycle profiles
'''
if reform_ss:
assert (base_params.S == reform_params.S)
assert (base_params.starting_age == reform_params.starting_age)
assert (base_params.ending_age == reform_params.ending_age)
age_vec = np.arange(base_params.starting_age,
base_params.starting_age + base_params.S)
fig1, ax1 = plt.subplots()
if by_j:
cm = plt.get_cmap('coolwarm')
ax1.set_prop_cycle(color=[cm(1. * i / 7) for i in range(7)])
for j in range(base_params.J):
plt.plot(age_vec, base_ss[var][:, j],
label='Baseline, j = ' + str(j))
if reform_ss:
plt.plot(age_vec, reform_ss[var][:, j],
label='Reform, j = ' + str(j), linestyle='--')
else:
base_var = (
base_ss[var][:, :] *
base_params.lambdas.reshape(1, base_params.J)).sum(axis=1)
plt.plot(age_vec, base_var, label='Baseline')
if reform_ss:
reform_var = (
reform_ss[var][:, :] *
reform_params.lambdas.reshape(1, reform_params.J)).sum(axis=1)
plt.plot(age_vec, reform_var, label='Reform', linestyle='--')
if plot_data is not None:
plt.plot(age_vec, plot_data, linewidth=2.0, label='Data',
linestyle=':')
plt.xlabel(r'Age')
plt.ylabel(VAR_LABELS[var])
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if plot_title:
plt.title(plot_title, fontsize=15)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
def plot_all(base_output_path, reform_output_path, save_path):
'''
Function to plot all default output plots.
Args:
base_output_path (str): path to baseline results
reform_output_path (str): path to reform results
save_path (str): path to save plots to
Returns:
None: All output figures saved to disk.
'''
# Make directory in case it doesn't exist
utils.mkdirs(save_path)
# Read in data
# Read in TPI output and parameters
base_tpi = utils.safe_read_pickle(
os.path.join(base_output_path, 'TPI', 'TPI_vars.pkl')
)
base_ss = utils.safe_read_pickle(
os.path.join(base_output_path, 'SS', 'SS_vars.pkl')
)
base_params = utils.safe_read_pickle(
os.path.join(base_output_path, 'model_params.pkl')
)
reform_tpi = utils.safe_read_pickle(
os.path.join(reform_output_path, 'TPI', 'TPI_vars.pkl')
)
reform_ss = utils.safe_read_pickle(
os.path.join(reform_output_path, 'SS', 'SS_vars.pkl')
)
reform_params = utils.safe_read_pickle(
os.path.join(reform_output_path, 'model_params.pkl')
)
# Percentage changes in macro vars (Y, K, L, C)
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['Y', 'K', 'L', 'C'], plot_type='pct_diff',
num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Percentage Changes in Macro Aggregates',
path=os.path.join(save_path, 'MacroAgg_PctChange.png'))
# Percentage change in fiscal vars (D, G, TR, Rev)
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['D', 'G', 'TR', 'total_tax_revenue'],
plot_type='pct_diff', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Percentage Changes in Fiscal Variables',
path=os.path.join(save_path, 'Fiscal_PctChange.png'))
# r and w in baseline and reform -- vertical lines at tG1, tG2
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['r'],
plot_type='levels', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Real Interest Rates Under Baseline and Reform',
path=os.path.join(save_path, 'InterestRates.png'))
plot_aggregates(base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params,
var_list=['w'],
plot_type='levels', num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Wage Rates Under Baseline and Reform',
path=os.path.join(save_path, 'WageRates.png'))
# Debt-GDP in base and reform-- vertical lines at tG1, tG2
plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
var_list=['D'], num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Debt-to-GDP',
path=os.path.join(save_path, 'DebtGDPratio.png'))
# Tax revenue to GDP in base and reform-- vertical lines at tG1, tG2
plot_gdp_ratio(base_tpi, base_params, reform_tpi, reform_params,
var_list=['total_tax_revenue'], num_years_to_plot=150,
start_year=base_params.start_year,
vertical_line_years=[
base_params.start_year + base_params.tG1,
base_params.start_year + base_params.tG2],
plot_title='Tax Revenue to GDP',
path=os.path.join(save_path, 'RevenueGDPratio.png'))
# Pct change in c, n, b, y, etr, mtrx, mtry by ability group over 10 years
var_list = ['c_path', 'n_mat', 'bmat_splus1', 'etr_path',
'mtrx_path', 'mtry_path', 'y_before_tax_mat']
title_list = ['consumption', 'labor supply', 'savings',
'effective tax rates',
'marginal tax rates on labor income',
'marginal tax rates on capital income',
'before tax income']
path_list = ['Cons', 'Labor', 'Save', 'ETR', 'MTRx', 'MTRy',
'Income']
for i, v in enumerate(var_list):
ability_bar(base_tpi, base_params, reform_tpi, reform_params,
var=v, num_years=10,
start_year=base_params.start_year,
plot_title='Percentage changes in ' + title_list[i],
path=os.path.join(save_path, 'PctChange_' +
path_list[i] + '.png'))
# lifetime profiles, base vs reform, SS for c, n, b, y - not by j
var_list = ['cssmat', 'nssmat', 'bssmat_splus1', 'etr_ss',
'mtrx_ss', 'mtry_ss']
for i, v in enumerate(var_list):
ss_profiles(base_ss, base_params, reform_ss, reform_params,
by_j=False, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '.png'))
# lifetime profiles, c, n , b, y by j, separately for base and reform
for i, v in enumerate(var_list):
ss_profiles(base_ss, base_params,
by_j=True, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '_Baseline.png'))
ss_profiles(reform_ss, reform_params,
by_j=True, var=v,
plot_title='Lifecycle Profile of ' + title_list[i],
path=os.path.join(save_path, 'SSLifecycleProfile_' +
path_list[i] + '_Reform.png'))
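# The inequality_plot function below is built on ogcore.utils.Inequality. As a
# rough, illustrative sketch, a single-period Gini coefficient for consumption
# could be computed directly with
#
#   ineq = Inequality(base_tpi['c_path'][0, :, :], base_params.omega[0, :],
#                     base_params.lambdas, base_params.S, base_params.J)
#   gini_t0 = ineq.gini()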
def inequality_plot(
base_tpi, base_params, reform_tpi=None, reform_params=None,
var='c_path', ineq_measure='gini', pctiles=None,
plot_type='levels', num_years_to_plot=50,
start_year=DEFAULT_START_YEAR, vertical_line_years=None,
plot_title=None, path=None):
'''
Plot measures of inequality over the time path.
Args:
base_tpi (dictionary): TPI output from baseline run
base_params (OG-Core Specifications class): baseline parameters
object
reform_tpi (dictionary): TPI output from reform run
reform_params (OG-Core Specifications class): reform parameters
object
        var (string): name of variable to plot
ineq_measure (string): inequality measure to plot, can be:
'gini': Gini coefficient
'var_of_logs': variance of logs
'pct_ratio': percentile ratio
'top_share': top share of total
pctiles (tuple or None): percentiles for percentile ratios
(numerator, denominator) or percentile for top share (not
required for Gini or var_of_logs)
plot_type (string): type of plot, can be:
            'pct_diff': plots percentage difference between baseline
and reform ((reform-base)/base)
'diff': plots difference between baseline and reform
(reform-base)
'levels': plot variables in model units
num_years_to_plot (integer): number of years to include in plot
start_year (integer): year to start plot
        vertical_line_years (list): list of integers for years you want
            vertical lines at
plot_title (string): title for plot
path (string): path to save figure to
Returns:
fig (Matplotlib plot object): plot of inequality measure
'''
assert isinstance(start_year, (int, np.integer))
assert (isinstance(num_years_to_plot, int))
# Make sure both runs cover same time period
if reform_tpi:
assert (base_params.start_year == reform_params.start_year)
assert ineq_measure in ['gini', 'var_of_logs', 'pct_ratio',
'top_share']
    if ineq_measure == 'pct_ratio' or ineq_measure == 'top_share':
assert pctiles
year_vec = np.arange(start_year, start_year + num_years_to_plot)
# Check that reform included if doing pct_diff or diff plot
if plot_type == 'pct_diff' or plot_type == 'diff':
assert (reform_tpi is not None)
fig1, ax1 = plt.subplots()
base_values = np.zeros(num_years_to_plot)
for t in range(num_years_to_plot):
idx = (t + start_year) - base_params.start_year
ineq = Inequality(
base_tpi[var][idx, :, :], base_params.omega[idx, :],
base_params.lambdas, base_params.S, base_params.J)
if ineq_measure == 'gini':
base_values[t] = ineq.gini()
ylabel = r'Gini Coefficient'
elif ineq_measure == 'var_of_logs':
base_values[t] = ineq.var_of_logs()
ylabel = r'var(ln(' + VAR_LABELS[var] + r'))'
elif ineq_measure == 'pct_ratio':
base_values[t] = ineq.ratio_pct1_pct2(pctiles[0],
pctiles[1])
ylabel = r'Ratio'
elif ineq_measure == 'top_share':
base_values[t] = ineq.top_share(pctiles)
ylabel = r'Share of Total ' + VAR_LABELS[var]
if reform_tpi:
reform_values = np.zeros_like(base_values)
for t in range(num_years_to_plot):
idx = (t + start_year) - base_params.start_year
ineq = Inequality(
reform_tpi[var][idx, :, :], reform_params.omega[idx, :],
reform_params.lambdas, reform_params.S, reform_params.J)
if ineq_measure == 'gini':
reform_values[t] = ineq.gini()
elif ineq_measure == 'var_of_logs':
reform_values[t] = ineq.var_of_logs()
elif ineq_measure == 'pct_ratio':
reform_values[t] = ineq.ratio_pct1_pct2(pctiles[0],
pctiles[1])
elif ineq_measure == 'top_share':
reform_values[t] = ineq.top_share(pctiles)
if plot_type == 'pct_diff':
plot_var = (reform_values - base_values) / base_values
ylabel = r'Pct. change'
plt.plot(year_vec, plot_var)
elif plot_type == 'diff':
plot_var = reform_values - base_values
ylabel = r'Difference'
plt.plot(year_vec, plot_var)
elif plot_type == 'levels':
plt.plot(year_vec, base_values, label='Baseline')
if reform_tpi:
plt.plot(year_vec, reform_values, label='Reform')
# vertical markers at certain years
if vertical_line_years:
for yr in vertical_line_years:
plt.axvline(x=yr, linewidth=0.5, linestyle='--', color='k')
plt.xlabel(r'Year $t$')
plt.ylabel(ylabel)
if plot_title:
plt.title(plot_title, fontsize=15)
vals = ax1.get_yticks()
if plot_type == 'pct_diff':
ax1.set_yticklabels(['{:,.2%}'.format(x) for x in vals])
plt.xlim((base_params.start_year - 1, base_params.start_year +
num_years_to_plot))
plt.legend(loc=9, bbox_to_anchor=(0.5, -0.15), ncol=2)
if path:
fig_path1 = os.path.join(path)
plt.savefig(fig_path1, bbox_inches="tight")
else:
return fig1
plt.close()
```
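A minimal usage sketch of `inequality_plot` (not part of the module itself): it assumes `base_tpi`, `base_params`, `reform_tpi`, `reform_params`, and `save_path` already exist, e.g. as loaded inside `plot_all` above; the variable choice and output file name are illustrative only.
```python
# Hypothetical call: plot the Gini coefficient of consumption over the first
# 30 years of the transition path and save the figure to disk.
inequality_plot(
    base_tpi, base_params, reform_tpi=reform_tpi,
    reform_params=reform_params, var='c_path', ineq_measure='gini',
    plot_type='levels', num_years_to_plot=30,
    plot_title='Gini Coefficient of Consumption',
    path=os.path.join(save_path, 'GiniConsumption.png'))
```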
#### File: ogcore/tests/test_output_plots.py
```python
import pytest
import os
import numpy as np
import matplotlib.image as mpimg
from ogcore import utils, output_plots
# Load in test results and parameters
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
base_ss = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_vars_baseline.pkl'))
base_ss['r_p_ss'] = base_ss.pop('r_hh_ss')
base_tpi = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TPI_vars_baseline.pkl'))
base_tpi['r_p'] = base_tpi.pop('r_hh')
base_params = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'model_params_baseline.pkl'))
reform_ss = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'SS_vars_reform.pkl'))
reform_ss['r_p_ss'] = reform_ss.pop('r_hh_ss')
reform_tpi = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TPI_vars_reform.pkl'))
reform_tpi['r_p'] = reform_tpi.pop('r_hh')
reform_params = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'model_params_reform.pkl'))
reform_taxfunctions = utils.safe_read_pickle(
os.path.join(CUR_PATH, 'test_io_data', 'TxFuncEst_reform.pkl'))
test_data = [(base_tpi, base_params, reform_tpi, reform_params,
'pct_diff', None, None),
(base_tpi, base_params, reform_tpi, reform_params, 'diff',
None, None),
(base_tpi, base_params, reform_tpi, reform_params, 'cbo',
None, None),
(base_tpi, base_params, reform_tpi, reform_params,
'levels', None, None),
(base_tpi, base_params, None, None, 'levels', None, None),
(base_tpi, base_params, None, None, 'levels', [2040, 2060],
None),
(base_tpi, base_params, None, None, 'levels', None,
'Test plot title')
]
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi,reform_parms,plot_type,' +
'vertical_line_years,plot_title',
test_data, ids=['Pct Diff', 'Diff', 'CBO', 'Levels w reform',
'Levels w/o reform', 'Vertical line included',
'Plot title included'])
def test_plot_aggregates(base_tpi, base_params, reform_tpi,
reform_parms, plot_type, vertical_line_years,
plot_title):
fig = output_plots.plot_aggregates(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, var_list=['Y', 'r'],
plot_type=plot_type, num_years_to_plot=20,
vertical_line_years=vertical_line_years, plot_title=plot_title)
assert fig
test_data = [(base_tpi, base_params, None, None, None, None),
(base_tpi, base_params, reform_tpi, reform_params, None,
None),
(base_tpi, base_params, reform_tpi, reform_params,
[2040, 2060], None),
(base_tpi, base_params, None, None, None,
'Test plot title')
]
def test_plot_aggregates_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.plot_aggregates(
base_tpi, base_params, plot_type='levels', path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
test_data = [(base_tpi, base_params, None, None, None, None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, None,
None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, None,
None, 'diffs'),
(base_tpi, base_params, reform_tpi, reform_params,
[2040, 2060], None, 'levels'),
(base_tpi, base_params, None, None, None,
'Test plot title', 'levels')
]
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi,reform_params,' +
'vertical_line_years,plot_title,plot_type',
test_data, ids=['No reform', 'With reform', 'Differences',
'Vertical line included', 'Plot title included'])
def test_plot_gdp_ratio(base_tpi, base_params, reform_tpi,
reform_params, vertical_line_years, plot_title,
plot_type):
fig = output_plots.plot_gdp_ratio(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, plot_type=plot_type,
vertical_line_years=vertical_line_years, plot_title=plot_title)
assert fig
def test_plot_gdp_ratio_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.plot_aggregates(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_ability_bar():
fig = output_plots.ability_bar(
base_tpi, base_params, reform_tpi, reform_params,
plot_title=' Test Plot Title')
assert fig
def test_ability_bar_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ability_bar(
base_tpi, base_params, reform_tpi, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_ability_bar_ss():
fig = output_plots.ability_bar_ss(
base_ss, base_params, reform_ss, reform_params,
plot_title=' Test Plot Title')
assert fig
data_for_plot = np.ones(80) * 0.3
@pytest.mark.parametrize(
'by_j,plot_data',
[(True, None), (False, None), (False, data_for_plot)],
ids=['By j', 'Not by j', 'Plot data'])
def test_ss_profiles(by_j, plot_data):
fig = output_plots.ss_profiles(
base_ss, base_params, reform_ss, reform_params, by_j=by_j,
plot_data=plot_data, plot_title=' Test Plot Title')
assert fig
def test_ss_profiles_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ss_profiles(
base_ss, base_params, reform_ss, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'by_j', [True, False], ids=['By j', 'Not by j'])
def test_tpi_profiles(by_j):
fig = output_plots.tpi_profiles(
base_tpi, base_params, reform_tpi, reform_params, by_j=by_j,
plot_title=' Test Plot Title')
assert fig
test_data = [(base_params, base_ss, None, None, 'levels', None),
(base_params, base_ss, reform_params, reform_ss, 'levels',
None),
(base_params, base_ss, reform_params, reform_ss, 'diff',
None),
(base_params, base_ss, reform_params, reform_ss,
'pct_diff', None),
(base_params, base_ss, reform_params, reform_ss,
'pct_diff', 'Test Plot Title')
]
def test_tpi_profiles_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.tpi_profiles(
base_tpi, base_params, reform_tpi, reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'base_params,base_ss,reform_params,reform_ss,plot_type,plot_title',
test_data, ids=['Levels', 'Levels w/ reform', 'Differences',
'Pct Diffs', 'Plot title included'])
def test_ss_3Dplot(base_params, base_ss, reform_params, reform_ss,
plot_type, plot_title):
fig = output_plots.ss_3Dplot(
base_params, base_ss, reform_params=reform_params,
reform_ss=reform_ss, plot_type=plot_type, plot_title=plot_title)
assert fig
def test_ss_3Dplot_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.ss_3Dplot(
base_params, base_ss, reform_params=reform_params,
reform_ss=reform_ss, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
@pytest.mark.parametrize(
'base_tpi,base_params,reform_tpi, reform_params,ineq_measure,' +
'pctiles,plot_type',
[(base_tpi, base_params, None, None, 'gini', None, 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'gini', None,
'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'var_of_logs',
None, 'diff'),
(base_tpi, base_params, reform_tpi, reform_params, 'pct_ratio',
(0.9, 0.1), 'levels'),
(base_tpi, base_params, reform_tpi, reform_params, 'top_share',
(0.01), 'pct_diff')],
ids=['Just baseline', 'Baseline + Reform',
         'Base + Reform, var logs, diff',
         'Base + Reform, pct ratios',
         'Base + Reform, top share, pct diff'])
def test_inequality_plot(base_tpi, base_params, reform_tpi,
reform_params, ineq_measure, pctiles,
plot_type):
fig = output_plots.inequality_plot(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, ineq_measure=ineq_measure,
pctiles=pctiles, plot_type=plot_type)
assert fig
def test_inequality_plot_save_fig(tmpdir):
path = os.path.join(tmpdir, 'test_plot.png')
output_plots.inequality_plot(
base_tpi, base_params, reform_tpi=reform_tpi,
reform_params=reform_params, path=path)
img = mpimg.imread(path)
assert isinstance(img, np.ndarray)
def test_plot_all(tmpdir):
base_output_path = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
reform_output_path = os.path.join(CUR_PATH, 'test_io_data', 'OUTPUT')
output_plots.plot_all(base_output_path, reform_output_path, tmpdir)
img1 = mpimg.imread(os.path.join(tmpdir, 'MacroAgg_PctChange.png'))
img2 = mpimg.imread(os.path.join(
tmpdir, 'SSLifecycleProfile_Cons_Reform.png'))
img3 = mpimg.imread(os.path.join(
tmpdir, 'SSLifecycleProfile_Save_Reform.png'))
assert isinstance(img1, np.ndarray)
assert isinstance(img2, np.ndarray)
assert isinstance(img3, np.ndarray)
``` |
{
"source": "jpyeah/alimama-python",
"score": 3
} |
#### File: jpyeah/alimama-python/alimama.py
```python
import json
import time
import requests
from selenium import webdriver
class Spider(object):
def __init__(self):
self.web = webdriver.Chrome()
self.__username = '钟继飘'
self.__password = '<PASSWORD>'
self.headers = {
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-CN,zh;q=0.8,en;q=0.6',
'cache-control': 'max-age=0',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36'
}
self.req = requests.Session()
self.cookies = {}
    # Log in
def login(self):
self.web.get(
'https://login.taobao.com/member/login.jhtml?style=mini&newMini2=true&css_style=alimama&from=alimama&redirectURL=http%3A%2F%2Fwww.alimama.com&full_redirect=true&disableQuickLogin=true')
self.web.find_element_by_class_name('login-switch').click()
self.web.find_element_by_id('TPL_username_1').send_keys(self.__username)
self.web.find_element_by_id('TPL_password_1').send_keys(self.__password)
time.sleep(2)
self.web.find_element_by_id('J_SubmitStatic').click()
        # Wait 5 seconds
time.sleep(5)
self.web.get('http://pub.alimama.com/myunion.htm')
cookie = ''
for elem in self.web.get_cookies():
cookie += elem["name"] + "=" + elem["value"] + ";"
if elem["name"] == '_tb_token_':
self.token = elem["value"]
self.cookies = cookie
self.headers['Cookie'] = self.cookies
self.web.quit()
    # Convert a link and get the promotion code
def get_goods_list(self):
res = self.req.get(
'http://pub.alimama.com/urltrans/urltrans.json?pvid=&_input_charset=utf-8&promotionURL=https://item.taobao.com/item.htm?id=559103665936&siteid=39800881&adzoneid=150032591&t=&_tb_token_=' + self.token,
headers=self.headers
)
print(res.text)
    # Get the Taobao affiliate (taoke) order list
def get_taoke_order_list(self):
url = 'http://pub.alimama.com/report/getTbkPaymentDetails.json?startTime=2017-05-28&endTime=2017-08-03&payStatus=&queryType=1&toPage=1&perPageSize=50&total=&t=1501747895837&pvid=&_tb_token_=<PASSWORD>&_input_charset=utf-8'
web_data = self.req.get(url, headers=self.headers)
data = json.loads(web_data.text)
print(data['data']['paymentList'])
    # Create an ad zone (promotion placement)
def add_ad(self):
name = input()
res = self.req.post('http://pub.alimama.com/common/adzone/selfAdzoneCreate.json', data={
'tag': '29',
'gcid': '8',
            'siteid': 'xxxxxxxx',  # Replace this with your shopping-guide site ID
'selectact': 'add',
'newadzonename': name,
'_tb_token_': self.token
}, headers=self.headers)
print(res.text)
    # Get the list of ad zones
def get_ad_list(self):
res = self.req.get(
'http://pub.alimama.com/common/adzone/adzoneManage.json?tab=3&toPage=1&perPageSize=40&gcid=8',
headers=self.headers)
print(res.text)
if __name__ == '__main__':
sp = Spider()
sp.login()
sp.get_goods_list()
# sp.add_ad()
# sp.get_ad_list()
# for i in range(1000):
# sp.get_taoke_order_list()
# time.sleep(30)
``` |
{
"source": "jpy-git/flake8-bugbear",
"score": 2
} |
#### File: flake8-bugbear/tests/b006_b008.py
```python
import collections
import datetime as dt
import logging
import operator
import random
import re
import time
import types
from operator import attrgetter, itemgetter, methodcaller
from types import MappingProxyType
# B006
# Allow immutable literals/calls/comprehensions
def this_is_okay(value=(1, 2, 3)):
...
async def and_this_also(value=tuple()):
pass
def frozenset_also_okay(value=frozenset()):
pass
def mappingproxytype_okay(
value=MappingProxyType({}), value2=types.MappingProxyType({})
):
pass
def re_compile_ok(value=re.compile("foo")):
pass
def operators_ok(
v=operator.attrgetter("foo"),
v2=operator.itemgetter("foo"),
v3=operator.methodcaller("foo"),
):
pass
def operators_ok_unqualified(
v=attrgetter("foo"),
v2=itemgetter("foo"),
v3=methodcaller("foo"),
):
pass
def kwonlyargs_immutable(*, value=()):
...
# Flag mutable literals/comprehensions
def this_is_wrong(value=[1, 2, 3]):
...
def this_is_also_wrong(value={}):
...
def and_this(value=set()):
...
def this_too(value=collections.OrderedDict()):
...
async def async_this_too(value=collections.defaultdict()):
...
def dont_forget_me(value=collections.deque()):
...
# N.B. we're also flagging the function call in the comprehension
def list_comprehension_also_not_okay(default=[i**2 for i in range(3)]):
pass
def dict_comprehension_also_not_okay(default={i: i**2 for i in range(3)}):
pass
def set_comprehension_also_not_okay(default={i**2 for i in range(3)}):
pass
def kwonlyargs_mutable(*, value=[]):
...
# Recommended approach for mutable defaults
def do_this_instead(value=None):
if value is None:
value = set()
# B008
# Flag function calls as default args (including if they are part of a sub-expression)
def in_fact_all_calls_are_wrong(value=time.time()):
...
def f(when=dt.datetime.now() + dt.timedelta(days=7)):
pass
def can_even_catch_lambdas(a=(lambda x: x)()):
...
# Recommended approach for function calls as default args
LOGGER = logging.getLogger(__name__)
def do_this_instead_of_calls_in_defaults(logger=LOGGER):
# That makes it more obvious that this one value is reused.
...
# Handle inf/infinity/nan special case
def float_inf_okay(value=float("inf")):
pass
def float_infinity_okay(value=float("infinity")):
pass
def float_plus_infinity_okay(value=float("+infinity")):
pass
def float_minus_inf_okay(value=float("-inf")):
pass
def float_nan_okay(value=float("nan")):
pass
def float_minus_NaN_okay(value=float("-NaN")):
pass
def float_infinity_literal(value=float("1e999")):
pass
# But don't allow standard floats
def float_int_is_wrong(value=float(3)):
pass
def float_str_not_inf_or_nan_is_wrong(value=float("3.14")):
pass
# B006 and B008
# We should handle arbitrary nesting of these B008.
def nested_combo(a=[float(3), dt.datetime.now()]):
pass
# Don't flag nested B006 since we can't guarantee that
# it isn't made mutable by the outer operation.
def no_nested_b006(a=map(lambda s: s.upper(), ["a", "b", "c"])):
pass
# B008-ception.
def nested_b008(a=random.randint(0, dt.datetime.now().year)):
pass
# Ignore lambda contents since they are evaluated at call time.
def foo(f=lambda x: print(x)):
f(1)
```
#### File: flake8-bugbear/tests/b017.py
```python
import asyncio
import unittest
CONSTANT = True
def something_else() -> None:
for i in (1, 2, 3):
print(i)
class Foo:
pass
class Foobar(unittest.TestCase):
def evil_raises(self) -> None:
with self.assertRaises(Exception):
raise Exception("Evil I say!")
def context_manager_raises(self) -> None:
with self.assertRaises(Exception) as ex:
raise Exception("Context manager is good")
self.assertEqual("Context manager is good", str(ex.exception))
def regex_raises(self) -> None:
with self.assertRaisesRegex(Exception, "Regex is good"):
raise Exception("Regex is good")
def raises_with_absolute_reference(self):
with self.assertRaises(asyncio.CancelledError):
Foo()
``` |
{
"source": "jpylephilalegal/docassemble-electronicsignaturels",
"score": 3
} |
#### File: docassemble/electronicsignaturels/functions.py
```python
from docassemble.base.util import user_info, get_config, url_of, interview_url
import re
__all__ = ['short_url', 'form_email_address']
def short_url():
info = user_info()
url = None
for key, val in get_config('dispatch').iteritems():
interview_name = re.sub(r'\:([^\/]+)$', r':data/questions/\1', val)
if interview_name == info.filename:
url = '%sstart/%s?session=%s' % (url_of('root', _external=True), key, info.session)
break
if url is None:
url = interview_url()
return url
def form_email_address(name, email):
return '"' + re.sub(r'[^ A-Za-z]', '', unicode(name)) + '"' + " <" + unicode(email) + ">"
``` |
{
"source": "jpylephilalegal/docassemble-michildsupport",
"score": 2
} |
#### File: docassemble/michildsupport/misc.py
```python
from bs4 import BeautifulSoup as Soup
import re
import json
import sys
import shutil
import tempfile
import os
import subprocess
from pathlib import Path
from docassemble.base.util import log, path_and_mimetype, validation_error, DADict, DAList, Individual, value, force_ask, space_to_underscore
__all__ = ['run_automation', 'noquote', 'number_with_max', 'retirement_index_increment', 'ParentDict', 'ChildrenList']
class ParentDict(DADict):
def init(self, *pargs, **kwargs):
super().init(*pargs, **kwargs)
self.object_type = Individual
self.auto_gather = False
class ChildrenList(DAList):
def init(self, *pargs, **kwargs):
super().init(*pargs, **kwargs)
self.object_type = Individual
self.ask_number = True
def hook_on_gather(self):
if 'C' in value('child_support_group') and not any(child.lives_with_non_parent_custodian for child in self.elements):
force_ask('no_child_with_guardian')
def hook_after_gather(self):
self.sort(key=lambda y: y.birthdate, reverse=True)
def retirement_index_increment(parent):
if parent.tax_method == 'estimated':
for income_source in parent.income_sources:
if income_source.type == 'Employer Wages' and income_source.must_contribute_to_retirement and income_source.mandatory_percentage > 0:
return 1
return 0
def number_with_max(number, maximum):
if number >= maximum:
return str(maximum) + '+'
return str(number)
def noquote(text):
if re.search(r'[^A-Za-z\' 0-9\_\-\n\r]', text):
raise validation_error("You are only allowed to type characters A-Z, a-z, 0-9, and -.")
return True
def run_automation(feature_file, html_file, png_file, json_file, base_name):
base_name = space_to_underscore(base_name)
try:
with tempfile.TemporaryDirectory(prefix='datemp') as temp_directory:
output_file = os.path.join(temp_directory, 'output.html')
output_png = os.path.join(temp_directory, 'output.png')
features_directory = shutil.copytree(path_and_mimetype('data/sources/features')[0], os.path.join(temp_directory, 'features'))
shutil.copyfile(feature_file, os.path.join(features_directory, 'calculate.feature'))
Path(os.path.join(features_directory, '__init__.py')).touch()
Path(os.path.join(features_directory, 'steps', '__init__.py')).touch()
output = ''
with open(feature_file, encoding='utf-8') as x:
output += x.read()
try:
commands = ["aloe", "--stop", "--verbosity=3", "features/calculate.feature"]
output += "\n\n" + ' '.join(commands) + "\n"
#output += subprocess.check_output(["ls", "-lR"], cwd=temp_directory, stderr=subprocess.STDOUT).decode()
output += subprocess.check_output(commands, cwd=temp_directory, stderr=subprocess.STDOUT).decode()
success = True
except subprocess.CalledProcessError as err:
output += err.output.decode()
success = False
if success:
if os.path.isfile(output_file):
html_file.initialize(filename=base_name + '.html')
html_file.copy_into(output_file)
html_file.commit()
else:
success = False
output += "\nFile not found after process completed.\n"
if os.path.isfile(output_png):
png_file.initialize(filename=base_name + '.png')
png_file.copy_into(output_png)
png_file.commit()
# else:
# success = False
# output += "\nPNG file not found after process completed.\n"
except Exception as err:
success = False
output = err.__class__.__name__ + ": " + str(err)
if success:
try:
output_data = extract_data(html_file.path())
json_file.initialize(filename=base_name + '.json')
json_file.write(json.dumps(output_data, indent=2))
json_file.commit()
except Exception as err:
success = False
output += err.__class__.__name__ + ": " + str(err)
output_data = {"error": err.__class__.__name__, "message": str(err)}
else:
output_data = {}
return success, output, output_data
def process_table(table):
result = dict()
result['title'] = table.get('title', None)
result['columns'] = []
result['rows'] = []
result['footer'] = []
for head in table.find_all('thead', recursive=False):
result['columns'].append(head.get_text().strip())
for body in table.find_all('tbody', recursive=False):
for row in body.find_all('tr', recursive=False):
output_row = []
item = list()
for col in row.find_all('td', recursive=False):
output_row.append(fixup(col))
result['rows'].append(output_row)
for foot in table.find_all('tfoot', recursive=False):
result['footer'].append(foot.get_text().strip())
return result
def fixup(elem):
children = [item for item in elem.find_all(recursive=False) if item.name != 'br']
if len(children) == 1:
orig_elem = elem
elem = children[0]
#log("kids1: found a " + elem.name + " with " + repr(elem.get_text()))
if elem.name == 'output':
text = orig_elem.get_text().strip()
elif elem.name == 'div':
found = False
tables = list()
for table in elem.find_all('table'):
found = True
tables.append(process_table(table))
# for head in table.find_all('thead', recursive=False):
# tables.append(head.get_text().strip())
if found:
return tables
text = orig_elem.get_text().strip()
elif elem.name == 'table':
#tables = list()
#for head in elem.find_all('thead', recursive=False):
# tables.append(head.get_text().strip())
#return tables
return process_table(elem)
elif elem.name == 'input':
text = elem.get('value')
else:
#log("doing get text and strip")
text = elem.text.strip()
#log("doing elem is" + repr(text))
text = re.sub(r'<br/?>', ' ', text)
elif len(children) == 2 and children[0].name == 'table' and children[1].name == 'table':
return [process_table(children[0]), process_table(children[1])]
elif len(children) == 2 and children[0].name == 'a' and children[1].name == 'label':
text = children[1].get_text().strip()
elif len(children) == 2 and children[0].name == 'output' and children[1].name == 'output':
text = children[0].get_text().strip() + " " + children[1].get_text().strip()
elif len(children) == 3 and children[0].name == 'div' and children[1].name == 'div' and children[2].name == 'div':
#log("Triple div first kid is " + repr(str(children[0])))
text = children[0].get_text().strip() + " " + children[1].get_text().strip() + " " + children[2].get_text().strip()
#log("Triple div Got " + repr(text))
elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'div':
text = children[0].get_text().strip() + " " + children[1].get_text().strip()
elif len(children) == 2 and children[0].name == 'strong' and children[1].name == 'strong':
text = children[0].get_text().strip() + " " + children[1].get_text().strip()
elif len(children) == 2 and children[0].name == 'p' and children[1].name == 'p':
text = children[0].get_text().strip() + " " + children[1].get_text().strip()
elif len(children) == 2 and children[0].name == 'div' and children[1].name == 'p':
text = children[1].get_text().strip()
else:
#log("found a " + elem.name + " with " + repr(elem.get_text()))
#log("kids is " + ";".join(repr(item.name) for item in children))
text = elem.decode_contents().strip()
#log("elem is" + repr(text))
text = re.sub(r'<br/?>', ' ', text)
if not isinstance(text, str):
return text
    text = re.sub(u'\xa0', ' ', text)  # normalize non-breaking spaces
text = re.sub(r' +', ' ', text)
text = re.sub(r'\n\t+', ' ', text)
text = text.strip()
m = re.search(r'^\$([0-9]+\.[0-9][0-9])$', text)
if m:
text = float(m.group(1))
return text
def nulltruefalse(item):
if isinstance(item, str):
if item in ('false', 'No'):
return False
if item in ('true', 'Yes'):
return True
if item in ('-', ''):
return None
if re.search(r'^\-?[0-9]+$', item):
try:
return int(item)
except:
pass
if '.' in item and re.search(r'^\-?[0-9\.]+$', item):
try:
return float(item)
except:
pass
if re.search(r'^[0-9\.]+\%$', item):
try:
return float(item[0:-1])/100.0
except:
pass
return item
def get_amount_potential(text):
if not isinstance(text, str):
return (text, False)
if '(PC)' in text:
potential = True
else:
potential = False
m = re.search(r'^\$([0-9\.]+)', text)
if m:
try:
text = float(m.group(1))
except:
pass
return (text, potential)
def extract_data(filename):
results = {"parts": [], "hidden": {}, "summary": []}
with open(filename) as fp:
s = Soup(fp.read(), "html.parser")
for inp in s.select('input[type="hidden"]'):
results['hidden'][inp.get('id') or inp.get('name')] = inp.get('value')
for i in range(3):
for div in s.select('#showResult' + str(i)):
link_text = div.get_text().strip()
link_text = re.sub(r'\s+', ' ', link_text)
link_text = re.sub(r'Show Result [0-9]+: ', '', link_text)
results['summary'].append(link_text)
for div in s.select('#paymentRelationship' + str(i)):
result = {}
for table in div.find_all('table', recursive=False):
heading = None
for head in table.find_all('thead', recursive=False):
heading = head.get_text().strip()
if not heading:
raise Exception("Table has no heading")
heading = re.sub(r'^Section:\s*', '', heading)
result[heading] = []
for body in table.find_all('tbody', recursive=False):
for row in body.find_all('tr', recursive=False):
item = list()
for col in row.find_all('td', recursive=False):
item.append(fixup(col))
result[heading].append(item)
results['parts'].append(result)
#log("Raw:")
#log(json.dumps(results, indent=2))
main_output = {'results': [], 'information': {}, 'summaries': []}
for part in results['parts']:
output = dict()
for item in ('General Information', 'Eliminate Ordinary Medical Expenses', 'Calculation Results', 'Children', 'Financial', 'Base Support Calculation', 'Child Care'):
if item not in part:
raise Exception(item + " not found")
for item in part['General Information']:
if item[0] == 'Court Case Number' and len(item) >= 4:
output['Court Case Number'] = item[1]
if item[2] == 'Court Case County':
output['Court Case County'] = item[3]
elif item[0] == 'Calculation Parties' and len(item) >= 4:
output['Calculation Parties'] = [item[1], item[3]]
elif item[0] == 'Description' and len(item) > 1:
output['Description'] = item[1]
elif item[0] == 'Michigan Child Support Formula Year' and len(item) >= 6:
output[item[0]] = item[1]
output[item[2]] = item[3]
output[item[4]] = item[5]
headers = None
for item in part['Eliminate Ordinary Medical Expenses']:
if item[0] == "":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Eliminate Ordinary Medical Expenses")
subout = dict()
for item in part['Eliminate Ordinary Medical Expenses']:
if item[0] == "":
continue
if len(item) == 1 + len(headers):
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
subout[item[0]] = subsubout
if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
subout[item[0]] = item[1]
output['Eliminate Ordinary Medical Expenses'] = subout
headers = None
for item in part['Calculation Results']:
if item[0] == "":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Calculation Results")
subout = dict()
for item in part['Calculation Results']:
if item[0] == "":
continue
if len(item) == 1 + len(headers):
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
subout[item[0]] = subsubout
if len(item) == 2 and item[0] == 'Select Reason for Eliminating the Ordinary Medical Expense(s):':
subout[item[0]] = item[1]
output['Calculation Results'] = subout
headers = None
for item in part['Children']:
if item[0] == "Children's Overnights Spent Per Year":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Children")
subout = dict()
overnights = dict()
for item in part['Children']:
if item[0] == "Children's Overnights Spent Per Year":
continue
if len(item) == 1 + len(headers):
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
if item[0] in ('Additional Children from Other Relationships', 'Child Support Children in Other Payment Relationships', 'Total Other Children', 'Income Adjustment Percentage Multiplier'):
subout[item[0]] = subsubout
else:
for i in range(len(headers)):
if headers[i] not in overnights:
overnights[headers[i]] = dict()
overnights[headers[i]][item[0]] = nulltruefalse(item[i + 1])
subout["Children's Overnights Spent Per Year"] = overnights
output["Children"] = subout
subout = dict(notes=list())
headers = None
for item in part['Financial']:
if item[0] == "See 2021 MCSF 2.01":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Financial")
for item in part['Financial']:
if len(item) > 0 and isinstance(item[0], list):
if len(item[0]) > len(headers):
raise Exception("Unrecognized row of tables in Financial section. Expected " + str(len(headers)) + " and got " + str(len(item[0])) + " where content is " + repr(item[0]) + " and headers are " + repr(headers))
for i in range(len(headers)):
if i >= len(item[0]):
continue
table = item[0][i]
if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
raise Exception("Unrecognized table " + repr(table) + " in Financial section")
table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
if table_title not in subout:
subout[table_title] = dict()
subsubout = dict()
for subitem in table['rows']:
if not len(subitem) == 2:
raise Exception("Unrecognized row in table in Financial section")
subsubout[subitem[0]] = subitem[1]
subout[table_title][headers[i]] = subsubout
elif len(item) == 1 and isinstance(item[0], str):
subout['notes'].append(item[0])
elif len(item) == 2:
subout[item[0]] = item[1]
elif len(item) == 1 + len(headers):
if item[0] in ("See 2021 MCSF 2.01", "Additional Deductions"):
continue
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
label = item[0]
label = re.sub(r' See 2021 MCSF 2.01', '', item[0])
subout[label] = subsubout
output["Financial"] = subout
subout = dict()
headers = None
for item in part['Base Support Calculation']:
if item[0] == "See 2021 MCSF 3.02(A)":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Base Support Calculation")
for item in part['Base Support Calculation']:
if not len(item) == 1 + len(headers):
raise Exception("Unrecognized row in Base Support Calculation")
if item[0] == "See 2021 MCSF 3.02(A)":
continue
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
subout[item[0]] = subsubout
output["Base Support Calculation"] = subout
subout = dict(notes=list())
reimbursement_end_dates = list()
headers = None
for item in part['Child Care']:
if len(item) and item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Child Care")
for item in part['Child Care']:
if len(item) > 0 and isinstance(item[0], list):
if len(item[0]) != len(headers):
raise Exception("Unrecognized row of tables in Child Care section")
for i in range(len(headers)):
table = item[0][i]
if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
raise Exception("Unrecognized table " + repr(table) + " in Child Care section")
if len(table['rows']) == 1:
continue
table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
table_title = re.sub(r'Child Care Expense Information Table', 'Child Care Expenses Information Table', table_title)
if table_title not in subout:
subout[table_title] = dict()
subsubout = list()
for subitem in table['rows']:
if not len(subitem) == 2:
raise Exception("Unrecognized row in table in Child Care section")
if subitem[0] == 'Months':
if len(subsubout) == 0:
raise Exception("Unrecognized Months row in Child Care section")
subsubout[-1]['months'] = subitem[1]
else:
amount, is_potential = get_amount_potential(subitem[1])
subsubout.append({'child': subitem[0], 'amount': amount, 'potential': is_potential})
subout[table_title][headers[i]] = subsubout
elif len(item) == 0:
continue
elif len(item) == 1 and isinstance(item[0], str):
subout['notes'].append(item[0])
elif len(item) == 2:
reimbursement_end_dates.append({'child': item[0], 'date': item[1]})
elif len(item) == 1 + len(headers):
if item[0] == "See 2021 MCSF 3.06(C) and 2021 MCSF 3.06(D)":
continue
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
subout[item[0]] = subsubout
subout["Reimbursement End Dates"] = reimbursement_end_dates
output["Medical"] = subout
subout = dict(notes=list())
headers = None
for item in part['Medical']:
if len(item) and item[0] == "See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)":
headers = item[1:]
break
if headers is None:
raise Exception("Could not find header row for Medical")
for item in part['Medical']:
if len(item) > 0 and isinstance(item[0], list):
if len(item[0]) != len(headers):
raise Exception("Unrecognized row of tables in Medical section")
for i in range(len(headers)):
table = item[0][i]
if not isinstance(table, dict) or 'title' not in table or 'columns' not in table or 'rows' not in table:
raise Exception("Unrecognized table " + repr(table) + " in Medical section")
if len(table['rows']) == 1:
continue
table_title = re.sub(r'^Party [0-9]+ ', '', table['title'])
if table_title not in subout:
subout[table_title] = dict()
subsubout = list()
for subitem in table['rows']:
if not len(subitem) == 2:
raise Exception("Unrecognized row in table in Medical section")
                        # parse the dollar amount out of the table cell
                        amount, _ = get_amount_potential(subitem[1])
                        subsubout.append({'child': subitem[0], 'amount': amount})
subout[table_title][headers[i]] = subsubout
if 'footer' in table:
subout[table_title + " Note"] = '\n'.join(table['footer'])
elif len(item) == 0:
continue
elif len(item) == 1 and isinstance(item[0], str):
subout['notes'].append(item[0])
elif len(item) == 2:
subout[item[0]] = item[1]
elif len(item) == 1 + len(headers):
if item[0] in ("See 2021 MCSF 3.05(C) See 2021 MCSF 3.04(B)", "Additional Out-of-pocket Medical Expenses Per Child"):
continue
subsubout = dict()
for i in range(len(headers)):
subsubout[headers[i]] = nulltruefalse(item[i + 1])
subout[item[0]] = subsubout
output["Medical"] = subout
main_output['results'].append(output)
for item, val in results['hidden'].items():
main_output["information"][item] = nulltruefalse(val)
for item in results['summary']:
main_output['summaries'].append(item)
return main_output
# if __name__ == "__main__":
# filename = 'mi-results.html'
# raw_data = extract_data('mi-results.html')
# print("Final:")
# print(json.dumps(raw_data, indent=2))
``` |
{
"source": "jpyoo/pysqlemail",
"score": 3
} |
#### File: pysqlemail/modules/Add_Image.py
```python
import pandas as pd
import openpyxl
from openpyxl.styles.alignment import Alignment
import os
# In[102]:
def addImage(path, imageCol):
wb = openpyxl.load_workbook(filename = path)
ws = wb.worksheets[0]
#style number must be in first column to add image
rows = range(2, ws.max_row+1)
for row in rows:
ws.row_dimensions[row].height = 150
columns = range(1, ws.max_column+1)
for row in rows:
for col in columns:
ws.cell(row, col).alignment = Alignment(horizontal='center', vertical='center', wrap_text=True)
for i in rows:
try:
jpegName = ws.cell(row = i, column = 1).value.strip()+"_front.jpg"
image = os.path.join("N:/IMAGES/200xImages/",jpegName)
img = openpyxl.drawing.image.Image(image)
img.anchor = imageCol+ str(i)
img.height = 200
ws.add_image(img)
except:
pass
wb.save(path)
``` |
{
"source": "jpypi/dup-image-search",
"score": 3
} |
#### File: dup-image-search/compare_gui/image_compare.py
```python
import sys
import argparse
import json
from PySide.QtGui import QApplication
from PySide.QtGui import QMainWindow
from PySide.QtGui import QBoxLayout
from PySide.QtGui import QWidget
from PySide.QtGui import QPushButton
from PySide.QtGui import QAction
from PySide.QtGui import QMessageBox
from PySide.QtGui import QLabel
from PySide.QtGui import QPixmap
from PySide.QtCore import Qt
class MainUI(QMainWindow):
def __init__(self, filepath):
super(MainUI, self).__init__()
self.title = "Image Verification GUI"
fd = open(filepath, "r")
self.sets = json.load(fd)
fd.close()
self.current_set_index = -1
self.within_set_index = -1
self.init_ui()
def init_ui(self):
# geometry is x offset, y offset, x width, y width
self.setGeometry(150, 150, 640, 300)
self.setWindowTitle(self.title)
menu_bar = self.menuBar()
file_menu = menu_bar.addMenu('&File')
exitAction = QAction('E&xit', self)
exitAction.setStatusTip('Exit the application.')
exitAction.triggered.connect(self.handle_exit)
file_menu.addAction(exitAction)
main_layout_container = QWidget()
main_layout = QBoxLayout(QBoxLayout.TopToBottom)
image_layout = QBoxLayout(QBoxLayout.LeftToRight)
image_layout.addStretch(1)
self.image1 = QLabel()
self.image1.setAlignment(Qt.AlignCenter)
image_layout.addWidget(self.image1)
image_layout.addWidget(QLabel("vs."))
self.image2 = QLabel()
self.image2.setAlignment(Qt.AlignCenter)
image_layout.addWidget(self.image2)
image_layout.addStretch(1)
main_layout.addLayout(image_layout)
main_layout.addStretch(1)
button_layout = QBoxLayout(QBoxLayout.LeftToRight)
button_layout.addStretch(1)
self.yes_button = QPushButton("Yes")
button_layout.addWidget(self.yes_button)
self.yes_button.clicked.connect(self.handle_yes_pressed)
self.no_button = QPushButton("No")
button_layout.addWidget(self.no_button)
self.no_button.clicked.connect(self.handle_no_pressed)
button_layout.addStretch(1)
main_layout.addLayout(button_layout)
main_layout_container.setLayout(main_layout)
self.image1_filepath = ""
self.image2_filepath = ""
self.load_more_images()
self.setCentralWidget(main_layout_container)
def handle_exit(self):
self.close()
def load_more_images(self):
if(self.within_set_index == -1):
print "New set."
self.current_set_index += 1
arr = self.sets["sets"][self.current_set_index]
if(len(arr) == 0):
ret = QMessageBox.information(self, "Image Verification UI",
"Ran out of images to compare.",
QMessageBox.Ok)
self.close()
self.image1_filepath = arr[0].strip()
self.image2_filepath = arr[1].strip()
#print self.image1_filepath
#print self.image2_filepath
self.image1.setPixmap(QPixmap(self.image1_filepath).scaledToHeight(192))
self.image2.setPixmap(QPixmap(self.image2_filepath).scaledToHeight(192))
self.within_set_index = 2
else:
arr = self.sets["sets"][self.current_set_index]
if(self.within_set_index >= len(arr)):
self.within_set_index = -1
self.load_more_images()
else:
self.image2_filepath = arr[self.within_set_index].strip()
#print self.image2_filepath
self.image2.setPixmap(QPixmap(self.image2_filepath).scaledToHeight(192))
self.within_set_index += 1
def handle_yes_pressed(self):
with open("user_results.txt", "a") as f:
f.write("{0}, {1}, {2}\n".format(
self.image1_filepath, self.image2_filepath, "YES"))
self.load_more_images()
def handle_no_pressed(self):
with open("user_results.txt", "a") as f:
f.write("{0}, {1}, {2}\n".format(
self.image1_filepath, self.image2_filepath, "NO"))
self.load_more_images()
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("filepath")
args = parser.parse_args()
app = QApplication(sys.argv)
ex = MainUI(args.filepath)
ex.show()
# Qt application main loop
app.exec_()
sys.exit()
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: dup-image-search/dct_hash/fast_dct_hash.py
```python
import sys
import numpy
import argparse
import glob
from PIL import Image
from scipy import fftpack
from multiprocessing import Pool
def calculate_dct_hash(image):
"""
Calculates the DCT (discrete cosine transform) hash of an image.
The basic steps (verbatim from hackerfactor, see heading):
1. Reduce size to 32x32
2. Reduce color to greyscale
3. Calculate the DCT
4. Take the top left only
5. Average using the first term of the low frequency values
6. Compute the 64 bits - 1 if above average, 0 if not
7. Construct the hash
"""
# reduce size to 32x32
image = image.resize((32, 32))
# convert to greyscale
image = image.convert("L")
# calculate the DCT
imgdata = image.getdata()
float_imgdata = [float(i) for i in imgdata]
dct_data = calculate_DCTII_2D(float_imgdata)
# Top left only
smaller_dct = dct_data[:8, :8]
average = (numpy.sum(smaller_dct) - smaller_dct[0,0])/64
hash = 0
i = 0
for x in smaller_dct.flat:
hash |= (x > average) << i
i += 1
return hash
def calculate_DCTII_2D(matrix):
"""
Calculates the 2D transform of the DCT II algorithm.
Assumes a square matrix.
See:
http://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II
We are using the plain version, which seems to work better.
"""
a = numpy.reshape(numpy.array(matrix), (32, 32))
return fftpack.dct(fftpack.dct(a.T).T)
def hash_directory(directory):
with open("dct_hashes.txt", "a") as f:
for filepath in glob.iglob("{0!s}/*".format(directory)):
try:
image = Image.open(filepath)
image_hash = calculate_dct_hash(image)
f.write("{0!s},{1!s}\n".format(image_hash, filepath))
except:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="directory to scan")
args = parser.parse_args()
hash_directory(args.directory)
```
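The DCT hashing script above only writes `hash,filepath` rows; as a hedged sketch (not code from this repository), near-duplicate candidates would typically be found afterwards by comparing the 64-bit perceptual hashes with a small Hamming distance, for example:
```python
# Illustrative sketch only: compare two 64-bit hashes produced by
# calculate_dct_hash (or the simple average hash later in this repo).
# The threshold of 10 differing bits is an assumed example value,
# not one taken from this project.
def hamming_distance(hash1, hash2):
    return bin(hash1 ^ hash2).count("1")

def probably_duplicates(hash1, hash2, threshold=10):
    return hamming_distance(hash1, hash2) <= threshold
```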
#### File: dup-image-search/md5/calculate_md5_hashes.py
```python
import sys
import glob
import csv
import hashlib
from multiprocessing import Pool
def md5_file(filename):
with open(filename) as f:
return (hashlib.md5(f.read()).hexdigest(),filename)
directories = ["a","b","c","d","e","f","g","h","i","j","k","l","m",
"n","o","p","q","r","s","t","the","u","v","w","x","y"]
try:
base_directory = sys.argv[1]
pool = Pool(8)
with open("md5sums.txt","w") as f:
writer = csv.writer(f)
for d in directories:
print "Calculating hashes for the {} directory.".format(d)
image_files = glob.iglob("{}/{}/*".format(base_directory,d))
for hash_and_name in pool.imap(md5_file, image_files):
writer.writerow(hash_and_name)
except IndexError:
print "{0}: Syntax: {0} <album covers base directory>".format(sys.argv[0])
sys.exit(0)
```
#### File: dup-image-search/md5/find_all_image_duplicates.py
```python
import sys
import argparse
import filecmp
from PIL import Image
def is_valid_image(filepath):
try:
image = Image.open(filepath)
image.verify()
except IndexError:
return False
except IOError:
return False
return True
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="file containing hashes")
args = parser.parse_args()
duplicates_file = open("exact_duplicates.txt", "w")
corruption_file = open("corrupt_images.txt", "w")
line_counter = 0
duplicate_counter = 0
corruption_counter = 0
hash_collisions = 0
with open(args.filename, "r") as f:
last_hash = None
identical_hash_filenames = []
line = f.readline()
while line:
line_arr = line.strip().split()
hash = line_arr[0]
image_filename = " ".join(line_arr[1:])
if(hash == last_hash):
found = False
for file in identical_hash_filenames:
if(filecmp.cmp(image_filename, file)):
duplicates_file.write(
"{0},{1}\n".format(image_filename, file))
duplicate_counter += 1
found = True
break
if(not found):
if(is_valid_image(image_filename)):
identical_hash_filenames.append(image_filename)
hash_collisions += 1
else:
corruption_file.write(
"{0}\n".format(image_filename))
corruption_counter += 1
else:
if(is_valid_image(image_filename)):
identical_hash_filenames = [image_filename]
else:
identical_hash_filenames = []
corruption_file.write(
"{0}\n".format(image_filename))
corruption_counter += 1
last_hash = hash
line_counter += 1
if(line_counter % 50000 == 0):
print "Update: scanned {0!s} files.".format(line_counter)
line = f.readline()
print "Scanned {0!s} files.".format(line_counter)
print "Total exact duplicates: {0!s}.".format(duplicate_counter)
print "Total corrupt files: {0!s}.".format(corruption_counter)
print "Hash collisions: {0!s}.".format(hash_collisions)
print "See {0} and {1} for more details.".format(
"exact_duplicates.txt", "corrupt_images.txt")
if __name__ == "__main__":
main(sys.argv[1:])
```
#### File: dup-image-search/simple_hash/fast_simple_hash.py
```python
import sys
import argparse
import numpy
import glob
from PIL import Image
from multiprocessing import Pool
def calculate_simple_hash(image):
"""
Calculates the simple hash of an image.
The basic steps (verbatim from hackerfactor, see heading):
1. Reduce size to 8x8
2. Reduce color to greyscale
3. Average the colors
4. Compute the 64 bits - 1 if above average, 0 if not
5. Construct the hash
"""
# reduce size to 8x8
image = image.resize((8, 8))
# convert to greyscale
image = image.convert("L")
# average the colors
imgdata = image.getdata()
average = numpy.mean(imgdata)
image_hash = 0
for i in xrange(0, len(imgdata)):
image_hash |= (imgdata[i] > average) << i
return image_hash
def hash_directory(directory):
with open("simple_hashes.txt", "a") as f:
for filepath in glob.iglob("{0!s}/*".format(directory)):
try:
image = Image.open(filepath)
image_hash = calculate_simple_hash(image)
f.write("{0!s},{1!s}\n".format(image_hash, filepath))
except:
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("directory", help="directory to scan")
args = parser.parse_args()
hash_directory(args.directory)
``` |
{
"source": "jpypi/Multitron",
"score": 3
} |
#### File: jpypi/Multitron/multitron1.py
```python
import numpy as np
import pickle
import collections
import pprint
# Load little mnist data
train, validate, test = np.array(pickle.load(open("littlemnist.pkl", "rb"), encoding = "latin1"))
# Add columns of 1s to the train, validate, and test sets
train[0] = np.c_[train[0], np.ones((len(train[0]), 1))]
validate[0] = np.c_[validate[0], np.ones((len(validate[0]), 1))]
test[0] = np.c_[test[0], np.ones((len(test[0]), 1))]
# Set learning rate (TODO: Maybe start this higher and decay in relation to err)
alpha = 0.4
n_classes = 10
dim_data = len(train[0][0])
# Initialize random weights (+1 to dim_data for bias)
w = np.random.rand(n_classes, dim_data)
# A zero vector for comparison later
z10 = np.zeros((n_classes,1))
def Classify(example):
return w.dot(example)
def OneHot(index, dim = 10):
"""
Converts an index into a one-hot encoded column vector.
"""
a = np.zeros((dim,1))
a[index] = 1
return a
def Validate():
"""
    Runs through all the validation data and reports how many examples are classified correctly.
"""
correct = sum((np.argmax(Classify(x))) == validate[1][x_i] for x_i, x in enumerate(validate[0]))
print("Validation set correct: %d"%correct)
Validate()
try:
iteration = 0
while True:
correct = 0
# Enumerate training examples
for x_i, x in enumerate(train[0]):
y = Classify(x)
d = OneHot(train[1][x_i])
# Move a proportion (alpha) of the difference between where we want to be
# and where we are
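            # Update rule: w <- w + alpha * (d - onehot(argmax(y))) * x; the
            # (n_classes, 1) delta broadcasts against the input vector x,
            # raising the true class's weights and lowering the predicted
            # class's weights whenever the prediction is wrong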
delta = d - OneHot(np.argmax(np.reshape(y, (10,1))))
#delta = d - np.reshape(y, (10,1))
w += alpha * delta * x
correct += np.alltrue(delta == z10)
if iteration % 10 == 0:
print("Train right: %d"%correct)
Validate()
# Break out once we achieve max on train data
if correct == 10000:
break
iteration += 1
except KeyboardInterrupt:
print()
# Calculate results on the test set
confusion = collections.defaultdict(lambda: [0]*10)
errors = 0
for x_i, x in enumerate(test[0]):
y = np.argmax(Classify(x))
confusion[test[1][x_i]][y] += 1
errors += test[1][x_i] != y
pprint.pprint(confusion)
print("Test set error: %f"%(errors/len(test[0])))
#with open("weights.bin", "wb") as f:
# pickle.dump(w, f)
``` |
{
"source": "jpypi/pandemic-game-ai",
"score": 3
} |
#### File: jpypi/pandemic-game-ai/pandemic_sim_engine.py
```python
import csv
from player import Player
from city import City
from cards import PlayerCard, PlayerCardDeck, ShuffleDeck
from game import Game
import helper_ai
class Graph:
def __init__(self, nodes) :
# Store the adjacency list as a dictionary
# 0 : { 1, 2 }
# 1 : { 3, 4 }
self.adjlist = {}
self.nodes = nodes
# Assuming that the edge is bidirectional
def AddEdge (self, src, dst) :
if src not in self.adjlist :
self.adjlist[src] = []
if dst not in self.adjlist :
self.adjlist[dst] = []
self.adjlist[src].append(dst)
#self.adjlist[dst].append(src) not needed for my case
def GetNeighbors(self, src):
return self.adjlist[src]
def Display_AdjList(self) :
for item in self.adjlist.items() :
print (item)
if __name__ == "__main__" :
#main area
test_game = Game(4,'hard',None)
test_game.print_game_state()
print("testing probability distribution")
print(helper_ai.calculate_drawing_infection_city_card(test_game,test_game.get_city_by_name('Atlanta')))
print("outbreak chance is " + str(helper_ai.calculate_probability_of_outbreak(test_game)))
"""
#Step 2: construct the graph
map = Graph(len(city_list))
city_id_to_name = {}
city_name_to_id = {}
for c in city_list:
# create a dictionary of the city ID to name and a dictionary of name to ID
real_id = int(c.ID)-1
city_id_to_name[real_id] = c.name
city_name_to_id[c.name] = real_id
for d in c.neighbors:
map.AddEdge(real_id,int(d)-1)
print("adjaceny list for storing graph")
#map.Display_AdjList()
"""
"""
#Display which cities have infection and research stations
print("====================")
print("Diseased Cities List")
for c in city_list:
c.ShowDiseaseStatus()
print("====================")
print("Research Center List")
for c in city_list:
c.ShowResearchCenterStatus()
print("====================")
print("Players")
#display player cards and roles
for p in player_list:
p.ShowCharacter(city_list)
p.ShowActionOptions(city_list, player_list, None)
print("====================")
#--------
#Print a demo players options
"""
```
#### File: jpypi/pandemic-game-ai/player.py
```python
class Player:
def __init__(self, role, name, ID):
self.role = role
self.ID = ID
self.name = name
self.position_id = 0
self.card_list = []
self.stored_card = []
self.current_action_count = 0
@property
def city_cards(self):
return filter(lambda card: card.kind == "city", self.card_list)
@property
def event_cards(self):
return filter(lambda card: card.kind == "event", self.card_list)
def HasEventCard(self, card_name):
for c in p.event_cards:
if c.name == card_name:
return True
return False
def reset_action_counter(self):
self.current_action_count = 0
def increment_action_counter(self):
self.current_action_count += 1
def ShowCharacter(self, city_list):
print("----------")
print(f"{self.name} is the {self.role} and is in {city_list[self.position_id].name}")
self.ShowHand()
def ShowHand(self):
for c in self.card_list:
print(c)
def ShowTeamActions(self, players):
if self.ForecastChoice(players):
print("Can use Forecast")
if self.AirliftChoice(players):
print("Can use Airlift")
if self.QuietNightChoice(players):
print("Can use Quiet Night")
if self.GovernmentGrantChoice(players):
print("Can use Government Grant")
if self.ResilientPopChoice(players):
print("Can use Resilient Population")
def ShowActionOptions(self, city_list, players, player_discard):
print("Showcasing all actions for " + self.name)
#ferry action
ferry_opts = self.FerryActionChoices(city_list)
for k in range(len(ferry_opts)):
print("Ferry to " + city_list[ferry_opts[k]].name)
#shuttle action
value = self.ShuttleActionChoices(city_list)
if value != 0:
for k in range(len(value)):
print("Shuttle to " + city_list[value[k]].name)
else:
print("No shuttles available")
#charter Options
if self.CharterFlightChoices(city_list):
print("Can charter a flight anywhere")
else:
print("No Charter flight available")
#Direct Flight Choices
direct_opts = self.DirectFlightChoices()
for k in range(len(direct_opts)):
print("Direct Flight available to " + city_list[direct_opts[k]].name)
#Build Research Center
if self.BuildResearchStationChoice(city_list):
print("Can build Research Station")
else:
print("Cannot build Research Station")
#Treat Diseases Choices
val = self.TreatDiseaseChoices(city_list)
print("Can treat " + str(val) + " disease cubes")
#Cure Disease choices
cure_opts = self.CureDiseaseChoice(city_list)
print("Can cure these diseases: " + str(cure_opts))
#Share Knowledge
share_opts = self.ShareAllKnowledgeChoice(players)
print("Can share card with " + str(share_opts))
#Special Abilities
if self.ContingencySpecialGrab(player_discard):
print("Can recover a contingency plan")
else:
print("Cannot recover a contingency plan")
if self.DispatcherControl():
print("Can move others")
else:
print("Not a dispatcher")
def GetNumberOfCardsOfColor(self, cities, color):
total = 0
for card in self.city_cards:
if cities[card.ID].color == color:
total += 1
return total
def FerryActionChoices(self, cities):
nid = cities[self.position_id].get_neighbors()
return nid
def ShuttleActionChoices(self, cities):
if cities[self.position_id].research_center:
mycity = cities[self.position_id]
targets = []
for c in cities:
if c.research_center and c != mycity:
targets.append(c)
return targets
else:
return 0
def CharterFlightChoices(self, cities):
if self.role == 'Operations Expert' and cities[self.position_id].research_center:
for card in self.card_list:
if card.kind == 'city':
return True
else:
for card in self.city_cards:
if card.ID == self.position_id:
return True
return False
def DirectFlightChoices(self):
targets = []
for card in self.city_cards:
if card.ID != self.position_id:
targets.append(card.ID)
return targets
def BuildResearchStationChoice(self, cities):
if not cities[self.position_id].research_center:
if self.role == 'Operations Expert':
return True
else:
for card in self.city_cards:
if card.ID == self.position_id:
return True
return False
def TreatDiseaseChoices(self, cities):
#right now we are returning the number of available actions;
return sum(cities[self.position_id].diseases.values())
def CureDiseaseChoice(self, cities):
curable = []
required = 4
if self.role == 'Scientist':
required = 3
if cities[self.position_id].research_center:
for color in ['blue', 'yellow', 'black', 'red']:
count = self.GetNumberOfCardsOfColor(cities, color)
if count > required:
curable.append(color)
return curable
def ShareAllKnowledgeChoice(self, playerlist):
x = []
for p in playerlist:
if p != self:
x.append(self.ShareKnowledgeChoice(p))
return x
def ShareKnowledgeChoice(self, friend):
if friend.position_id == self.position_id:
if friend.role == 'Researcher':
for c in friend.card_list:
if c.kind == 'city':
return True
else:
for c in friend.card_list:
if c.ID == friend.position_id:
return True
if self.role == 'Researcher':
for c in self.card_list:
if c.kind == 'city':
return True
else:
for c in self.card_list:
if c.ID == self.position_id:
return True
return False
def ContingencySpecialGrab(self, player_discard_pile):
if self.role != 'Contingency Planner':
return False
for card in player_discard_pile:
if card.kind == 'event':
return True
return False
def DispatcherControl(self):
if self.role == 'Dispatcher':
return True
else:
return False
def ForecastChoice(self, player_list):
for p in player_list:
if p.role == 'Contingency Planner':
if len(p.stored_card) > 0:
                    if any(c.name == 'Forecast' for c in p.stored_card):
return True
if p.HasEventCard('Forecast'):
return True
return False
def AirliftChoice(self,player_list):
for p in player_list:
if p.role == 'Contingency Planner':
if len(p.stored_card) > 0:
                    if any(c.name == 'Airlift' for c in p.stored_card):
return True
if p.HasEventCard('Airlift'):
return True
return False
def QuietNightChoice(self,player_list):
for p in player_list:
if p.role == 'Contingency Planner':
if len(p.stored_card) > 0:
                    if any(c.name == 'Quiet Night' for c in p.stored_card):
return True
if p.HasEventCard('Quiet Night'):
return True
return False
def GovernmentGrantChoice(self,player_list):
for p in player_list:
if p.role == 'Contingency Planner':
if len(p.stored_card) > 0:
                    if any(c.name == 'Government Grant' for c in p.stored_card):
return True
if p.HasEventCard('Government Grant'):
return True
return False
def ResilientPopChoice(self,player_list):
for p in player_list:
if p.role == 'Contingency Planner':
if len(p.stored_card) > 0:
                    if any(c.name == 'Resilient Population' for c in p.stored_card):
return True
if p.HasEventCard('Resilient Population'):
return True
return False
def AddCard(self, card):
self.card_list.append(card)
``` |
{
"source": "jpypi/X10-Command",
"score": 3
} |
#### File: jpypi/X10-Command/command.py
```python
import datetime
import hashlib
from converters import *
class Command(object):
next_id = 0
def __init__(self, init_string):
# Auto generate an id for each command
self.id = Command.next_id
Command.next_id += 1
# Just set this to anything a long time before today
self.last_run = datetime.date(1990, 1, 1)
self.init_string = init_string.upper().split(" ")
self.hash = self.GetHash()
self.type = "SET"
self.days = range(0, 7)
self.time = None
self.addresses = []
self.function = None
self.dims = 0
was_special = False
for unit in self.init_string:
if not was_special:
time = Time(unit)
days = Days(unit)
addr = Address(unit)
if time != False:
                    # Must compare against False explicitly: a time of 12am
                    # (00:00:00) evaluates as falsy but is not equal to False
self.time=time
if days:
self.days = days
if addr:
self.addresses.append(addr)
if unit in ("ON", "OFF", "DIM", "BRIGHT"):
self.function = unit
if unit in ("DIM", "BRIGHT"):
was_special = unit
else:
if was_special in ("DIM", "BRIGHT"):
if unit.endswith("%"):
self.dims = int(round(int(unit.rstrip("%")) / 100.0 * 22))
else:
self.dims = int(unit)
was_special = False # Now we can go back to normal mode
def Run(self, conn, current_date, current_time, time_info):
if current_date>self.last_run and time_info.tm_wday in self.days and\
current_time >= self.time: # or if this were a KEEP command
send_function = False
for address in self.addresses:
house_code = address[0]
if conn.SendAddr(address):
send_function = True
if send_function and conn.SendFunc(house_code + " " + self.function, self.dims):
self.last_run = current_date
def GetHash(self):
return hashlib.sha1(" ".join(self.init_string)).hexdigest()
if __name__ =="__main__":
c = Command("a5 dim 11 @ 6:59am MTWTF")
print c.time
print c.days
print c.function
print c.addresses
```
#### File: jpypi/X10-Command/interface.py
```python
import time
from string import ascii_uppercase
import serial
from x10constants import *
VERBOSE=False
TRYOUTS=2
def MakeAddr(code):
house=code.upper()[0]
device=code.upper()[1:]
return house_code[house]<<4|device_code[int(device)]
def MakeFunc(code):
house, func=code.upper().split(" ", 1)
return house_code[house]<<4|function_code[func]
def FillByte(num):
shift=8-len(bin(num).lstrip("0b"))
return num<<shift
class InterfaceCon:
def __init__(self, port, baud_rate, timeout=2, parity=serial.PARITY_NONE):
self.con=serial.Serial(port, baud_rate, timeout=timeout, parity=parity)
self.history=[]
self.last_transmission_size=0
def Read(self):
data=self.con.read()
if data == '\xa5':
self.DownloadTime()
return False
else:
return data
def ReadBin(self):
try: print bin(ord(self.con.read()))
except TypeError: print "Could not read from connection."
def Write(self, data):
"""
        **DEPRECATED!**
Use SendAddr, SendFunc, or Action!
"""
#raise DeprecationWarning, "Use SendAddr, SendFunc, or Action!"
self.con.write(data)
def Close(self):
self.con.close()
def DownloadTime(self):
# header seconds minutes hours year_day week_mask house stuff
#"10011011 11111111 11111111 11111111 [11111111 1]1111111 1111 1 1 1 1"
print "Downloading time..."
t=time.localtime()
if t.tm_wday == 6: day_mask=64
else: day_mask=1<<(5-t.tm_wday)
bit8=t.tm_yday&1
time_data="\x9b"+\
chr(t.tm_sec)+\
chr(t.tm_min)+\
chr(t.tm_hour/2)+\
chr(t.tm_yday>>1)+\
chr(bit8<<7|day_mask)+\
chr(house_code["A"]<<4|0b0111)
self.con.write(time_data)
if VERBOSE:print "Check sum: %s"%bin(sum(map(ord, list(time_data)))&0xff)
self.ReadBin()
print "Done"
def SendAddr(self, address):
## if VERBOSE: print "Connection ID: %s"%self.ID
if VERBOSE: print "Seinding address %s"%address
data=[0x04, MakeAddr(address)]
check=chr(sum(data)&0xff)
self.con.write(serial.to_bytes(data))
tries=0
while tries<TRYOUTS:
con_data=self.con.read()
if con_data!=check:
if VERBOSE: print con_data
self.con.write(serial.to_bytes(data))
if VERBOSE: print "Resending address"
tries+=1
else:
break
if tries>=TRYOUTS:
if VERBOSE:print "Unsucessful address!"
return False
self.con.write(chr(0x00))#Checksum correct, OK to transmit
while self.con.read()!="U" and tries<TRYOUTS*2:
time.sleep(0.5);tries+=1
if VERBOSE:print "Not ready after sending address"
if tries>=TRYOUTS*2:
if VERBOSE:print "Error after sending address!"
return False
self.history.append(address)
return True
def SendFunc(self, func, dims=0):
## if VERBOSE: print "Connection ID: %s"%self.ID
if VERBOSE: print "Seinding function %s"%func
dims=int(round(dims))
data=[dims<<3|0b110, MakeFunc(func)]
check=chr(sum(data)&0xff)
self.con.write(serial.to_bytes(data))
tries=0
while tries<TRYOUTS and self.con.read()!=check:
self.con.write(serial.to_bytes(data))
if VERBOSE:print "Resending function"
tries+=1
if tries>=TRYOUTS:
if VERBOSE:print "Unsucessful function!"
return False
self.con.write(chr(0x00))
while self.con.read()!="U" and tries<TRYOUTS*2:
time.sleep(0.5);tries+=1
if VERBOSE:print "Not ready after sending function"
if tries>=TRYOUTS*2:
if VERBOSE:print "Error after sending function!"
return False
f=func.upper().replace(" ", "")
change=dims/22.0*100
if f[1:] == "DIM": #[1:] removes the housecode from the function
self.history.append(f+"-%s"%change)
elif f[1:] == "BRIGHT":
self.history.append(f+"+%s"%change)
else:
self.history.append(f)
return True
def Action(self, addr, func, dims=0):
"""
A combo version of SendAddr and SendFunc
Supports only 1 address! Could easily be re-written to support multiple
"""
if self.SendAddr(addr):
if self.SendFunc(func, dims):
return True
return False
def ReceiveTrans(self):
self.con.write("\xc3")#This is the "it's ok to send data now" code
received=[self.Read() for i in xrange(ord(self.Read()))]
if VERBOSE: print "Receieved: "+str(received)
if len(received)>2:
mask=list(bin(ord(received.pop(0))).lstrip("0b").zfill(8))
mask.reverse()
info=[]
for i, d in enumerate(received):
if d:
d=ord(d)
if info and info[-1].find("DIMF") == 1:
hc=info[-1][0]
info.pop(-1)
info.append(hc+"DIM-"+str(d/210.0*100))
elif info and info[-1].find("BRIGHTF") == 1:
hc=info[-1][0]
info.pop(-1)
info.append(hc+"BRIGHT+"+str(d/210.0*100))
else:
hc=str(code_house[d>>4])#house code
mask_val=mask[i]
if VERBOSE:print bin(d), mask_val
if mask_val == "0":
dc=str(device_code.index(d&15))#device code
info.append(hc+dc)
if mask_val == "1":
info.append(hc+code_function[d&15]+"F")#function code;The "F" is for denoting a function, used in detecting dims and brights
elif VERBOSE>1:
print "Receieved error: "+str(received)
for i in info:
if i[-1] == "F":i=i[:-1]
self.history.append(i)
def Log(info):
try:
f=open("data/log.txt", "a")
except:
f=open("data/log.txt", "w")
f.write(str(info)+"\n")
f.close()
class StateManager:
def __init__(self):
self.states={}
self.entire_history=[]
self.history=[]
def append(self, data):
if VERBOSE:
print "Appending data to history: %s"%str(data)
self.entire_history.append(data)
self.history.append(data)
if data[1:].rstrip("-+1234567890.") in function_code:
self.UpdateStates()
def UpdateStates(self):
parsed_to=0
last_addresses=[]
for i, h in enumerate(self.history):
if h[0].isalpha() and h[1:].isdigit():
if VERBOSE:
print "Found address"
if len(last_addresses)>0:
print "Last address house code: %s"%last_addresses[-1][0]
if (last_addresses and last_addresses[-1][0] == h[0]) or not last_addresses:
last_addresses.append(h)
if VERBOSE:
print "Adding address: %s"%h
elif last_addresses and h[0] == last_addresses[-1][0]:
if VERBOSE:
print "Found command"
parsed_to=i
for a in last_addresses:
last_value=self.states.get(a, [""])[0]
new_state=None
if (h.find("BRIGHT") == 1 or h.find("DIM") == 1) and last_value:
if type(last_value) == float:
level = last_value+float(h.lstrip(ascii_uppercase))
if level > 100:
level = 100.0
if level < 0:
level = 0.0
new_state = level
elif h.find("DIM") == 1:
new_state = 100 + float(h.lstrip(ascii_uppercase))
else:
new_state = "ON"
else:
if h[1:] == "OFF":
new_state=h[1:]
# Last_value in on, off is so that when the light is dimmed and
# someone presses on we don't record it as being fully on.
# Unless, however, we don't know what state it was in beforehand.
elif h[1:] == "ON" and (last_value in ("ON", "OFF") or last_value == ""):
new_state=h[1:]
if new_state:
self.states[a]=[new_state, time.time()]
Log("%s : %s : %s"%(a, h, time.ctime()))
del self.history[:parsed_to+1]
# TODO: These look like evidence of bad things
history=[]
last_addr=False
last_addrs=[]
def LastAddrs():
"""
    Returns the last transmitted addresses in LIFO order:
    the last address will be first, the second to last will be second, etc.
"""
last=[]
if len(history) == 0:
return []
    # This makes it so the list is traversed in reverse without changing its actual order
for i in xrange(-1, -len(history)-1, -1):
hval=history[i]
# Detect only addresses
if hval[0].isalpha() and hval[1:].isdigit():
last.append(hval)
return last
def ParseHistory(history, units_states):
global last_addr
for h in history:
if h[0].isalpha() and h[1:].isdigit():
last_addr=h
elif last_addr and h[0] == last_addr[0]:#Check housecodes are the same
try:
val=units_states[last_addr][0]
except KeyError: val=""
new_state=None
if (h.find("BRIGHT") == 1 or h.find("DIM") == 1) and val!="":
if type(val) == float:
level = units_states[last_addr][0] + float(h.lstrip(ascii_uppercase))
if level>100:
level=100.0
if level<0:
level=0.0
new_state=level
elif val == "ON" and h.find("DIM") == 1:
new_state=100+float(h.lstrip(ascii_uppercase))
else:
new_state="ON"
# An on or off command and/or there was no previously recorded
# addr for the action.
else:
if h[1:] == "OFF":#Command is off
new_state=h[1:]
# Command is on and light is off or on (The on "on" part is so that we
# still update the view and log that way I know if someone keeps trying
# to turn a light on.)
elif val in ("ON", "OFF") or val == "":
new_state=h[1:]
if new_state:
if units_states.has_key(last_addr):
#if units_states[last_addr][0]!=new_state:
units_states[last_addr]=[new_state, time.time()]
else:
units_states[last_addr]=[new_state, time.time()]
Log("%s : %s : %s"%(last_addr, h, time.ctime()))
return units_states
``` |
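A minimal usage sketch for the serial interface above (not part of the original file). The module path, serial port, and baud rate are assumptions; substitute whatever the X10 computer interface in use actually requires.
```python
from interface import InterfaceCon   # assumes this file is importable as `interface`

con = InterfaceCon("/dev/ttyUSB0", 4800)   # port and baud rate are placeholders
ok = con.Action("A1", "A ON")              # address unit A1, then send the "A ON" function
con.Close()
```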
{
"source": "jpyy/ruuvitag-sensor",
"score": 4
} |
#### File: ruuvitag-sensor/examples/print_to_screen.py
```python
import os
from datetime import datetime
from ruuvitag_sensor.ruuvi import RuuviTagSensor
# Change here your own device's mac-address
mac = 'F4:A5:74:89:16:57'
print('Starting')
def print_data(received_data):
received_mac = received_data[0]
data = received_data[1]
line_sen = str.format('Sensor - {0}', received_mac)
line_tem = str.format('Temperature: {0} C', data['temperature'])
line_hum = str.format('Humidity: {0}', data['humidity'])
line_pre = str.format('Pressure: {0}', data['pressure'])
# Clear screen and print sensor data
os.system('clear')
print('Press Ctrl+C to quit.\n\r\n\r')
print(str(datetime.now()))
print(line_sen)
print(line_tem)
print(line_hum)
print(line_pre)
print('\n\r\n\r.......')
RuuviTagSensor.get_datas(print_data, mac)
```
#### File: ruuvitag-sensor/ruuvitag_sensor/ruuvi.py
```python
import sys
import os
import time
import logging
from ruuvitag_sensor.data_formats import DataFormats
from ruuvitag_sensor.decoder import get_decoder
log = logging.getLogger(__name__)
if not sys.platform.startswith('linux') or os.environ.get('CI') == 'True':
# Use BleCommunicationDummy also for CI as it can't use bluez
from ruuvitag_sensor.ble_communication import BleCommunicationDummy
ble = BleCommunicationDummy()
else:
from ruuvitag_sensor.ble_communication import BleCommunicationNix
ble = BleCommunicationNix()
class RunFlag(object):
"""
Wrapper for boolean run flag
Attributes:
running (bool): Defines if function should continue execution
"""
running = True
class RuuviTagSensor(object):
"""
RuuviTag communication functionality
"""
@staticmethod
def get_data(mac, bt_device=''):
"""
Get raw data for selected RuuviTag
Args:
mac (string): MAC address
bt_device (string): Bluetooth device id
Returns:
tuple (int, string): Data Format type and raw Sensor data
"""
raw = ble.get_data(mac, bt_device)
return DataFormats.convert_data(raw)
@staticmethod
def find_ruuvitags(bt_device=''):
"""
        Find all RuuviTags. The function will print the MAC and the state of each sensor when found.
        The function will keep executing until it is stopped. Stop execution with Ctrl+C.
Returns:
dict: MAC and state of found sensors
"""
log.info('Finding RuuviTags. Stop with Ctrl+C.')
datas = dict()
for new_data in RuuviTagSensor._get_ruuvitag_datas(bt_device=bt_device):
if new_data[0] in datas:
continue
datas[new_data[0]] = new_data[1]
log.info(new_data[0])
log.info(new_data[1])
return datas
@staticmethod
def get_data_for_sensors(macs=[], search_duratio_sec=5, bt_device=''):
"""
        Get the latest data for the sensors in the MACs list.
Args:
macs (array): MAC addresses
search_duratio_sec (int): Search duration in seconds. Default 5
bt_device (string): Bluetooth device id
Returns:
dict: MAC and state of found sensors
"""
log.info('Get latest data for sensors. Stop with Ctrl+C.')
log.info('Stops automatically in %ss', search_duratio_sec)
log.info('MACs: %s', macs)
datas = dict()
for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, search_duratio_sec, bt_device=bt_device):
datas[new_data[0]] = new_data[1]
return datas
@staticmethod
def get_datas(callback, macs=[], run_flag=RunFlag(), bt_device=''):
"""
Get data for all ruuvitag sensors or sensors in the MAC's list.
Args:
            callback (func): callback function to be called when new data is received
macs (list): MAC addresses
run_flag (object): RunFlag object. Function executes while run_flag.running
bt_device (string): Bluetooth device id
"""
log.info('Get latest data for sensors. Stop with Ctrl+C.')
log.info('MACs: %s', macs)
for new_data in RuuviTagSensor._get_ruuvitag_datas(macs, None, run_flag, bt_device):
callback(new_data)
@staticmethod
def _get_ruuvitag_datas(macs=[], search_duratio_sec=None, run_flag=RunFlag(), bt_device=''):
"""
Get data from BluetoothCommunication and handle data encoding.
Args:
macs (list): MAC addresses. Default empty list
search_duratio_sec (int): Search duration in seconds. Default None
run_flag (object): RunFlag object. Function executes while run_flag.running. Default new RunFlag
bt_device (string): Bluetooth device id
Yields:
tuple: MAC and State of RuuviTag sensor data
"""
mac_blacklist = []
start_time = time.time()
data_iter = ble.get_datas(mac_blacklist, bt_device)
for ble_data in data_iter:
# Check duration
if search_duratio_sec and time.time() - start_time > search_duratio_sec:
data_iter.send(StopIteration)
break
# Check running flag
if not run_flag.running:
data_iter.send(StopIteration)
break
# Check MAC whitelist
if macs and not ble_data[0] in macs:
continue
(data_format, data) = DataFormats.convert_data(ble_data[1])
# Check that encoded data is valid RuuviTag data and it is sensor data
# If data is not valid RuuviTag data add MAC to blacklist
if data is not None:
state = get_decoder(data_format).decode_data(data)
if state is not None:
yield (ble_data[0], state)
else:
log.error('Decoded data is null. MAC: %s - Raw: %s', ble_data[0], ble_data[1])
else:
mac_blacklist.append(ble_data[0])
``` |
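A minimal usage sketch for the module above (not part of the original package): subscribe to a single tag with `get_datas` and stop the listener from inside the callback via the `RunFlag` wrapper. The MAC address and the stop-after-ten-readings condition are placeholders.
```python
from ruuvitag_sensor.ruuvi import RuuviTagSensor, RunFlag

run_flag = RunFlag()
received = []

def handle_data(found_data):
    # found_data is the (mac, state) tuple yielded by _get_ruuvitag_datas
    received.append(found_data)
    print(found_data[0], found_data[1])
    if len(received) >= 10:
        # flipping the flag ends the get_datas loop
        run_flag.running = False

RuuviTagSensor.get_datas(handle_data, macs=['F4:A5:74:89:16:57'], run_flag=run_flag)
```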
{
"source": "jpza/ekscli",
"score": 2
} |
#### File: ekscli/ekscli/cli.py
```python
from __future__ import absolute_import
import functools
import logging
import os
import re
import sys
import boto3
import click
from future.utils import iteritems
from tabulate import tabulate
import ekscli
from ekscli.bootstrap import Kubelet
from ekscli.stack import ControlPlane, KubeConfig, NodeGroup, ClusterInfo, AWSSecurityGroupRule
from ekscli.thirdparty.click_alias import ClickAliasedGroup
from ekscli.utils import which, MutuallyExclusiveOption
LOG = logging.getLogger(ekscli.__app_name__)
__log_levels = [logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG, logging.NOTSET]
def config_logger(ctx, param, value):
if value > 4:
raise click.BadParameter('Set verbosity between -v and -vvvv')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
LOG.addHandler(handler)
LOG.setLevel(__log_levels[value])
def validate_region(ctx, param, value):
if value:
boto3.setup_default_session(region_name=value)
region = value
else:
region = boto3.session.Session().region_name
LOG.info('Using the system default AWS region: {}'.format(region))
if region not in ControlPlane.SUPPORTED_ZONES:
raise click.BadParameter('EKS not supported in this region - {}'.format(region))
return region
def validate_subnetes(ctx, param, value):
if not value:
return []
try:
subnets = value.split(',')
invalids = [s for s in subnets if not re.match(r'(subnet-[a-f0-9]{8})', s)]
if len(invalids):
raise click.BadParameter('these subnet ids are invalid: {}'.format(','.join(invalids)))
return subnets
except ValueError:
raise click.BadParameter('subnets should be a valid subnet id list.')
def validate_security_group_rule(ctx, param, value):
if not value:
return None
try:
# rule = namedtuple('rule', ['protocol', 'cidr', 'from_port', 'to_port'])
rules = [dict(tuple(x.split('=')) for x in v.split(',')) for v in value]
ingresses = [AWSSecurityGroupRule(cidr=r.get('cidr', '0.0.0.0/0'),
protocol=r.get('protocol', 'tcp'),
from_port=r.get('from', -1),
to_port=r.get('to', -1),
port=r.get('port'))
for r in rules]
return ingresses
except Exception as e:
        raise click.BadParameter('ingress rules should be given as comma-delimited key=value pairs')
def validate_tags(ctx, param, value):
if not value:
return {}
try:
# tags = re.findall(r'([^=]+)=([^=]+)(?:,|$)', value)
bits = [x.rsplit(',', 1) for x in value.split('=')]
kv = [(bits[i][-1], bits[i + 1][0]) for i in range(len(bits) - 1)]
invalids = [t for t in kv if not t[0] or not t[1]]
if len(invalids):
raise ValueError()
return dict(kv)
except ValueError:
        raise click.BadParameter('tags should be in the form of k1=v1,k2=v2')
def validate_heptio_authenticator(ctx, param, value):
executable = value if value else 'heptio-authenticator-aws{}'.format('.exe' if os.name == 'nt' else '')
path = which(executable)
if not path:
raise click.BadParameter('{} does not exist in environment paths or un-executable.'.format(executable))
LOG.info('Use {} for heptio-authenticator-aws'.format(path))
return executable
def common_options(func):
@click.option('--name', '-n', envvar='EKS_CLUSTER_NAME', required=True,
help='A regional unique name of the EKS cluster. Overrides EKS_CLUSTER_NAME environment variable.')
@click.option('--region', '-r', type=str, callback=validate_region,
help='The AWS region to create the EKS cluster.')
@click.option('-v', '--verbosity', callback=config_logger, count=True,
help='Log level; -v for WARNING, -vv INFO, -vvv DEBUG and -vvvv NOTSET.')
@functools.wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
@click.group()
@click.pass_context
def eks(ctx):
"""A simple and flexible command-line tool for AWS EKS management"""
pass
@eks.command()
def version():
"""Show the EKS cli version info"""
print('Version {}'.format(ekscli.__version__))
@eks.group(invoke_without_command=True, no_args_is_help=True, cls=ClickAliasedGroup,
short_help='Create an EKS resource: a cluster or node group')
@click.pass_context
def create(ctx):
"""Create an EKS component: a cluster or node group"""
pass
@eks.group(invoke_without_command=True, no_args_is_help=True, cls=ClickAliasedGroup)
@click.pass_context
def get(ctx):
"""Display EKS resource information"""
pass
@eks.group(invoke_without_command=True, no_args_is_help=True, cls=ClickAliasedGroup)
@click.pass_context
def delete(ctx):
"""Delete an EKS resource: cluster or node group"""
pass
@eks.group(invoke_without_command=True, no_args_is_help=True)
@click.pass_context
def export(ctx):
"""Export configuration from an EKS cluster"""
pass
@eks.command()
@click.option('--cluster-name', '-n', type=str, help='EKS cluster name')
@click.option('--region', '-r', type=str, help='AWS region')
@click.option('--max-pods', '-m', type=int, help='Max number pods able to run on the node.')
@click.option('--node-ip', '-i', type=str, help='Node internal IP')
@click.option('--kubelet-opt', '-o', type=str, multiple=True, help='kubelet options')
@click.option('--kubelet-exec', '-e', type=str, help='kubelet executor file location', default='/usr/bin/kubelet',
show_default=True)
@click.option('--kubelet-svc', '-s', type=str, help='kubelet service file location',
default='/etc/systemd/system/kubelet.service', show_default=True)
@click.option('--kubeconf', '-k', type=str, help='kube-config file location', default='/var/lib/kubelet/kubeconfig',
show_default=True)
@click.option('--cert', '-c', type=str, help='CA cert file location', default='/etc/kubernetes/pki/ca.crt',
show_default=True)
@click.option('--dry-run', '-d', is_flag=True, default=False,
help='If true, only print the artifacts that could be written to files.')
@click.pass_context
def bootstrap(ctx, cluster_name, region, max_pods, node_ip, kubelet_opt, kubelet_exec, kubelet_svc, kubeconf, cert,
dry_run):
"""Configure and bootstrap kubelet on worker nodes"""
    opts = {v[0]: v[1] for v in [k if len(k) > 1 else k + [''] for k in [o.split('=', 1) for o in kubelet_opt]]}
kubelet = Kubelet(cluster_name=cluster_name, region=region, max_pods=max_pods, ip=node_ip, kubeconf_file=kubeconf,
cert_file=cert, kubelet_opts=opts, kubelet_exec_file=kubelet_exec, kubelet_svc_file=kubelet_svc,
dry_run=dry_run)
kubelet.bootstrap()
@create.command(name='cluster')
@common_options
@click.option('--cp-role', type=str, help='The existing EKS role for the control plane.')
@click.option('--subnets', type=str, callback=validate_subnetes,
help='The existing subnets for the EKS cluster and node groups.')
@click.option('--vpc-cidr', type=str, default='192.168.0.0/16', cls=MutuallyExclusiveOption,
mutex_group=['subnets', 'vpc_cidr'], help='The VPC CIDR block')
@click.option('--zones', type=str, cls=MutuallyExclusiveOption,
mutex_group=['subnets', 'zones'], help='Availability zones where to deploy EKS cluster.')
@click.option('--tags', type=str, callback=validate_tags,
help='Tags for the cluster; delimited by comma as: Key0=Value0,Key1=Value1.')
@click.option('--kubeconf', type=str,
help='Kubernetes config file; if not set, KUBECONFIG or ~/.kube/config will be used.')
@click.option('--username', type=str, default='aws', help='Username specified in kube config file for this cluster.')
@click.option('--heptio-auth', type=str, callback=validate_heptio_authenticator,
help='The path to Heptio AWS authenticator.')
@click.option('--cp-only', is_flag=True, default=False, help='To create EKS control plane only without node groups.')
@click.option('--node-name', type=str, default='workers', cls=MutuallyExclusiveOption,
mutex_group=['cp_only', 'node-name'], help='The node group name')
@click.option('--node-role', type=str, cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'node-role'],
help='Additional roles for the node group')
@click.option('--node-sg-ingress', type=str, cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'node-sg-ingress'],
multiple=True, callback=validate_security_group_rule,
help='Additional security group ingresses for the node group')
@click.option('--node-min', type=int, default=1, cls=MutuallyExclusiveOption,
mutex_group=['cp_only', 'node-min'], help='The min size of the node group')
@click.option('--node-max', type=int, default=3, cls=MutuallyExclusiveOption,
mutex_group=['cp_only', 'node-max'], help='The max size of the node group')
@click.option('--node-subnets', type=str, callback=validate_subnetes,
help='The existing subnets to create node groups. Default, all subnets where EKS cluster is deployed.')
@click.option('--node-type', type=str, cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'node-type'],
help='Node group instance type.')
@click.option('--keyname', type=str, cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'keyname', 'ssh_public_key'],
help='To use an existing keypair name in AWS for node groups')
@click.option('--ssh-public-key', type=str, cls=MutuallyExclusiveOption,
mutex_group=['cp_only', 'keyname', 'ssh_public_key'],
help='To create a keypair used by node groups with an existing SSH public key.')
@click.option('--ami', type=str, cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'ami'],
help='AWS AMI id or location')
@click.option('--no-user-data', cls=MutuallyExclusiveOption, mutex_group=['cp_only', 'no_user_data'],
is_flag=True, default=False,
              help='Do not use the user data in the NodeGroup LaunchConfiguration; '
                   'instead use an ekscli-bootstrap-like approach for node discovery.')
@click.option('--yes', '-y', is_flag=True, default=False, help='Run ekscli without any confirmation prompt.')
@click.pass_context
def create_cluster(ctx, name, region, verbosity,
cp_role, subnets, tags, vpc_cidr, zones, kubeconf, username, heptio_auth, cp_only, node_name,
node_role, node_sg_ingress, node_min, node_max, node_subnets, node_type, keyname, ssh_public_key,
ami, no_user_data, yes):
"""Create an EKS cluster"""
if node_subnets and not subnets:
print('If node subnets are specified, the cluster subnets must appear!')
exit(1)
elif node_subnets and subnets:
s = [ns for ns in node_subnets if ns not in subnets]
if s:
print('[{}] not one of the cluster subnets.'.format(','.join(s)))
exit(1)
if not kubeconf:
files = os.environ.get('KUBECONFIG', '~/.kube/config')
kubeconf = os.path.expanduser(files.split(':')[0])
if not yes:
        if not click.confirm('Are you sure you want to create the EKS cluster in '
                             'region [{}] with kubeconfig [{}]?'.format(region, kubeconf)):
exit(0)
cp = ControlPlane(name, subnets=subnets, role=cp_role, region=region, tags=tags,
vpc_cidr=vpc_cidr, zones=zones)
cluster_info = cp.create()
kc = KubeConfig(cluster_info, kubeconf, user=username, heptio_auth=heptio_auth)
kc.create()
if cp_only:
LOG.info('To create EKS cluster control plane only.')
return
ng = NodeGroup(node_name, cluster_info=cluster_info, keypair=keyname, region=region, ami=ami, subnets=node_subnets,
kubeconf=kubeconf, role=node_role, sg_ingresses=node_sg_ingress, min_nodes=node_min,
max_nodes=node_max, instance_type=node_type, ssh_public_key=ssh_public_key,
no_user_data=no_user_data)
ng.create()
@export.command(name='kubeconfig')
@common_options
@click.option('--kubeconf', type=str,
help='Kubernetes config file; if not set, KUBECONFIG or ~/.kube/config will be used.')
@click.option('--username', type=str, help='Username specified in Kubernetes conf file for this cluster', default='aws')
@click.option('--heptio-auth', type=str, callback=validate_heptio_authenticator,
help='The path to Heptio AWS authenticator.')
@click.pass_context
def export_kubeconfig(ctx, name, region, verbosity, kubeconf, username, heptio_auth):
"""Export Kubernetes configuration for kubectl"""
cp = ControlPlane(name, region=region)
cluster_info = cp.query()
kc = KubeConfig(cluster_info, kubeconf, user=username, heptio_auth=heptio_auth)
kc.create()
@create.command(name='nodegroup', aliases=['ng'])
@common_options
@click.option('--node-name', required=True, help='The node group name.')
@click.option('--tags', type=str, callback=validate_tags,
help='Tags for all resources; delimited by comma as: Key0=Value0,Key1=Value1.')
@click.option('--kubeconf', type=str,
help='Kubernetes config file; if not set, KUBECONFIG or ~/.kube/config will be used.')
@click.option('--node-role', type=str, help='Additional roles for the node group')
@click.option('--node-sg-ingress', type=str, callback=validate_security_group_rule, multiple=True,
help='Additional security group ingresses for the node group')
@click.option('--node-min', type=int, default=1, help='The min size of the node group')
@click.option('--node-max', type=int, default=3, help='The max size of the node group')
@click.option('--node-type', type=str, help='Node group instance type.')
@click.option('--node-role', type=str, help='Additional roles for the node group')
@click.option('--node-subnets', type=str, callback=validate_subnetes,
help='The existing subnets to create node groups. Default, all subnets where EKS cluster is deployed.')
@click.option('--keyname', type=str, help='To use an existing keypair name in AWS for node groups',
cls=MutuallyExclusiveOption, mutex_group=['keyname', 'ssh_public_key'])
@click.option('--ssh-public-key', type=str,
help='To create a keypair used by node groups with an existing SSH public key.',
cls=MutuallyExclusiveOption, mutex_group=['keyname', 'ssh_public_key'])
@click.option('--ami', type=str, help='AWS AMI id or location')
@click.option('--bootstrap-opt', '-b', type=str, multiple=True,
help='Options for ekscli bootstrap. See ekscli bootstrap --help.')
@click.option('--no-user-data', type=str, is_flag=True, default=False,
              help='Do not use the user data in the NodeGroup LaunchConfiguration; instead use an ekstrap-like approach for node discovery.')
@click.option('--yes', '-y', is_flag=True, default=False, help='Run ekscli without any confirmation prompt.')
@click.pass_context
def create_nodegroup(ctx, name, node_name, region, verbosity, node_subnets, tags, kubeconf, node_min, node_max,
node_role, node_type, node_sg_ingress, keyname, ssh_public_key, ami, bootstrap_opt, no_user_data,
yes):
"""Create a node group in an existing EKS cluster"""
cp = ControlPlane(name, region=region)
cluster_info = cp.query()
if not kubeconf:
files = os.environ.get('KUBECONFIG', '~/.kube/config')
kubeconf = os.path.expanduser(files.split(':')[0])
if not yes:
        if not click.confirm('Are you sure you want to create the node group in '
                             'region [{}] with kubeconfig [{}]?'.format(region, kubeconf)):
exit(0)
ng = NodeGroup(node_name, cluster_info=cluster_info, region=region, ami=ami, keypair=keyname, subnets=node_subnets,
role=node_role, sg_ingresses=node_sg_ingress, ssh_public_key=ssh_public_key, tags=tags,
kubeconf=kubeconf, min_nodes=node_min, max_nodes=node_max, instance_type=node_type,
no_user_data=no_user_data)
ng.create()
@get.command(name='cluster')
@common_options
@click.pass_context
def get_cluster(ctx, name, region, verbosity):
"""Display the information about the EKS cluster's control plane.
"""
cp = ControlPlane(name, region=region)
ci = cp.query()
headers = ['NAME', 'ENDPOINT', 'VPC', 'SUBNETS']
print(tabulate([[ci.name, ci.endpoint, ci.vpc, ','.join(ci.subnets)]], headers, tablefmt='plain'))
@get.command(name='nodegroup', aliases=['nodegroups', 'ng'])
@common_options
@click.argument('node-group-names', nargs=-1)
@click.pass_context
def get_ng(ctx, name, region, verbosity, node_group_names):
"""Display one or more node groups by names.
If no node group names specified, ekscli will display all node groups in the current EKS cluster
"""
cp = ControlPlane(name, region=region)
ci = cp.query()
if node_group_names:
ngis = [NodeGroup(name, ci).query().to_list() for name in node_group_names]
else:
stacks = cp.get_all_nodegroup_stacks()
ngis = [NodeGroup(name, ci).query(s).to_list() for (name, s) in list(iteritems(stacks))]
headers = ['NAME', 'INSTANCETYPE', 'MIN', 'MAX', 'ROLE']
print(tabulate(ngis, headers, tablefmt='plain'))
@delete.command(name='cluster')
@common_options
@click.confirmation_option('--yes', '-y', help='Are you sure you want to delete this cluster and its associated node groups?')
@click.pass_context
def delete_cluster(ctx, name, region, verbosity):
"""Delete an EKS cluster (including its node groups)"""
cp = ControlPlane(name, region=region)
cp.delete()
@delete.command(name='nodegroup', aliases=['ng'])
@common_options
@click.option('--node-name', required=True, help='The node group name.')
@click.option('--kubeconf', type=str,
help='Kubernetes config file; if not set, KUBECONFIG or ~/.kube/config will be used.')
@click.confirmation_option('--yes', '-y', help='Are you sure you want to delete this node group?')
@click.pass_context
def delete_nodegroup(ctx, name, region, verbosity, node_name, kubeconf):
"""Delete an EKS node grup"""
ng = NodeGroup(node_name, ClusterInfo(name), region=region, kubeconf=kubeconf)
ng.delete()
def cli():
try:
eks()
except Exception as e:
click.echo('Error: {}'.format(e))
return 1
if __name__ == "__main__":
sys.exit(cli())
``` |
{
"source": "jpzhangvincent/MobileAppRecommendSys",
"score": 2
} |
#### File: webscraping/appcrawl/random_user_agent.py
```python
from appcrawl.settings import USER_AGENT_LIST
import random
class RandomUserAgentMiddleware(object):
def process_request(self, request, spider):
ua = random.choice(USER_AGENT_LIST)
if ua:
request.headers.setdefault('User-Agent', ua)
```
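For context, a sketch of the `settings.py` entries this middleware relies on (the user-agent strings and the priority value are illustrative): a `USER_AGENT_LIST` to sample from, plus enabling the middleware in `DOWNLOADER_MIDDLEWARES` while disabling Scrapy's built-in user-agent middleware.
```python
USER_AGENT_LIST = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15',
]

DOWNLOADER_MIDDLEWARES = {
    'appcrawl.random_user_agent.RandomUserAgentMiddleware': 400,
    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
}
```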
#### File: appcrawl/spiders/appstore.py
```python
import scrapy
from scrapy.selector import Selector
from scrapy.spiders import CrawlSpider
from appcrawl.items import AppItem
import datetime
class AppstoreSpider(CrawlSpider):
name = 'appstore'
allowed_domains = ['itunes.apple.com']
start_urls = ['http://itunes.apple.com/us/genre/ios-books/id6018?mt=8',
'http://itunes.apple.com/us/genre/ios-business/id6000?mt=8',
'http://itunes.apple.com/us/genre/ios-catalogs/id6022?mt=8',
'http://itunes.apple.com/us/genre/ios-education/id6017?mt=8',
'http://itunes.apple.com/us/genre/ios-entertainment/id6016?mt=8',
'http://itunes.apple.com/us/genre/ios-finance/id6015?mt=8',
'http://itunes.apple.com/us/genre/ios-food-drink/id6023?mt=8',
'http://itunes.apple.com/us/genre/ios-games/id6014?mt=8',
'http://itunes.apple.com/us/genre/ios-health-fitness/id6013?mt=8',
'http://itunes.apple.com/us/genre/ios-lifestyle/id6012?mt=8',
'http://itunes.apple.com/us/genre/ios-medical/id6020?mt=8',
'http://itunes.apple.com/us/genre/ios-music/id6011?mt=8',
'http://itunes.apple.com/us/genre/ios-navigation/id6010?mt=8',
'http://itunes.apple.com/us/genre/ios-news/id6009?mt=8',
'http://itunes.apple.com/us/genre/ios-newsstand/id6021?mt=8',
'http://itunes.apple.com/us/genre/ios-photo-video/id6008?mt=8',
'http://itunes.apple.com/us/genre/ios-productivity/id6007?mt=8',
'http://itunes.apple.com/us/genre/ios-reference/id6006?mt=8',
'https://itunes.apple.com/us/genre/ios-shopping/id6024?mt=8',
'http://itunes.apple.com/us/genre/ios-social-networking/id6005?mt=8',
'http://itunes.apple.com/us/genre/ios-sports/id6004?mt=8',
'https://itunes.apple.com/us/genre/ios-stickers/id6025?mt=8',
'http://itunes.apple.com/us/genre/ios-travel/id6003?mt=8',
'http://itunes.apple.com/us/genre/ios-utilities/id6002?mt=8',
'http://itunes.apple.com/us/genre/ios-weather/id6001?mt=8'
]
def parse(self, response):
hxs = Selector(response)
for href in hxs.xpath('//div[contains(@class,"column")]/ul/li/a/@href'):
url = href.extract()
yield scrapy.Request(url, callback=self.parse_app)
def parse_app(self, response): #parse_app
hxs = Selector(response)
i = AppItem()
i['name'] = hxs.xpath('//div/div/h1/text()').extract_first()
i['url'] = response.url
i['id'] = response.url.split('/')[-1].split('?')[0][2:]
i['category'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[2]/a/span/text()').extract_first()
try:
i['description'] = " ".join(hxs.xpath('//div[@id="content"]/div/div[2]/div[1]/p/text()').extract()).encode('ascii','ignore').strip()
except:
i['description'] = None
try:
i['new_version_desc'] = " ".join(hxs.xpath('//div[@id="content"]/div/div[2]/div[3]/p/text()').extract()).encode('ascii','ignore').strip()
except:
i['new_version_desc'] = None
i['update_date'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[3]/span[2]/text()').extract_first()
i['publish_date'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[3]/span[2]/@content').extract_first()
i['scrape_date'] = datetime.date.today().isoformat()
i['price'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[1]/span/div/text()').extract_first()
i['version'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[4]/span[2]/text()').extract_first()
i['size'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[5]/text()').extract_first()
i['seller'] = hxs.xpath('//div[@id="left-stack"]/div[1]/ul/li[7]/span[2]/span/text()').extract_first()
try:
i['is_InAppPurcased'] = int('In-App' in hxs.xpath('//div[@id="left-stack"]/div[3]/h4/text()').extract_first())
except:
i['is_InAppPurcased'] = 0
try:
lang_str = hxs.xpath('//*[@id="left-stack"]/div[1]/ul/li[6]/text()').extract_first().split(',')
i['is_multilingual'] = int(len(lang_str) > 1)
except:
i['is_multilingual'] = 0
try:
            platform_str = hxs.xpath('//*[@id="left-stack"]/div[1]/div[1]/span[2]/text()').extract_first()
            i['is_multiplatform'] = int('and' in platform_str)
except:
i['is_multiplatform'] = 0
try:
i['current_rating'] = float(hxs.xpath('//div[@id="left-stack"]/div[2]/div[2]/span[1]/text()').extract_first())
except:
i['current_rating'] = None
try:
num_crating_str = hxs.xpath('//div[@id="left-stack"]/div[2]/div[2]/span[2]/text()').extract_first()
i['num_current_rating'] = int(num_crating_str.split(' ')[0])
except:
i['num_current_rating'] = None
try:
orating_str = hxs.xpath('//div[@id="left-stack"]/div[2]/div[4]/@aria-label').extract_first()
orating, num_orating_str = orating_str.split(',')
i['overall_rating'] = orating
i['num_overall_rating'] = int(num_orating_str.split(' ')[1])
except:
i['overall_rating'] = None
i['num_overall_rating'] = None
try:
review1_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[1]/p/text()').extract_first()
i['review1'] = review1_str.encode('ascii','ignore').strip()
review1_stat_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[1]/h5/div/@aria-label').extract_first()
i['review1_star'] = int(review1_stat_str.split(' ')[0])
except:
i['review1'] = None
i['review1_star'] = None
try:
review2_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[2]/p/text()').extract_first()
i['review2'] = review2_str.encode('ascii','ignore').strip()
review2_stat_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[2]/h5/div/@aria-label').extract_first()
i['review2_star'] = int(review2_stat_str.split(' ')[0])
except:
i['review2'] = None
i['review2_star'] = None
try:
review3_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[3]/p/text()').extract_first()
i['review3'] = review3_str.encode('ascii','ignore').strip()
review3_stat_str = hxs.xpath('//div[@id="content"]/div/div[2]/div[5]/div[3]/h5/div/@aria-label').extract_first()
i['review3_star'] = int(review3_stat_str.split(' ')[0])
except:
i['review3'] = None
i['review3_star'] = None
yield i
``` |
{
"source": "jpzwolak/QFlow-suite",
"score": 3
} |
#### File: QFlow-2.0/QFlow/Prepare_ML.py
```python
from tensorflow import data as tf_data
from tensorflow.keras import layers as tf_layers
from tensorflow.keras import Model as tf_Model
from tensorflow.keras.optimizers import Adam as tf_Adam
from QFlow import config
def input_fn(features, labels, shuffle=True, batch_size=64,
repeat=False, seed=None):
'''
A function for converting data into training/evaluation tf.Dataset
inputs:
features: np.ndarray containing features.
labels : np.ndarray containing labels for all examples.
shuffle : bool indicates whether to shuffle the dataset.
batch_size : int indicating the desired batch size.
repeat: bool specifying whether to repeat dataset.
seed: int seed used for shuffling dataset.
outputs:
ds : dataset ready for training/evaluation
'''
# Convert the inputs to a Dataset.
shuffle_buffer_size = 100
ds = tf_data.Dataset.from_tensor_slices((features, labels))
if shuffle:
ds = ds.shuffle(shuffle_buffer_size, seed=seed).batch(batch_size)
else:
ds = ds.batch(batch_size)
if repeat:
ds = ds.repeat()
return ds
def create_model(model_type='state_estimator',
model_opt='best_noise_opt'):
'''
inputs:
model_type: str specifying either 'state_estimator' or
'quality_control' type machine learning model.
model_opt: str specifying dataset the model parameters were optimized
on. Valid options for 'state_estimator' model_type:
'noiseless_opt' or 'best_noise_opt'. Valid options for
'quality_control' type: 'uniform_noise_dist_opt'.
'''
valid_model_types = ['state_estimator','quality_control']
if model_type not in valid_model_types:
raise ValueError(
'model_type not recognized: ', model_type,
' Valid values: ', valid_model_types)
valid_model_opts = {
'state_estimator': ['noiseless_opt', 'best_noise_opt'],
'quality_control': ['uniform_noise_dist_opt']}
if model_opt not in valid_model_opts[model_type]:
raise ValueError(
'model_opt not recognized: ', model_opt,
' Valid values: ', valid_model_opts[model_type])
if model_type=='state_estimator' and model_opt=='best_noise_opt':
lr = 1.21e-3
k_size = [[7, 7], [7, 7]]
cnn_maxpool = False
cnn_stack = 2
n_cnn = 2
# these lists should be length n_cnn
n_filters = [[22, 22], [35, 35]]
drop_rates = [[0.655,0.655], [0.194, 0.194]]
layer_norm = False
ave_pool = True
activation='relu'
dense_n = 0
elif model_type=='state_estimator' and model_opt == 'noiseless_opt':
lr = 3.45e-3
k_size = [[5], [5], [5]]
cnn_maxpool=False
cnn_stack = 1
n_cnn = 3
n_filters = [[23], [7], [18]]
drop_rates = [[0.12], [0.28], [0.30]]
layer_norm = True
ave_pool = True
activation = 'relu'
dense_n = 0
elif model_type=='quality_control' and model_opt=='uniform_noise_dist_opt':
lr = 2.65e-4
k_size = [[7, 3]]
cnn_maxpool = True
cnn_stack = 2
n_cnn = 1
n_filters = [[184, 249]]
drop_rates = [[0.05, 0.0]]
layer_norm = True
ave_pool = True
activation='swish'
dense_n = 1
dense_dropout = [0.6]
dense_units = [161]
# set stride to 2 if not using maxpool as size reduction
if cnn_maxpool:
cnn_stride=1
else:
cnn_stride=2
# input layer
inputs = tf_layers.Input(shape=(config.SUB_SIZE,config.SUB_SIZE,1))
x = inputs
for i in range(n_cnn):
for j in range(cnn_stack):
if j==cnn_stack-1:
stride = cnn_stride
else:
stride=1
x = tf_layers.Conv2D(
filters=n_filters[i][j],
kernel_size=k_size[i][j],
padding='same',
strides=stride)(x)
x = tf_layers.Dropout(rate=drop_rates[i][j])(x)
if layer_norm:
x = tf_layers.LayerNormalization()(x)
x = tf_layers.Activation(activation)(x)
if cnn_maxpool:
x = tf_layers.MaxPooling2D(pool_size=(2,2), strides=2)(x)
if ave_pool:
x = tf_layers.GlobalAvgPool2D()(x)
x = tf_layers.Flatten()(x)
for i in range(dense_n):
x = tf_layers.Dense(units=dense_units[i],activation=activation)(x)
x = tf_layers.Dropout(rate=dense_dropout[i])(x)
if model_type=='state_estimator':
outputs = tf_layers.Dense(
units=config.NUM_STATES, activation='softmax')(x)
model = tf_Model(inputs, outputs,
name='device_state_estimator_'+model_opt)
model.compile(
optimizer=tf_Adam(learning_rate=lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
elif model_type=='quality_control':
outputs = tf_layers.Dense(
units=config.NUM_QUALITY_CLASSES, activation='softmax')(x)
model = tf_Model(
inputs=inputs, outputs=outputs,
name='data_quality_control_'+model_opt)
model.compile(
optimizer=tf_Adam(learning_rate=lr),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
```
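A hypothetical end-to-end sketch tying `input_fn` and `create_model` together; the random arrays below merely stand in for the features and one-hot state labels that `QFlow.Process_Data.get_data` would normally provide.
```python
import numpy as np
from QFlow import config
from QFlow.Prepare_ML import input_fn, create_model

# placeholder data with the expected shapes
train_x = np.random.rand(256, config.SUB_SIZE, config.SUB_SIZE, 1).astype('float32')
train_y = np.eye(config.NUM_STATES)[np.random.randint(0, config.NUM_STATES, 256)]

train_ds = input_fn(train_x, train_y, shuffle=True, batch_size=64)
model = create_model(model_type='state_estimator', model_opt='best_noise_opt')
model.fit(train_ds, epochs=1)
```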
#### File: QFlow-2.0/QFlow/Process_Data.py
```python
import numpy as np
import random
from scipy.stats import skew as scipy_skew
from skimage.transform import resize as skimage_resize
from QFlow import config
## set of functions for loading and preparing a dataset for training.
def get_num_min_class(labels):
'''
Get the number of the minimum represented class in label vector.
Used for resampling data.
input:
labels: np.ndarray of labels
outputs:
num_samples: int number of samples for minimum class
'''
# use argmax as example's class
argmax_labels = np.argmax(labels, axis=-1)
# max of num_samples is all one label
num_samples = labels.shape[0]
for i in range(labels.shape[-1]):
lab_elems = np.sum(argmax_labels==i)
if lab_elems < num_samples:
num_samples = lab_elems
return num_samples
def resample_data(features, state_labels, labels=None, seed=None):
'''
Resample data to be evenly distributed across classes in labels by cutting
number of examples for each class to be equal to the number of examples
in the least represented class. (classes assumed to be last axis of
labels). Shuffles after resampling.
inputs:
features: ndarray of features to be resampled. Resample along first axis.
state_labels: ndarray of labels to be used for resampling
labels: ndarray of labels to be resampled.
seed: Seed of random number generator for shuffling idxs during resample
and for shuffling resampled features and labels.
outputs:
features: list of resampled features
labels: list of resampled labels
'''
rng = np.random.default_rng(seed)
num_samples = get_num_min_class(state_labels)
features_resamp = []; state_labels_resamp = []; labels_resamp = []
for i in range(state_labels.shape[-1]):
s_idxs = state_labels.argmax(axis=-1)==i
# first get full array of single state
features_s_full = features[s_idxs]
state_labels_s_full = state_labels[s_idxs]
if labels is not None:
labels_s_full = labels[s_idxs]
# then get idxs (0-length), shuffle, and slice to num_samples
# shuffle idxs to be sure labels and features are shuffled together
idxs = list(range(features_s_full.shape[0]))
rng.shuffle(idxs)
features_resamp.append(features_s_full[idxs[:num_samples]])
state_labels_resamp.append(state_labels_s_full[idxs[:num_samples]])
if labels is not None:
labels_resamp.append(labels_s_full[idxs[:num_samples]])
features_resamp_arr = np.concatenate(features_resamp, axis=0)
state_labels_resamp_arr = np.concatenate(state_labels_resamp, axis=0)
if labels is not None:
labels_resamp_arr = np.concatenate(labels_resamp, axis=0)
idxs = list(range(features_resamp_arr.shape[0]))
rng.shuffle(idxs)
if labels is not None:
return features_resamp_arr[idxs], labels_resamp_arr[idxs]
elif labels is None:
return features_resamp_arr[idxs], state_labels_resamp_arr[idxs]
def noise_mag_to_class(state_labels, noise_mags,
low_thresholds=None, high_thresholds=None):
'''
Function to convert noise magnitudes to noise classes.
Noise class thresholds are defined here. Thresholds for states
order is: no dot, left dot, central dot, right dot, double dot
Default low thresholds is the linear extrapolation to 100 % accuracy
of an average noisy-trained model vs. noise_mag. Default high
thresholds are from linear extrapolation to 0 % accuracy of an
average noisy trained model vs. noise_mag.
inputs:
state_labels: list of state labels. shape assumed to be
(num_examples, num_states).
noise_mags: list of float noise_mags for state_labels. shape assumed
to be (num_examples, ).
        low_thresholds: list of floats of shape (num_state, ) specifying
            the high/moderate signal-to-noise class thresholds.
        high_thresholds: list of floats of shape (num_state, ) specifying
            the moderate/low signal-to-noise class thresholds.
'''
# set number of noise classes and states.
# length of thresholds must be equal to num_states.
# no num_quality_classes != 3 are supported.
num_quality_classes = config.NUM_QUALITY_CLASSES
num_states = config.NUM_STATES
# set default thresholds
if high_thresholds is None:
high_thresholds = [1.22, 1.00, 1.21, 0.68, 2.00]
if low_thresholds is None:
low_thresholds = [0.31, 0.32, 0.41, 0.05, 0.47]
low_thresholds = np.array(low_thresholds)
high_thresholds = np.array(high_thresholds)
quality_classes = np.zeros(noise_mags.shape+(num_quality_classes,))
# use fractional labels by taking weighted average after
# applying thresholds
num_states = state_labels.shape[-1]
# get per state classes then sum across last axis later
per_state_classes = np.zeros(
noise_mags.shape + (num_quality_classes,) + (num_states,))
# use boolean indexing to define classes from noise mags/threshold arrays
for i in range(num_states):
per_state_classes[noise_mags <= low_thresholds[i],0, i] = 1
per_state_classes[(noise_mags > low_thresholds[i]) &\
(noise_mags <= high_thresholds[i]), 1, i] = 1
per_state_classes[noise_mags > high_thresholds[i], 2, i] = 1
# multiply each first axis element then sum across last axes
quality_classes = np.einsum('ijk,ik->ij', per_state_classes, state_labels)
return quality_classes
def get_data(f, train_test_split=0.9,
dat_key='sensor', label_key='state',
resample=True, seed=None,
low_thresholds=None, high_thresholds=None):
'''
Reads in the subregion data and converts it to a format useful for training
Note that the data is shuffled after reading in.
inputs:
f: one of:
str path to .npz file containing cropped data
dict of cropped data.
train_test_split: float fraction of data to use for training.
resample: bool specifying whether to resample data to get even state
representation.
seed: int random seed for file shuffling.
label_key: string key for data used for the label. One of:
'data_quality', 'noise_mag_factor', 'state'.
low_threshold: list of noise levels to use for high/moderate signal
to noise ratio threshold.
high_threshold: list of noise levels to use for moderate/low signal
to noise ratio threshold.
outputs:
train_data: np.ndarray of training data.
train_labels: np.ndarray of training labels.
eval_data: np.ndarray of training data.
eval_labels: np.ndarray of training labels.
'''
# treat f as path, or if TypeError treat as dict.
try:
dict_of_dicts = np.load(f, allow_pickle = True)
file_on_disk = True
except TypeError:
dict_of_dicts = f
file_on_disk = False
files = list(dict_of_dicts.keys())
random.Random(seed).shuffle(files)
inp = []
oup_state = []
# if we want a nonstate label load it so we can resample
if label_key!='state':
oup_labels = []
else:
oup_labels = None
train_labels = None
eval_labels = None
# if label is noise class, we need to get noise mag labels first
# then process to turn the mag into a class label
if label_key == 'data_quality':
data_quality = True
label_key = 'noise_mag_factor'
else:
data_quality = False
for file in files:
# for compressed data, file is the key of the dict of dicts
if file_on_disk:
data_dict = dict_of_dicts[file].item()
else:
data_dict = dict_of_dicts[file]
dat = data_dict[dat_key]
# generates a list of arrays
inp.append(dat.reshape(config.SUB_SIZE,config.SUB_SIZE,1))
oup_state.append(data_dict['state']) # generates a list of arrays
if oup_labels is not None:
oup_labels.append(data_dict[label_key])
inp = np.array(inp) # converts the list to np.array
oup_state = np.array(oup_state) # converts the list to np.array
if oup_labels is not None:
oup_labels = np.array(oup_labels)
    # split data into train and evaluation data/labels
n_samples = inp.shape[0]
print("Total number of samples :", n_samples)
n_train = int(train_test_split * n_samples)
train_data = inp[:n_train]
print("Training data info:", train_data.shape)
train_states = oup_state[:n_train]
if oup_labels is not None:
train_labels = oup_labels[:n_train]
eval_data = inp[n_train:]
print("Evaluation data info:", eval_data.shape)
eval_states = oup_state[n_train:]
if oup_labels is not None:
eval_labels = oup_labels[n_train:]
# convert noise mag to class before resampling/getting noise mags if
    # needed because resampling doesn't return state labels
if data_quality:
train_labels = noise_mag_to_class(
train_states, train_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
eval_labels = noise_mag_to_class(
eval_states, eval_labels,
low_thresholds=low_thresholds,
high_thresholds=high_thresholds,
)
# resample to make state representation even
if resample:
train_data, train_labels = resample_data(
train_data, train_states, train_labels)
eval_data, eval_labels = resample_data(
eval_data, eval_states, eval_labels)
elif not resample and label_key=='state':
train_labels = train_states
eval_labels = eval_states
# expand dim of labels to make sure that they have proper shape
    if oup_labels is not None and len(train_labels.shape) == 1:
        train_labels = np.expand_dims(train_labels, 1)
    if oup_labels is not None and len(eval_labels.shape) == 1:
        eval_labels = np.expand_dims(eval_labels, 1)
return train_data, train_labels, eval_data, eval_labels
## preprocess functions
def gradient(x):
'''
Take gradient of an ndarray in specified direction. Thin wrapper around
np.gradient(). Also note that x -> axis=1 and y-> axis=0
input:
x: An numpy ndarray to take the gradient of
output:
numpy ndarray containing gradient in x direction.
'''
return np.gradient(x, axis=1)
def apply_threshold(x, threshold_val=10, threshold_to=0):
'''
Thresholds an numpy ndarray to remove
Args:
x = numpy array with data to be filtered
threshold_val = percentile below which to set values to zero
'''
x[x < np.abs(np.percentile(x.flatten(),threshold_val))] = threshold_to
return x
def apply_clipping(x, clip_val=3, clip_to='clip_val'):
'''
Clip input symmetrically at clip_val number of std devs.
Do not zscore norm x, but apply thresholds using normed x
'''
x_clipped = np.copy(x)
mean = np.mean(x)
std = np.std(x)
norm_x = (x - mean) / std
# set clipped values to either the mean or clip threshold
if clip_to.lower() == 'clip_val':
x_clipped[norm_x < -clip_val] = -clip_val * std + mean
x_clipped[norm_x > clip_val] = clip_val * std + mean
elif clip_to.lower() == 'mean':
x_clipped[norm_x < -clip_val] = mean
x_clipped[norm_x > clip_val] = mean
else:
raise KeyError('"clip_to" option not valid: ' +str(clip_to) +\
'Valid options: clip_val, mean')
return x_clipped
def autoflip_skew(data):
'''
Autoflip a numpy ndarray based on the skew of the values
(effective for gradient data).
'''
skew_sign = np.sign(scipy_skew(np.ravel(data)))
return data*skew_sign
def zscore_norm(x):
'''
Takes a numpy ndarray and returns a z-score normalized version
'''
return (x-x.mean())/x.std()
class Preprocessor():
def __init__(self, autoflip=False, denoising=[],
clip_val=None, thresh_val=None):
'''
Class for doing preprocessing of data.
inputs:
autoflip: bool specifying whether to autoflip data.
denoising: list of str specifying denoising to apply to data.
clip_val: value for clipping denoising. Unused if 'clip' not in
denoising.
            thresh_val: value for threshold denoising. Unused if 'threshold' not in denoising.
'''
self.autoflip = autoflip
valid_denoising = ['threshold', 'clip']
if not set(denoising).issubset(valid_denoising):
raise ValueError(
'invalid denoising ', denoising,
' Valid values:', valid_denoising)
self.denoising = denoising
self.clip_val = clip_val
self.thresh_val = thresh_val
def proc_subimage(self, x):
'''
Takes the gradient of the measured data, applies denoising if specified,
normalizes, autoflips if specified,
and then adjusts the size (if necessary)
Args:
x = an array with data
'''
# take gradient
x = gradient(x)
        # apply thresholding
        if 'threshold' in self.denoising:
            if self.thresh_val is not None:
                x = apply_threshold(x, self.thresh_val)
            else:
                x = apply_threshold(x)
        # apply clipping
        if 'clip' in self.denoising:
            if self.clip_val is not None:
                x = apply_clipping(x, self.clip_val)
            else:
                x = apply_clipping(x)
# normalize with zscore normalization
x = zscore_norm(x)
# autoflip by skew of image gradient
if self.autoflip:
x = autoflip_skew(x)
target_shape = (config.SUB_SIZE, config.SUB_SIZE, 1)
if x.shape != target_shape:
x = skimage_resize(x, target_shape)
return x
def proc_subimage_set(self, x_arr):
'''
Loop through subimages and apply preprocessing to each one.
inputs:
            x_arr: full dataset of images. First axis assumed to be example index.
returns:
Full dataset of images with same shape, processed.
'''
return np.array([self.proc_subimage(x) for x in x_arr])
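# Hedged usage sketch (not from the original source; `raw_subimages` is a
# placeholder array of shape (n, H, W), and the resize target comes from
# config.SUB_SIZE defined elsewhere in this module):
#
# preproc = Preprocessor(autoflip=True, denoising=['threshold', 'clip'],
#                        clip_val=3, thresh_val=10)
# processed = preproc.proc_subimage_set(raw_subimages)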
``` |
{
"source": "jpzwolak/quantum-ml",
"score": 3
} |
#### File: junk/dot_classifier_tf/potential.py
```python
import numpy as np
def gauss(x,mean=0.0,stddev=0.02,peak=1.0):
    '''
    Input:
    x : x-coordinates
    Output:
    f(x) where f is a Gaussian with the given mean and peak value; note that
    the stddev argument is overridden below and set from the grid spacing
    '''
    stddev = 5*(x[1] - x[0])
return peak*np.exp(-(x-mean)**2/(2*stddev**2))
def init_ndot(x,n_dot):
'''
Input:
x : 1d grid for the dots
ndot : number of dots
Output:
    y : values of the potential on the grid, containing n_dot dots
The potential barriers are modelled as gaussians
'''
# n dots imply n+1 barriers
bar_centers = x[0] + (x[-1] - x[0])*np.random.rand(n_dot+1)
bar_heights = np.random.rand(n_dot+1)
#bar_heights = 0.5*np.ones(n_dot+1)
N = len(x)
y = np.zeros(N)
    # No need to optimize this loop: the dot number is generally small, and the
    # gauss function is already evaluated in a vectorised manner.
for j in range(n_dot+1):
y += gauss(x-bar_centers[j],peak=bar_heights[j])
return y
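# Minimal usage sketch (added for illustration, not part of the original file;
# the grid size and dot number below are arbitrary):
if __name__ == "__main__":
    x_grid = np.linspace(-1.0, 1.0, 200)
    potential = init_ndot(x_grid, n_dot=2)  # 2 dots -> 3 random Gaussian barriers
    print(potential.shape)  # (200,)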
```
#### File: junk/markov/thomas_fermi.py
```python
import numpy as np
# Coulomb interaction matrix
def calculate_K(E,N_dim,sigma = 1):
'''
Calculates the K matrix based on a power law with sigma added to prevent blowup
    E : energy scale for the self interaction, K_ij = E/sqrt(sigma^2 + (i-j)^2)
N_dim : size of the interaction matrix
sigma : parameter to prevent blowup for self-interaction, default = 1
'''
x = np.arange(N_dim)
K = E/np.sqrt((x[:,np.newaxis] - x)**2 + sigma**2)
return K
def solve_TF(mu_L1,mu_L2,N,V,K):
'''
Solves the TF equation V - mu + K n = 0 for mu_D and n along the N_D = N constraint
Linear system for V.size unknowns : vec(n) and mu_D
returns mu_D,vec(n)
'''
N_dim = V.size
# build up the LHS
A = K
a1 = -np.ones(N_dim)
a1[0] = 0
a1[N_dim-1] = 0
a2 = np.ones(N_dim+1)
a2[0] = 0
a2[N_dim-1] = 0
a2[N_dim] = 0
A = np.concatenate((A,a1[:,np.newaxis]),axis=1)
A = np.concatenate((A,[a2]))
# build up the RHS
b = -V
b[0] = b[0] + mu_L1
b[N_dim-1] = b[N_dim-1] + mu_L2
b = np.concatenate((b,[N]))
x = np.linalg.solve(A,b)
return x[N_dim],x[:N_dim]
def calculate_E_TF(mu_L1,mu_L2,mu_D,n,V,K):
'''
Calculates the Thomas-Fermi energy
E_TF = V n + 1/2 n K n
Note that it does not include mu, so this is actual energy and not the free energy
'''
N_dim = V.size
    # chemical potential vector: mu_L at the leads, mu_D inside the dot
    # (not used in E_TF below, which deliberately excludes the -mu*n free-energy term)
mu_vec = mu_D*np.ones(N_dim)
mu_vec[0] = mu_L1
mu_vec[-1] = mu_L2
E_TF = np.sum(V*n) + 0.5*np.sum(n*np.dot(K,n))
return E_TF
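# Worked example (illustrative; the parameter values below are arbitrary and not
# taken from the original project):
if __name__ == "__main__":
    V = 0.1*np.ones(10)              # flat potential on a 10-site grid
    K = calculate_K(E=1.0, N_dim=10)
    mu_D, n = solve_TF(mu_L1=0.5, mu_L2=0.5, N=3.0, V=V, K=K)
    print(mu_D, n.sum(), calculate_E_TF(0.5, 0.5, mu_D, n, V, K))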
``` |
{
"source": "jq-77/leetcode",
"score": 4
} |
#### File: Python/Array/48_Rotate Image.py
```python
"""
You are given an n x n 2D matrix representing an image,
rotate the image by 90 degrees (clockwise).
You have to rotate the image in-place, which means you have to modify the input 2D matrix directly.
DO NOT allocate another 2D matrix and do the rotation.
"""
class Solution:
def rotate(self, matrix):
"""
Do not return anything, modify matrix in-place instead.
"""
        # build each new row i by reading column i of the original matrix
        # from the bottom row up, i.e. a 90-degree clockwise rotation in place
matrix[:] = [ [ row[i] for row in reversed(matrix)] for i in range(len(matrix)) ]
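# Illustrative example (not in the original file): for matrix = [[1, 2], [3, 4]],
# Solution().rotate(matrix) mutates it in place to [[3, 1], [4, 2]].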
```
#### File: Python/Greedy/435_Non-overlapping_Intervals.py
```python
"""
Given an array of intervals intervals where intervals[i] = [starti, endi],
return the minimum number of intervals you need to remove to make the rest of the intervals non-overlapping.
"""
class Solution(object):
def eraseOverlapIntervals(self, intervals):
"""
:type intervals: List[List[int]]
:rtype: int
"""
intervals.sort(key=lambda x: x[0])
end = intervals[0][1]
rm = 0
for i in range(1, len(intervals)):
if intervals[i][0] >= end:
end = intervals[i][1]
else:
rm += 1
                end = min(end, intervals[i][1])  # keep the interval with the smaller end to leave more room for later ones
return rm
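# Illustrative example (not in the original file):
# Solution().eraseOverlapIntervals([[1, 2], [2, 3], [3, 4], [1, 3]]) returns 1,
# since removing [1, 3] leaves the remaining intervals non-overlapping.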
```
#### File: Python/Stack/155_Min Stack.py
```python
"""
Design a stack that supports push, pop, top, and retrieving the minimum element in constant time.
Implement the MinStack class:
MinStack() initializes the stack object.
void push(val) pushes the element val onto the stack.
void pop() removes the element on the top of the stack.
int top() gets the top element of the stack.
int getMin() retrieves the minimum element in the stack.
"""
class MinStack(object):
    def __init__(self):
        """
        initialize your data structure here.
        """
        self.stack = []
        # auxiliary stack whose top is always the current minimum,
        # so getMin runs in constant time as the problem requires
        self.min_stack = []
    def push(self, val):
        """
        :type val: int
        :rtype: None
        """
        self.stack.append(val)
        if not self.min_stack or val <= self.min_stack[-1]:
            self.min_stack.append(val)
    def pop(self):
        """
        :rtype: None
        """
        if self.stack.pop() == self.min_stack[-1]:
            self.min_stack.pop()
    def top(self):
        """
        :rtype: int
        """
        return self.stack[-1]
    def getMin(self):
        """
        :rtype: int
        """
        return self.min_stack[-1]
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
```
#### File: Python/Two Pointers/167_Two_Sum_II.py
```python
"""
Given an array of integers numbers that is already sorted in non-decreasing order,
find two numbers such that they add up to a specific target number.
Return the indices of the two numbers (1-indexed) as an integer array answer of size 2,
where 1 <= answer[0] < answer[1] <= numbers.length.
The tests are generated such that there is exactly one solution.
You may not use the same element twice.
"""
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
i = 1
j = len(nums)
while i < j:
total = nums[i-1] + nums[j-1]
if total == target:
break
elif total < target:
i += 1
else:
j -= 1
return [i, j]
```
#### File: Python/Two Pointers/two_sum.py
```python
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
        i = 0
        while i < len(nums) - 1:
            j = i + 1
            while j < len(nums):
                if nums[i] + nums[j] == target:
                    return [i, j]
                j += 1
            i += 1
        return []
``` |
{
"source": "JQChong/confessionbackend",
"score": 2
} |
#### File: confessionbackend/comment/views.py
```python
import random
from rest_framework import generics, status
from comment.models import Comment
from comment.serializers import CommentSerializer
from rest_framework.response import Response
from confessionbackend.paginationsettings import PaginationSettings
from rest_framework_simplejwt.authentication import JWTAuthentication
class CommentList(generics.ListCreateAPIView):
serializer_class = CommentSerializer
pagination_class = PaginationSettings
def get_queryset(self):
approved = self.request.GET.get('approved', None)
post_id = self.request.GET.get('post_id', None)
ordering = self.request.GET.getlist('order_by', None)
current_query_set = Comment.objects.all()
if approved is not None:
current_query_set = current_query_set.filter(approved=approved)
if post_id is not None:
current_query_set = current_query_set.filter(post_id=post_id)
        if ordering:
test_obj = current_query_set.first()
"""
in the event that someone decided to meddle around with the query parameters,
the list will be sorted by the default way, i.e. by likes and time created
in descending order.
"""
def check_all_attr(obj, arr):
for attr in arr:
if getattr(obj, attr, None) is None:
return False
return True
if check_all_attr(test_obj, ordering):
current_query_set = current_query_set.order_by(*ordering)
else:
current_query_set = current_query_set.order_by('-likes', '-time_created')
else:
current_query_set = current_query_set.order_by('-likes', '-time_created')
return current_query_set
def create(self, request, *args, **kwargs):
if 'approved' in self.request.data:
return Response({'message': 'Object should not contain approved flag.'}, status=status.HTTP_403_FORBIDDEN)
return super().create(request, *args, **kwargs)
def perform_create(self, serializer):
instance = serializer.save()
data = self.request.data
if data['poster'] == 'Anonymous':
number = str(random.randint(1000, 9999))
anonId = number[:2] + str(instance.id) + number[2:]
serializer.save(poster='Anonymous#' + anonId)
class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Comment.objects.all()
serializer_class = CommentSerializer
def update(self, request, *args, **kwargs):
if self.request.method == 'PUT':
return Response({'message': "METHOD NOT ALLOWED"}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
if self.request.method == 'PATCH':
if 'approved' in self.request.data and not JWTAuthentication().authenticate(self.request):
return Response({'message': 'ILLEGAL OPERATION'}, status=status.HTTP_401_UNAUTHORIZED)
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
if not JWTAuthentication().authenticate(self.request):
return Response({'message': 'ILLEGAL OPERATION'}, status=status.HTTP_401_UNAUTHORIZED)
return super().destroy(request, *args, **kwargs)
``` |
{
"source": "JQGoh/multivariate_time_series_pipeline",
"score": 2
} |
#### File: src/features/make_features.py
```python
import cloudpickle
import numpy as np
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from src.enums import DataMetadata, FilePathEnum
from src.features.custom_transformers import (
DataFrameTransformer,
DatetimeFeatureTransformer,
TransformWithFeatureNamesFactory,
)
from src.features.tsfresh_transformers import (
RollingLagsTrasformer,
TSFreshRollingTransformer,
)
def main():
mock_data = pd.read_csv(
FilePathEnum.MOCK_DATA,
infer_datetime_format=DataMetadata.DATETIME_FORMAT,
parse_dates=[DataMetadata.DATETIME],
)
# Take 10% of most recent data as test set
grouped = mock_data.groupby(DataMetadata.ID)
max_row_counts = grouped.count()[DataMetadata.TARGET].max()
test_row_count = int(max_row_counts * 0.1)
test = grouped.tail(test_row_count)
train = mock_data[~mock_data.index.isin(test.index)]
# raw test set saved for later use
test.to_csv(FilePathEnum.TEST_DATA, index=None)
transformer_factory = TransformWithFeatureNamesFactory()
transformer_factory.register_format("SimpleImputer", SimpleImputer)
print("Setting imputer, datetime pipeline to impute input feature values")
pre_imputer_class = "Pre-MedianImputer"
derive_datetime_class = "DeriveDatetime"
impute_and_datetime_transformer = ColumnTransformer(
transformers=[
(
pre_imputer_class,
transformer_factory.get_transformer("SimpleImputer")(
names=DataMetadata.NUMERIC_FEATURES,
missing_values=np.nan,
strategy="median",
),
DataMetadata.NUMERIC_FEATURES,
),
(
derive_datetime_class,
DatetimeFeatureTransformer(),
DataMetadata.DATETIME,
),
],
verbose=True,
remainder="passthrough",
)
impute_and_datetime_pipeline = Pipeline(
steps=[
("ImputeAndDatetime", impute_and_datetime_transformer),
],
)
train_datetime = impute_and_datetime_pipeline.fit_transform(train)
datetime_transformed_names = impute_and_datetime_pipeline.get_feature_names_out()
print(
"Setting multivariate time series pipeline that includes Tsfresh derived rolling features"
)
features_for_rolling = [
feature
for feature in datetime_transformed_names
if feature.startswith(pre_imputer_class)
]
    # one-hot encoding applied to the hour component only
one_hot_features = [derive_datetime_class + "__hour"]
kind_to_fc_params = {
feature: {
"median": None,
"maximum": None,
"minimum": None,
"c3": [{"lag": 10}],
}
for feature in features_for_rolling
}
tsfresh_rolling_class = "TSFreshRolling"
rolling_lags_class = "RollingLags"
tsfresh_pipeline = Pipeline(
steps=[
(
"CastToDataFrame",
DataFrameTransformer(columns=datetime_transformed_names),
),
(
"tsfresh",
ColumnTransformer(
transformers=[
(
tsfresh_rolling_class,
TSFreshRollingTransformer(
input_column_names=features_for_rolling,
kind_to_fc_parameters=kind_to_fc_params,
rolling_window_size=30,
column_id="remainder" + "__" + DataMetadata.ID,
column_sort=derive_datetime_class
+ "__"
+ DataMetadata.DATETIME,
),
features_for_rolling
+ [
derive_datetime_class + "__" + DataMetadata.DATETIME,
"remainder" + "__" + DataMetadata.ID,
],
),
(
rolling_lags_class,
RollingLagsTrasformer(
input_column_names=features_for_rolling,
rolling_window_size=30,
column_id="remainder" + "__" + DataMetadata.ID,
column_sort=derive_datetime_class
+ "__"
+ DataMetadata.DATETIME,
orders=[1, 2, 3],
),
features_for_rolling
+ [
derive_datetime_class + "__" + DataMetadata.DATETIME,
"remainder" + "__" + DataMetadata.ID,
],
),
(
"OneHot",
OneHotEncoder(),
one_hot_features,
),
(
"PassThrough",
"passthrough",
[
derive_datetime_class + "__" + DataMetadata.DATETIME,
"remainder" "__" + DataMetadata.ID,
],
),
],
verbose=True,
remainder="passthrough",
),
),
]
)
train_tsfresh = tsfresh_pipeline.fit_transform(train_datetime)
tsfresh_transformed_names = tsfresh_pipeline.get_feature_names_out()
print(
"Setting post time series derived features pipeline to impute any other "
"remaining missing feature values followed by standard scaling."
)
numeric_names_for_transform = [
col
for col in tsfresh_transformed_names
if col.startswith(tsfresh_rolling_class) or col.startswith(rolling_lags_class)
]
impute_scaler_pipeline = Pipeline(
steps=[
(
"Post-MedianImputer",
transformer_factory.get_transformer("SimpleImputer")(
names=numeric_names_for_transform,
missing_values=np.nan,
strategy="median",
),
),
(
"StandardScaler",
StandardScaler(),
),
]
)
post_process_pipeline = Pipeline(
steps=[
(
"DataFrameConverter",
DataFrameTransformer(columns=tsfresh_transformed_names),
),
(
"PostProcess",
ColumnTransformer(
transformers=[
(
"ImputeAndScaler",
impute_scaler_pipeline,
numeric_names_for_transform,
)
],
verbose=True,
remainder="passthrough",
),
),
]
)
train_post_processed = post_process_pipeline.fit_transform(train_tsfresh)
post_transformed_names = post_process_pipeline.get_feature_names_out()
print("Save the derived time series multivariate features")
train_df = pd.DataFrame(train_post_processed, columns=post_transformed_names)
train_df.to_csv(FilePathEnum.TRAIN_FEATURES, index=None)
# target column is in fact not transformed but can be loaded for later use
# similarly, record some other useful special columns
target_name = next(
name for name in post_transformed_names if name.endswith(DataMetadata.TARGET)
)
series_id = next(
name for name in post_transformed_names if name.endswith(DataMetadata.ID)
)
post_processed_datetime = next(
name for name in post_transformed_names if name.endswith(DataMetadata.DATETIME)
)
cloudpickle.dump(
(
impute_and_datetime_pipeline,
tsfresh_pipeline,
post_process_pipeline,
post_processed_datetime,
series_id,
target_name,
),
open(FilePathEnum.PIPELINE, "wb"),
)
if __name__ == "__main__":
main()
``` |
{
"source": "jqhoogland/randnn",
"score": 3
} |
#### File: randnn/networks/dales_law_nn.py
```python
from typing import Optional
import numpy as np
from .base_nn import BaseNN, MatrixInit
from ..weights import get_gaussian_weights
class DalesLawNN(BaseNN, MatrixInit):
def __init__(
self,
frac_excitatory: float = 0.5,
g_excitatory: float = 1.,
g_inhibitory: Optional[float] = None,
mu_excitatory: float = 0.,
balanced=True,
zero_sum=False,
n_dofs: int = 100,
**kwargs
):
"""
### Fraction excitatory/inhibitory
:param frac_excitatory ($f_E$ or just $f$): the fraction of neurons that are excitatory
:param frac_inhibitory ($f_I$): the fraction of neurons that are inhibitory
- This is fixed by ``frac_excitatory`` ($f_I = 1-f_E$)
### Variance of excitatory/inhibitory couplings
:param g_excitatory ($g_E$): the excitatory population's standard deviation
:param g_inhibitory ($g_I$): the inhibitory population's standard deviation
### Average of excitatory/inhibitory coupling strength
:param mu_excitatory ($\mu_E$): the average coupling strength of the excitatory neurons
:param mu_inhibitory ($\mu_I$): the average coupling strength of the inhibitory neurons
### Additional constraints
:param balanced: whether to set the *average* over all edges to zero.
If this is true, then ``mu_inhibitory`` is fixed by ``mu_excitatory``:
$$\mu_E f + \mu_I(1-f) = 0$$
:param zero_sum: whether to enforce *strict* input-output balance (not just on average).
If this is true, then:
$$\sum_{j=1}^n(J_{ij} - M_{ij}) = 0$$
### Matrices
:param coupling_matrix ($J$): The final coupling matrix.
Given by $$J= A \Sigma P + M$$
:param randomness_matrix ($A$): a normally distributed matrix of zero mean and unit variance,
:param variances_matrix ($\Sigma$): a diagonal matrix with the variance of neuron $i$ in index $i$.
        - Its first $nf$ diagonal elements have value $g_E$.
        - The remaining $n(1-f)$ diagonal elements have value $g_I$.
:param projection_matrix ($P$): which enforces the ``zero_sum`` constraint
- If not ``zero_sum``: $P$ is the identity matrix
- If ``zero_sum``: $P$ is a matrix of all ones with coefficient $1/n$
:param offset_matrix ($M$): which tracks the offset or average strength of edge $(i, j)$.
        - Its first $nf$ columns have value $\mu_E$.
        - The remaining $n(1-f)$ columns have value $\mu_I$.
"""
assert 0 < frac_excitatory < 1
if g_inhibitory is None:
g_inhibitory = g_excitatory
self.g_excitatory = g_excitatory
self.g_inhibitory = g_inhibitory
self.frac_excitatory = frac_excitatory
self.frac_inhibitory = 1. - frac_excitatory
self.n_excitatory = round(self.frac_excitatory * n_dofs)
self.n_inhibitory = round(self.frac_inhibitory * n_dofs)
self.mu_excitatory = mu_excitatory
self.mu_inhibitory = -mu_excitatory * frac_excitatory / (1. - frac_excitatory) if balanced else -mu_excitatory
self.balanced = balanced
self.zero_sum = zero_sum
super(BaseNN, self).__init__(n_dofs=n_dofs, **kwargs)
super(MatrixInit, self).__init__()
def __repr__(self):
return "<DalesLawNN n:{} t:{} g_e:{} g_i:{} f_e:{} f_i:{} mu_e:{} mu_i:{} seed:{}>".format(
self.n_dofs, self.timestep, self.g_excitatory, self.g_inhibitory, self.frac_excitatory,
self.frac_inhibitory, self.mu_excitatory, self.mu_inhibitory, self.network_seed
)
@property
def get_radius(self):
return np.sqrt(self.frac_excitatory * self.g_excitatory ** 2 + self.frac_inhibitory * self.g_inhibitory ** 2)
def gen_variances(self):
return np.diag([*[self.g_excitatory] * self.n_excitatory, *[self.g_inhibitory] * self.n_inhibitory])
def gen_randomness(self):
return get_gaussian_weights(self.n_dofs, 1.)
def gen_projection(self):
if self.zero_sum:
            return np.eye(self.n_dofs) - np.ones((self.n_dofs, self.n_dofs)) / self.n_dofs
return np.eye(self.n_dofs, self.n_dofs)
def gen_offset(self):
row = np.array([*([self.mu_excitatory] * round(self.n_excitatory)),
*([self.mu_inhibitory] * round(self.n_inhibitory))]).reshape((1, self.n_dofs))
return np.ones((self.n_dofs, 1)) @ row
```
#### File: randnn/topologies/dilution.py
```python
from typing import Optional
import numpy as np
def dilute_connectivity(n_dofs: int, sparsity: Optional[float]=None, self_interaction: bool=False):
"""
:param n_dofs: the dimension of the connectivity matrix.
:param sparsity: the sparsity coefficient.
:param self_interaction: whether to allow on-diagonal elements. TODO
"""
if sparsity is None:
return 1.
assert 0 <= sparsity <= 1., f"``sparsity`` must be greater than 0 or less than 1, is {sparsity}"
sparsity_mask = np.ones([n_dofs, n_dofs])
n_edges = int(n_dofs * (n_dofs + self_interaction - 1))
n_edges_deleted = round(n_edges * sparsity)
if self_interaction is False:
sparsity_mask[np.diag_indices_from(sparsity_mask)] = 0
indices = []
for i in range(n_dofs):
for j in range(n_dofs):
if i != j or self_interaction:
indices.append([i, j])
indices = np.array(indices)
assert indices.shape[0] == n_edges
diluted_indices = indices[np.random.choice(n_edges, size=n_edges_deleted, replace=False)]
for (i, j) in diluted_indices:
# There's definitely a cleverer array slicing way to do this
sparsity_mask[i, j] = 0
return sparsity_mask
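# Minimal usage sketch (illustrative; the sizes below are arbitrary): zero out
# half of the off-diagonal couplings of a dense coupling matrix.
if __name__ == "__main__":
    mask = dilute_connectivity(n_dofs=10, sparsity=0.5)
    couplings = np.random.normal(size=(10, 10)) * mask
    print(int(mask.sum()))  # 45 of the 90 off-diagonal entries survive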
```
#### File: randnn/topologies/fully_connected.py
```python
import numpy as np
def get_fully_connected_edges(n_dofs: int, self_connection: bool = False):
return np.ones((n_dofs, n_dofs)) - (1 - self_connection) * np.eye(n_dofs)
```
#### File: randnn/weights/gaussian.py
```python
import numpy as np
def get_gaussian_weights(
n_dofs: int, coupling_strength: float,
) -> np.ndarray:
"""
:param n_dofs: the number of nodes in the network
:param coupling_strength: the final couplings are drawn from a
normal distribution with variation $g^2/N$, where $g$ is the
coupling strength and $N$ is the number of nodes.
"""
strength_normalized = (coupling_strength / np.sqrt(n_dofs))
unit_matrix = np.random.normal(size=(n_dofs, n_dofs))
coupling_matrix = (strength_normalized * unit_matrix / np.std(unit_matrix))
return coupling_matrix
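# Minimal usage sketch (illustrative): for large N the eigenvalues of the coupling
# matrix should fill a disc whose radius is roughly the coupling strength g.
if __name__ == "__main__":
    J = get_gaussian_weights(n_dofs=500, coupling_strength=1.5)
    print(np.abs(np.linalg.eigvals(J)).max())  # close to 1.5 (circular law)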
``` |
{
"source": "jqhoogland/rgpy",
"score": 3
} |
#### File: rgpy/samplers/metropolis_hastings_tf.py
```python
import collections
import functools
from typing import List, Callable, NamedTuple, Tuple
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from rgpy import visualize
#------------------------------------------------------------------------
#
# Base Lattice Kernel
#
#------------------------------------------------------------------------
class BaseLatticeKernel(tfp.mcmc.TransitionKernel):
def __init__(self,
n_spins: int,
name="lattice_kernel"):
self.n_spins = n_spins
self._name = name
sign_flippers = np.ones((n_spins, n_spins))
idxs = np.arange(n_spins)
sign_flippers[idxs, idxs] = -1
self.sign_flippers = tf.constant(sign_flippers, dtype=tf.float32)
def get_delta_energy(self,
current_state: List[tf.Tensor],
current_energy: tf.Tensor,
prime_state: List[tf.Tensor], *args):
"""
The energy change that would result from flipping spin i of
current_state (which has total energy current_energy).
Naive implementation calculates energy of the prime state
Args:
current_state: the immediate configuration of spins
current_energy: the corresponding energy
prime_state (List[tf.Tensor]): the configuration that would result
from flipping spin i
*args: further details about the flip
(for child classes to provide more efficient implementations)
Returns:
delta_energy (tf.Tensor): change in energy for flip of spin i
next_energy (tf.Tensor): energy of prime_state
"""
        next_energy = self.get_energy(prime_state)
delta_energy = next_energy - current_energy
return delta_energy, next_energy
def get_energy(self, state):
raise NotImplementedError
def gen_possible_step(self, state):
""" Chooses a random index of a spin to flip in state.
Returns:
i (tf.Tensor): index of spin
prime_state (List[tf.Tensor]]): result of flipping spin i in state
"""
i = tf.random.uniform([], minval=0, maxval=self.n_spins, dtype=tf.int32)
sign_flipper = self.sign_flippers[i]
prime_state = sign_flipper * state
return i, prime_state
def _one_step(self,
current_state: List[tf.Tensor],
previous_kernel_results: List[tf.Tensor]) -> Tuple[
List[tf.Tensor], List[tf.Tensor]]:
"""Progress one step for one chain.
Each step only updates one element of state. Consider specifying
`num_steps_between_results` in tfp.mcmc.sample_chain as len(samplers) - 1
to obtain entirely new states for each result.
Args:
current_state: shape [n_spins]
previous_kernel_results: shape [1]
"""
# Previous kernel results contains the energy of the previous state
current_energy = previous_kernel_results
i, prime_state = self.gen_possible_step(current_state)
delta_energy, next_energy = self.get_delta_energy(current_state,
current_energy,
prime_state,
i)
def accept_flip():
"""
Returns:
prime_state: current state with spin i flipped
next_energy:
"""
nonlocal prime_state
nonlocal next_energy
#_prime_state = tf.Print(prime_state, [i], "Accepted flip: ")
#_prime_state = tf.Print(_prime_state, [prime_state], 'New state: ', summarize=10)
#_next_energy = tf.Print(next_energy, [delta_energy], 'With delta energy: ')
return [prime_state, next_energy]
def prob_flip():
# Update state if randomly generated value in [0,1) exceeds the
# relative probability
accept_threshold = tf.exp(-delta_energy)
accept_value = tf.random.uniform((1, ), dtype=tf.float32)[0]
#accept_threshold = tf.Print(accept_threshold, [accept_threshold], "Threshold to accept: ")
#accept_value = tf.Print(accept_value, [accept_value], "Random value: ")
is_accepted = tf.greater_equal(accept_threshold, accept_value)
#is_accepted = tf.Print(is_accepted, [is_accepted], "Probabilistic flip accepted? ")
reject = current_state
#reject = tf.Print(reject, [i], "Rejected flip: ")
return tf.cond(
is_accepted,
accept_flip,
lambda: [reject, current_energy])
# if delta energy <= 0, accept the flip
# else accept the flip with probability exp(-delta_energy)
[next_state, next_energy] = tf.cond(
tf.less_equal(delta_energy, tf.constant(0., dtype=tf.float32)),
accept_flip,
prob_flip)
# Kernel results keep track of the energy of the previous configuration
return next_state, next_energy
def one_step(self, current_state: List[tf.Tensor],
previous_kernel_results: List[tf.Tensor]) -> Tuple[
List[tf.Tensor], List[tf.Tensor]]:
"""Progress one step for each chain.
Each step only updates one element of state. Consider specifying
`num_steps_between_results` in tfp.mcmc.sample_chain as len(samplers) - 1
to obtain entirely new states for each result.
Args:
current_state: shape [n_chains, n_spins]
previous_kernel_results: shape [n_chains, 1]
"""
updates = self._one_step(current_state, previous_kernel_results)
return updates
def bootstrap_results(self, init_state: List[tf.Tensor]) -> List[tf.Tensor]:
"""Initiates results based off of initial state.
Args:
init_state: Initial state, usually specified in `current_state` of
tfp.mcmc.sample_chain. shape [n_chains, n_spins]
Returns:
Initial accumulated results to begin the chain. shape [n_chains, 1]
"""
return self.get_energy(init_state)
@property
def is_calibrated(self) -> bool:
return True
#------------------------------------------------------------------------
#
# Generic Lattice Kernel
#
#------------------------------------------------------------------------
class GenericLatticeKernel(BaseLatticeKernel):
def __init__(self,
n_spins: int,
energy_fn=lambda state: 0.,
name="rbm_sampling_kernel"):
"""Creates a kernel that can sample visible configurations for a trained
RBM
Args:
energy_fn: function that returns the energy of a lattice state
"""
self._get_energy = energy_fn
BaseLatticeKernel.__init__(self, n_spins, name)
def get_energy(self, state) -> tf.Tensor:
return self._get_energy(state)
def set_energy_fn(self,
energy_fn: Callable[[List[tf.Tensor]], tf.Tensor]) -> None:
self._get_energy = energy_fn
def generic_generator_graph(
energy_fn,
n_spins=8,
n_results_per_chain=10,
n_chains=1,
n_burnin_steps=None,
n_steps_between_results=None,
draw=True,
save=True):
"""
n_results = `n_results_per_chain` * `n_chains`
Generates n_results samples of an generic lattice system of `n_spins`
(helical boundary conditions)
Note:
This is a wrapper for tfp.mcmc.sample_chain, with modifications:
No arguments `chain_results` and `previous_kernel_results`
(each chain starts from a randomly initialized state)
This returns all results accumulated along the first axis
Currently does nothing with kernel_results. This may change.
Args:
`energy_fn`: the energy function
`n_spins`: the number of units
`n_results_per_chain`: number of results to generate per chain
`n_chain`: number of chains to run (in parallel).
Analagous to `parallel_iterations` in tfp.mcmc.sample_chain
`n_burnin_steps`: number of steps to let the system 'thermalize' before
taking the first result. (Default= `n_spins` ** 2)
`n_steps_between_results`: number of steps between results
(to reduce correlated outcomes). (Default= `n_spins` ** 2)
`draw`: whether to draw the samples (to png) after generation.
filename = 'J=<J>_h=<h>_lw=<lattice_width>.png'
Yields:
`results` (List[tf.Tensor]): newly-generated 2d ising samples
shape [`n_results`, `n_spins`]
"""
n_results = n_results_per_chain * n_chains
# TODO: more research into optimal #s for below to avoid autocorrelation
if n_burnin_steps is None:
n_burnin_steps = n_spins
if n_steps_between_results is None:
n_steps_between_results = n_spins
# shape [n_chains, n_spins]
init_state = tf.constant(np.random.choice(a=[-1., 1.],
size=(n_spins, )),
dtype=tf.float32)
generic_kernel = GenericLatticeKernel(n_spins,
energy_fn)
# Run the chain (with burn-in).
samples, kernel_results= tfp.mcmc.sample_chain(
num_results=n_results_per_chain,
num_burnin_steps=n_burnin_steps,
num_steps_between_results=n_steps_between_results,
current_state=init_state,
parallel_iterations=n_chains,
kernel=generic_kernel)
# Accumulate all results along first axis
samples = tf.reshape(samples, [-1, n_spins])
kernel_results = tf.reshape(kernel_results, [-1, 1])
return samples
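# Hedged usage sketch (not part of the original file). The energy function below
# is a placeholder nearest-neighbour Ising chain energy; depending on the
# TensorFlow/TFP version, the returned tensor may still need to be evaluated
# inside a session or tf.function.
#
# def ising_energy(state):
#     return -tf.reduce_sum(state[:-1] * state[1:])
#
# samples = generic_generator_graph(ising_energy, n_spins=8,
#                                   n_results_per_chain=10, n_chains=1)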
```
#### File: rgpy/standard/block_rg.py
```python
import numpy as np
class BlockRGTransform(object):
def __init__(self,
block_size):
""" Initializes a class which performs standard RG on a given visible block.
        Args:
            block_size: the linear size of the visible block being decimated
"""
self.block_size = block_size
# not even used. Perhaps we could hard-code more interesting rules
def transform(self, batch_x):
"""
Maps a visible configuration to a hidden configuration
Args:
batch_x: the visible configuration to transform shape [batch_size, n_visible]
"""
        total = np.sign(np.sum(2 * batch_x - 1, axis=1))
        # break ties (blocks with equal numbers of up and down spins) at random
        ties = total == 0
        total[ties] = np.random.choice([-1, 1], size=int(ties.sum()))
        return total
def transform_nonbinary(self, batch_x):
"""
Maps a visible configuration to a hidden configuration
Args:
batch_x: the visible configuration to transform
"""
        total = np.sign(np.sum(batch_x, axis=1))
        # break ties (blocks with equal numbers of up and down spins) at random
        ties = total == 0
        total[ties] = np.random.choice([-1, 1], size=int(ties.sum()))
        return total
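# Illustrative example (not in the original file): majority-rule decimation of
# two 2x2 blocks of +/-1 spins; ties would be broken at random.
if __name__ == "__main__":
    blocks = np.array([[1, 1, 1, -1],
                       [-1, -1, 1, -1]])
    rg = BlockRGTransform(block_size=2)
    print(rg.transform_nonbinary(blocks))  # first block -> +1, second -> -1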
```
#### File: rgpy/tests/test_rbm.py
```python
import os
from tfrbm.bbrbm import BBRBM
from tfrbm import visualize
try:
import _pickle as pickle
except:
import pickle
sample_descriptors = ['cold', ]
def get_samples(sample_descriptor):
filepath = './crit.samples.pkl'
# Load the file if restrictions have already been processed
with open(filepath, 'rb') as f:
samples = pickle.load(f)
return samples
lattice_width = 16
n_visible = 256
samples = get_samples(sample_descriptors[0])
visualize.draw_samples(samples, (lattice_width, lattice_width), (25, 40),
'crit.samples.png')
bbrbm = BBRBM(n_visible=n_visible, n_hidden=128, learning_rate=0.01, use_tqdm=True)
errs = bbrbm.fit(samples, n_epoches=100, batch_size=10)
new_samples = bbrbm.gibbs_sampler()
visualize.draw_samples(new_samples, (lattice_width, lattice_width), (25, 40),
'crit.gibbs_chains.png')
```
#### File: rgpy/rgpy/transformations.py
```python
import os, warnings, math
from functools import reduce
from PIL import Image
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import rgpy.samplers
import rgpy.visualize
from rgpy.rbms import *
from rgpy.util import log_timer
from rgpy.standard import BlockRGTransform
default_psi_rbm = {
"n_hidden":2,
"learning_rate": 0.025,
"lmbda":0.01,
"k":1,
"use_tqdm":True,
"binary": [-1, 1]
}
default_psi_rbm_fit = {
"n_epochs": 100,
"batch_size": 25,
}
default_theta_rbm = {
"n_hidden":10,
"learning_rate": 0.025,
"lmbda":0.01,
"k":1,
"use_tqdm":True,
"binary": [-1, 1]
}
default_theta_rbm_fit = {
"n_epochs": 100,
"batch_size": 25,
}
default_lambda_rbm = {
"n_hidden":1,
"lmbda":0.01,
"use_tqdm":True,
"binary": [-1, 1]
}
default_lambda_rbm_fit = {
"n_epochs": 100,
"batch_size": 800,
}
class RGTransform(object):
"""
Perform RG transformations on Lattice Steps.
Separates boilerplate for manipulating and storing files of samples
and the logic of transforming these samples.
Children of this class will include particular implementations like:
- Block-spin renormalization
- Real-space Mutual Information Maximization
"""
def __init__(self,
lattice_width=8,
block_size=2,
n_samples=1000):
"docstring"
self.n_samples = n_samples
self.lattice_width = lattice_width
self.lattice_shape = [lattice_width, lattice_width]
self.n_spins = self.lattice_width ** 2
self.block_size = block_size
# TODO: Generalize to any dimensions / sizes
self.n_v_spins = self.block_size ** 2
self.n_e_spins = 4 + 4 * 4
self.n_ve_spins = self.n_v_spins + self.n_e_spins
self.n_ve_samples_per_axis = self.lattice_width // self.block_size
self.n_blocks_per_sample = (self.n_ve_samples_per_axis) ** 2
self.n_ve_samples = self.n_samples * self.n_blocks_per_sample
def _transform(self, v_samples):
raise NotImplementedError
@log_timer("Generating samples of H (the coarse grained-system)")
def transform(self, v_samples):
h_samples_raw=self._transform(v_samples)
return h_samples_raw.reshape([self.n_samples, -1])
def _run_step(self, step):
""" By default assumes there is no need to load/train procedure
"""
[v, _, _] = step.get_load_or_create_restricted_samples()
step.set_h_samples(self.transform(v))
return step
def run_step(self, step, save=True, draw=True):
"""
Wrapper for _run_step which also allows for saving and drawing
Args:
step (LatticeStep): the step to perform RG on
"""
step = self._run_step(step)
if save:
step.save_h_samples()
if draw:
step.draw_h_samples()
return step.get_h_samples()
class MajorityRuleTransform(RGTransform):
def __init__(self,
lattice_width=8,
block_size=2,
n_samples=1000):
"""
"""
RGTransform.__init__(self, lattice_width, block_size, n_samples)
self.block_rg = BlockRGTransform(block_size)
def _transform(self, v_samples):
return self.block_rg.transform_nonbinary(v_samples)
# ------------------------------------------------------------------------------
# RSMI STEP
# ------------------------------------------------------------------------------
# TODO save_path/load_path= None replace with decorator
class RSMITransform(RGTransform):
def __init__(self,
lattice_width=8,
block_size=2,
n_samples=1000,
name=None,
settings_psi_rbm=default_psi_rbm,
settings_psi_rbm_fit=default_psi_rbm_fit,
settings_theta_rbm=default_theta_rbm,
settings_theta_rbm_fit=default_theta_rbm_fit,
settings_lambda_rbm=default_lambda_rbm,
settings_lambda_rbm_fit=default_lambda_rbm_fit):
# TODO: J may not be known beforehand (if we provide the step with x_samples).
# Allow J to take non-specified value
RGTransform.__init__(self, lattice_width, block_size, n_samples)
# Make a directory to keep all generated samples/trained models in
if name is None:
            name = "rbms"
self.dir_path = name
if not os.path.exists(self.dir_path):
os.mkdir(self.dir_path)
# RBMS
self.psi_rbm = BBRBM(
n_visible=self.n_v_spins,
**settings_psi_rbm
)
self.theta_rbm = BBRBM(
n_visible=self.n_ve_spins,
**settings_theta_rbm
)
# TODO: load these as well?
self._psi_energy_fn=lambda v: self.psi_rbm.get_visible_energy(v)
self._theta_energy_fn = lambda v, e: (
self.theta_rbm.get_visible_energy(
tf.concat([tf.reshape(v, [2, self.n_v_spins]),
e], #tf.reshape(e [-1, self.n_e_spins])],
axis=1)))
self.lambda_rbm=RGRBM(
self._psi_energy_fn,
self._theta_energy_fn,
n_visible=self.n_v_spins,
n_environment=self.n_e_spins,
**settings_lambda_rbm
)
# For use by self.save_rbms(), self.load_rbms()
self.rbm_names = ["lambda", "psi", "theta"]
self.rbm_save_paths = {
"theta":self._wrap_path("theta/theta.ckpt"),
"psi": self._wrap_path("psi/psi.ckpt"),
"lambda":self._wrap_path("lambda/lambda.ckpt")
}
self.rbm_training_settings = {'lambda': settings_lambda_rbm_fit,
'psi': settings_psi_rbm_fit,
'theta':settings_theta_rbm_fit}
# For use by self.save_samples(), self.load_samples()
# Tracks information about whether relevant models have been trained and
# whether samples have been generated/processed
self._status = {
"lambda": False,
"psi": False,
"theta": False,
}
def _transform(self, v_samples):
""" Assumes lambda rbm (and thus theta-rbms) has already been trained.
v_samples [n_samples, 4] -> h_samples [n_samples, 1].
Reorganizing in configurations is the task of self.transform
"""
return self.lambda_rbm.transform_nonbinary(v_samples, True)
def _run_step(self, step):
"""
Args:
step (LatticeStep): the step to perform RG on
"""
[v, _, ve] = step.get_load_or_create_restricted_samples()
self.get_load_or_train_lambda_rbm(v, ve)
step.set_h_samples(self.transform(v))
return step
#
# HELPER METHODS TO CHECK STATUS: WHETHER RBMS HAVE BEEN LOADED
#
def get_status(self, attr):
""" Helper method for keeping track of whether RBMs have been trained
"""
return self._status[attr]
def set_status(self, attr, new_status=True):
self._status[attr] = new_status
def check_status(self, attr, warn=False, ignore=False):
"""Like get_status() but generates an error (warn=False) or warning
(warn=True) if status is False."""
status = self.get_status(attr)
if not status and not ignore:
notice = (("The attribute `{}` has not been changed." +
"Use `run()` to make sure all attributes are properly configured.")
.format(attr))
if warn:
warnings.warn(notice, RuntimeWarning)
else:
raise RuntimeError(notice)
return status
#
# UPDATE RBM WRAPPER FUNCTIONS
#
def _update_theta(self, save=True):
""" To be called whenever theta is updated by either loading or training theta.
"""
self.set_status('theta')
self.lambda_rbm.energy_theta_of_ve=self._theta_energy_fn
if save:
self.save_theta_rbm()
return self.theta_rbm
def _update_psi(self, save=True):
self.set_status('psi')
self.lambda_rbm.energy_psi_of_v=self._psi_energy_fn
if save:
self.save_psi_rbm()
return self.psi_rbm
def _update_lambda(self, save=True):
self.set_status('lambda')
if save:
self.save_lambda_rbm()
return self.lambda_rbm
def _train_rbm(self, rbm, name, data, **settings):
print_filters_dir = self._wrap_path("{}/filters/".format(name))
rbm.fit(data,
print_filters_dir=print_filters_dir,
**settings)
return rbm
#
# TRAIN METHODS
#
def train_theta_rbm(self, ve, **kwargs):
        settings = dict(default_theta_rbm_fit)
        settings.update(kwargs)
        self._train_rbm(self.theta_rbm, 'theta', ve, **settings)
return self._update_theta()
def train_psi_rbm(self, v, **kwargs):
        settings = dict(default_psi_rbm_fit)
        settings.update(kwargs)
        self._train_rbm(self.psi_rbm, 'psi', v, **settings)
return self._update_psi()
def train_lambda_rbm(self, v, ve, **kwargs):
        settings = dict(default_lambda_rbm_fit)
        settings.update(kwargs)
        theta_rbm = self.get_load_or_train_theta_rbm(ve)
        psi_rbm = self.get_load_or_train_psi_rbm(v)
        self._train_rbm(self.lambda_rbm, 'lambda', ve, **settings)
return self._update_lambda()
#
# LOAD METHODS
#
def _load_path(self, name):
return self.rbm_save_paths[name]
def load_theta_rbm(self):
self.theta_rbm.load_weights(self._load_path('theta'), 'theta')
return self._update_theta()
def load_psi_rbm(self):
self.psi_rbm.load_weights(self._load_path('psi'), 'psi')
return self._update_psi()
def load_lambda_rbm(self):
self.lambda_rbm.load_weights(self._load_path('lambda'), 'lambda')
return self._update_lambda()
#
# GET METHODS
#
    def get_theta_rbm(self, ignore=False):
        self.check_status('theta', warn=True, ignore=ignore)
        return self.theta_rbm
    def get_psi_rbm(self, ignore=False):
        self.check_status('psi', warn=True, ignore=ignore)
        return self.psi_rbm
    def get_lambda_rbm(self, ignore=False):
        self.check_status('lambda', warn=True, ignore=ignore)
        return self.lambda_rbm
def get_rbm_training_settings(self, name):
return self.rbm_training_settings[name]
#
# GET LOAD OR TRAIN METHODS
#
def get_load_or_train_theta_rbm(self, ve_samples):
rbm = self.get_theta_rbm(ignore=True)
if not self.get_status('theta'):
try:
rbm = self.load_theta_rbm()
except:
rbm = self.train_theta_rbm(ve_samples)
return rbm
def get_load_or_train_psi_rbm(self, v_samples):
rbm = self.get_psi_rbm(ignore=True)
if not self.get_status('psi'):
try:
rbm = self.load_psi_rbm()
except:
rbm = self.train_psi_rbm(v_samples)
return rbm
def get_load_or_train_lambda_rbm(self, v_samples, ve_samples):
"""
Args:
v_samples: possibly needed if psi rbm hasn't been trained
ve_samples:
"""
rbm = self.get_lambda_rbm(ignore=True)
if not self.get_status('lambda'):
try:
rbm = self.load_lambda_rbm()
except:
rbm = self.train_lambda_rbm(v_samples, ve_samples)
return rbm
#
# SAVE METHODS
#
def save(self):
for rbm_name in self.rbm_names:
self.check_status(rbm_name, True)
        rbms = [self.lambda_rbm, self.psi_rbm, self.theta_rbm]
        for rbm, rbm_name in zip(rbms, self.rbm_names):
            rbm.save_weights(self.rbm_save_paths[rbm_name], rbm_name)
def save_theta_rbm(self):
self.theta_rbm.save_weights(self.rbm_save_paths['theta'], 'theta')
def save_psi_rbm(self):
self.psi_rbm.save_weights(self.rbm_save_paths['psi'], 'psi')
def save_lambda_rbm(self):
self.lambda_rbm.save_weights(self.rbm_save_paths['lambda'], 'lambda')
def _wrap_path(self, path):
path = os.path.join(
self.dir_path,
path)
# Make all necessary paths
dir_path = path[:path.rfind("/")]
if ("/" in path) and (not os.path.exists(dir_path)):
os.makedirs(dir_path)
return os.path.abspath(path)
``` |
{
"source": "jqhoogland/sqlalchemy_authorize",
"score": 2
} |
#### File: jqhoogland/sqlalchemy_authorize/conftest.py
```python
import sys
from contextlib import contextmanager
import pytest
import sqlalchemy as sa
from flask import Flask, appcontext_pushed, g
from oso import Oso
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy_oso import register_models
from sqlalchemy_authorize import OsoPermissionsMixin, BasePermissionsMixin
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
sess = Session(engine)
# -- Models -------------------------------------------------------------------
class BaseUser(BasePermissionsMixin, Base):
__tablename__ = 'baseuser'
__repr_attrs__ = ['name']
__permissions__ = OsoPermissionsMixin.load_permissions(
# Public permissions
read=["id", "username"],
# Role-based permissions
self=[
# The user can provide ``username`` and ``fullname``
# to ``__init__`` (as keyword args) and to ``__setattr__``.
(["create", "update"], ["username", "fullname"]),
# The user can read/delete the entire model.
"read",
"delete"
],
admin="*" # i.e., all actions on all fields
)
id = sa.Column(sa.String(128), primary_key=True)
username = sa.Column(sa.String(128), nullable=False)
fullname = sa.Column(sa.String(128), nullable=False)
ssn = sa.Column(sa.String(10), nullable=True)
is_admin = sa.Column(sa.Boolean, default=False)
def __repr__(self):
return f"<BaseUser {self.id}>"
class User(OsoPermissionsMixin, Base):
__tablename__ = 'user'
__repr_attrs__ = ['name']
__permissions__ = OsoPermissionsMixin.load_permissions(
# Public permissions
read=["id", "username"],
# Role-based permissions
self=[
# The user can provide ``username`` and ``fullname``
# to ``__init__`` (as keyword args) and to ``__setattr__``.
(["create", "update"], ["username", "fullname"]),
# The user can read/delete the entire model.
"read",
"delete"
],
admin="*" # i.e., all actions on all fields
)
id = sa.Column(sa.String(128), primary_key=True)
username = sa.Column(sa.String(128), nullable=False)
fullname = sa.Column(sa.String(128), nullable=False)
ssn = sa.Column(sa.String(10), nullable=True)
is_admin = sa.Column(sa.Boolean, default=False)
def __repr__(self):
return f"<User {self.id}>"
# -- Fixtures -----------------------------------------------------------------
@pytest.fixture(scope="session")
def session():
sess.rollback()
Base.__class__._session = None
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
Base.__class__._session = sess
return sess
@pytest.fixture(scope="session")
def app(oso):
app = Flask(__name__, instance_relative_config=True)
app.oso = oso
with app.test_client() as client:
with app.app_context():
yield client
@pytest.fixture(scope="session")
def oso():
oso = Oso()
register_models(oso, User)
from sqlalchemy_authorize.oso.oso_permissions_mixin import UserMock
oso.register_class(UserMock)
oso.load_files(["./sqlalchemy_authorize/oso/rbac.polar"])
return oso
@contextmanager
def user_set(app, user):
g.user = user
yield
# -- Doctest Namespace --------------------------------------------------------
@pytest.fixture(scope="session", autouse=True)
def add_app(doctest_namespace):
doctest_namespace["app"] = app
@pytest.fixture(scope="session", autouse=True)
def add_BaseUser(doctest_namespace):
doctest_namespace["BaseUser"] = BaseUser
@pytest.fixture(scope="session", autouse=True)
def add_User(doctest_namespace):
doctest_namespace["User"] = User
@pytest.fixture(scope="session", autouse=True)
def add_oso(doctest_namespace):
doctest_namespace["oso"] = oso
@pytest.fixture(scope="session", autouse=True)
def add_user_set(doctest_namespace):
doctest_namespace["user_set"] = user_set
``` |
{
"source": "jqhoogland/where-to-live",
"score": 3
} |
#### File: data/population/utils.py
```python
import os
from enum import Enum
import pandas
class CityType(Enum):
MEGA = "MEGA"
LARGE = "LARGE"
MEDIUM = "MEDIUM"
SMALL = "SMALL"
TINY = "TINY"
TOWN = "TOWN"
MEGA_CITIES = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/mega.csv"))
LARGE_CITIES = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/large.csv"))
MEDIUM_CITIES = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/medium.csv"))
SMALL_CITIES = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/small.csv"))
# TINY_CITIES = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/tiny.csv"))
# TOWNS = pandas.read_csv(os.path.join(os.path.dirname(os.path.abspath(__file__)), "cleaned/towns.csv"))
CITIES = {
CityType.MEGA: MEGA_CITIES,
CityType.LARGE: LARGE_CITIES,
CityType.MEDIUM: MEDIUM_CITIES,
CityType.SMALL: SMALL_CITIES,
# CityType.TINY: TINY_CITIES,
# CityType.TOWN: TOWNS,
}
list_intersection = lambda xs, ys: [x for x in xs if x in ys]
list_intersection_indices = lambda xs, ys: [i for i, x in enumerate(xs) if x in ys]
def filter_by_type(cities_list, type_: CityType):
ref_cities = CITIES[type_]
return list_intersection(cities_list, ref_cities.loc[:,"Name"].tolist())
def get_indices_matching_type(cities_list, type_: CityType):
ref_cities = CITIES[type_]
return list_intersection_indices(cities_list, ref_cities.loc[:,"Name"].tolist())
def test_city_filtering():
cities_list = ["Kinshasa", "Pyongyang", "Lisbon", "Padang", "Tokyo", "Hong Kong", "Odesa", "Nanping"]
assert filter_by_type(cities_list, CityType.MEGA) == ["Kinshasa", "Tokyo"]
assert filter_by_type(cities_list, CityType.LARGE) == ["Pyongyang", "Hong Kong"]
assert filter_by_type(cities_list, CityType.MEDIUM) == ["Lisbon", "Odesa"]
assert filter_by_type(cities_list, CityType.SMALL) == ["Padang", "Nanping"]
def test_get_correct_indices():
cities_list = ["Kinshasa", "Pyongyang", "Lisbon", "Padang", "Tokyo", "Hong Kong", "Odesa", "Nanping"]
assert get_indices_matching_type(cities_list, CityType.MEGA) == [0, 4]
assert get_indices_matching_type(cities_list, CityType.LARGE) == [1, 5]
assert get_indices_matching_type(cities_list, CityType.MEDIUM) == [2, 6]
assert get_indices_matching_type(cities_list, CityType.SMALL) == [3, 7]
``` |
{
"source": "JQIamo/labscript-utils",
"score": 2
} |
#### File: labscript-utils/labscript_utils/double_import_denier.py
```python
import sys
import os
import traceback
import re
import importlib.util
DEBUG = False
# Tensorflow contains true double imports. This is arguably a bug in tensorflow,
# (reported here: https://github.com/tensorflow/tensorflow/issues/35369), but let's work
# around it since tensorflow is not our problem:
WHITELIST = ['tensorflow', 'tensorflow_core']
class DoubleImportDenier(object):
"""A module finder that tracks what's been imported and disallows multiple
imports of the same module under different names, raising an exception
upon detecting that this has occured"""
def __init__(self):
self.enabled = False
self.names_by_filepath = {}
self.tracebacks = {}
UNKNOWN = ('<unknown: imported prior to double_import_denier.enable()>\n')
for name, module in list(sys.modules.items()):
if getattr(module, '__file__', None) is not None:
path = os.path.realpath(module.__file__)
if os.path.splitext(os.path.basename(path))[0] == '__init__':
# Import path for __init__.py is actually the folder they're in, so
# use that instead
path = os.path.dirname(path)
self.names_by_filepath[path] = name
self.tracebacks[path] = [UNKNOWN, '']
self.stack = set()
def find_spec(self, fullname, path=None, target=None):
# Prevent recursion. If importlib.util.find_spec was called by us and is looking
# through sys.meta_path for finders, return None so it moves on to the other
# loaders.
dict_key = (fullname, tuple(path) if path is not None else None)
if dict_key in self.stack:
return
self.stack.add(dict_key)
try:
spec = importlib.util.find_spec(fullname, path)
except Exception as e:
if DEBUG: print('Exception in importlib.util.find_spec ' + str(e))
return
finally:
self.stack.remove(dict_key)
if spec is not None and spec.origin is not None and spec.origin != "built-in":
path = os.path.realpath(spec.origin)
if DEBUG: print('loading', fullname, 'from', path)
tb = traceback.format_stack()
other_name = self.names_by_filepath.get(path, None)
if fullname.split('.', 1)[0] not in WHITELIST:
if other_name is not None and other_name != fullname:
other_tb = self.tracebacks[path]
self._raise_error(path, fullname, tb, other_name, other_tb)
self.names_by_filepath[path] = fullname
self.tracebacks[path] = tb
return spec
def _format_tb(self, tb):
"""Take a formatted traceback as returned by traceback.format_stack()
and remove lines that are solely about us and the Python machinery,
leaving only lines pertaining to the user's code"""
frames = [frame for frame in tb[:-1]
if 'importlib._bootstrap' not in frame
and 'imp.load_module' not in frame
and not ('imp.py' in frame
and ('load_module' in frame
or 'load_source' in frame
or 'load_package' in frame))]
return ''.join(frames)
def _restore_tracebacklimit_after_exception(self):
"""Record the current value of sys.tracebacklimit, if any, and set a
temporary sys.excepthook to restore it to that value (or delete it)
after the next exception."""
orig_excepthook = sys.excepthook
exists = hasattr(sys, 'tracebacklimit')
orig_tracebacklimit = getattr(sys, 'tracebacklimit', None)
def excepthook(*args, **kwargs):
# Raise the error normally
orig_excepthook(*args, **kwargs)
# Restore sys.tracebacklimit
if exists:
sys.tracebacklimit = orig_tracebacklimit
else:
del sys.tracebacklimit
# Restore sys.excepthook:
sys.excepthook = orig_excepthook
sys.excepthook = excepthook
def _raise_error(self, path, name, tb, other_name, other_tb):
msg = """Double import! The same file has been imported under two
different names, resulting in two copies of the module. This is almost
certainly a mistake. If you are running a script from within a package
and want to import another submodule of that package, import it by its
full path: 'import module.submodule' instead of just 'import
submodule.'"""
msg = re.sub(' +',' ', ' '.join(msg.splitlines()))
tb = self._format_tb(tb)
other_tb = self._format_tb(other_tb)
msg += "\n\nPath imported: %s\n\n" % path
msg += "Traceback (first time imported, as %s):\n" % other_name
msg += "------------\n%s------------\n\n" % other_tb
msg += "Traceback (second time imported, as %s):\n" % name
msg += "------------\n%s------------" % tb
# We set sys.tracebacklimit to a small number to not print all the
# nonsense from the import machinary in the traceback, it is not
# useful to the user in reporting this exception. But we have to jump
# through this hoop to make sure sys.tracebacklimit is restored after
# our exception is raised, since putting it in a finally: block
# doesn't work:
self._restore_tracebacklimit_after_exception()
sys.tracebacklimit = 2
raise RuntimeError(msg) from None
_denier = None
def enable():
if '--allow-double-imports' in sys.argv:
# Calls to enable/disable the double import denier are ignored if this
# command line argument is present.
return
global _denier
if _denier is None:
_denier = DoubleImportDenier()
if _denier.enabled:
raise RuntimeError('already enabled')
# This is here because it actually happened:
for importer in sys.meta_path:
if importer.__class__.__name__ == DoubleImportDenier.__name__:
msg = 'Two DoubleImportDenier instances in sys.meta_path!'
raise AssertionError(msg)
sys.meta_path.insert(0, _denier)
_denier.enabled = True
def disable():
if '--allow-double-imports' in sys.argv:
# Calls to enable/disable the double import denier are ignored if this
# command line argument is present.
return
if not _denier.enabled:
raise RuntimeError('not enabled')
sys.meta_path.remove(_denier)
_denier.enabled = False
if __name__ == '__main__':
# Run from this directory as __main__:
enable()
def test1():
# Import numpy.linalg twice under different names:
import numpy as np
np.linalg.__file__ = None
# Add the numpy folder to the search path:
sys.path.append(os.path.dirname(np.__file__))
import linalg
def test2():
# This also gets detected, since this module already exists as
# __main__ but this line would import it as double_import_denier.
import double_import_denier
test1()
test2()
```
#### File: labscript_utils/excepthook/__init__.py
```python
import sys
import os
import threading
import traceback
import subprocess
import warnings
# The maximum number of windows the excepthook will spawn:
MAX_WINDOWS = 10
subprocess_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tk_exception.py')
class l:
logger = None
child_processes = []
def install_thread_excepthook():
"""
Workaround for sys.excepthook thread bug
(https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
Call once from __main__ before creating any threads.
"""
run_old = threading.Thread.run
def run(*args, **kwargs):
try:
run_old(*args, **kwargs)
except (KeyboardInterrupt, SystemExit):
raise
except:
if sys is None:
# Interpreter is shutting down. Don't display graphical error.
# Let the threading module's code handle this however it normally does.
raise
exc_type, exc_value, exc_traceback = sys.exc_info()
# Cull the top frame so the user doesn't see this wrapping code in their traceback:
exc_traceback = exc_traceback.tb_next
sys.excepthook(exc_type, exc_value, exc_traceback)
threading.Thread.run = run
def tkhandler(exceptclass, exception, exec_info, reraise=True):
script = os.path.basename(sys.argv[0])
if not script:
script = 'python interactive shell'
shortmessage = '%s: %s' % (exceptclass.__name__, exception)
longmessage = ''.join(traceback.format_exception(exceptclass, exception, exec_info))
if l.logger:
l.logger.error('Got an exception:\n%s' % longmessage)
if exceptclass in [KeyboardInterrupt, SystemExit]:
sys.__excepthook__(exceptclass, exception, exec_info)
else:
for previous_process in child_processes[:]:
if previous_process.poll() is not None:
child_processes.remove(previous_process)
if len(child_processes) >= MAX_WINDOWS:
shortmessage = "Too many errors"
longmessage = ("Too many errors: Further errors will " +
"not be shown graphically until some error windows are closed")
if len(child_processes) < MAX_WINDOWS + 1:
process = subprocess.Popen([sys.executable, subprocess_script_path, script, shortmessage, longmessage])
child_processes.append(process)
if reraise:
sys.__excepthook__(exceptclass, exception, exec_info)
def logwarning(message, category, filename, lineno, file=None, line=None):
logmessage = warnings.formatwarning(message, category, filename, lineno, line)
l.logger.warn(logmessage)
warnings._showwarning(message, category, filename, lineno, file, line)
def set_logger(logger):
l.logger = logger
warnings._showwarning = warnings.showwarning
warnings.showwarning = logwarning
# Check for tkinter availability. Tkinter is frustratingly not available
# by default for python 3.x on Debian systems, despite being considered
# part of the Python standard library. I'll make it a dependency for
# packaging, but this is an extra check at runtime so that if something
# goes wrong with that we get an error at import rather than later:
import tkinter
sys.excepthook = tkhandler
install_thread_excepthook()
``` |
{
"source": "JQIamo/labscript_utils",
"score": 2
} |
#### File: labscript_utils/labscript_utils/__init__.py
```python
import sys
import os
import importlib
from .__version__ import __version__
from labscript_profile import LABSCRIPT_SUITE_PROFILE
if not os.path.exists(LABSCRIPT_SUITE_PROFILE):
# Create new profile if none exists
from labscript_profile.create import create_profile
create_profile()
# This would normally run at interpreter startup but didn't since the profile didn't
# exist:
import labscript_profile
labscript_profile.add_userlib_and_pythonlib()
# This folder
labscript_utils_dir = os.path.dirname(os.path.realpath(__file__))
def import_or_reload(modulename):
"""
Behaves like 'import modulename' would, excepts forces the imported
script to be rerun
"""
# see if the proposed module is already loaded
# if so, we will need to re-run the code contained in it
if modulename in sys.modules.keys():
importlib.reload(sys.modules[modulename])
return sys.modules[modulename]
module = importlib.import_module(modulename)
return module
from labscript_utils.versions import VersionException, check_version
def dedent(s):
"""Remove leading spaces from the first line of a string, all common leading
indentation (spaces only) from subsequent lines, strip trailing spaces from all
lines and replace single newlines prior to lines with the common indentation with
spaces. Lines with additional indentation are kept verbatim. Good for unwrapping
error messages etc that are in code as multiline triple-quoted strings."""
# Strip trailing whitespace:
lines = [line.rstrip(' ') for line in s.splitlines()]
# Get common indentation from lines other than the first one:
indentation = float('inf')
for line in lines[1:]:
if line:
indentation = min(indentation, len(line) - len(line.lstrip(' ')))
if not lines[1:]:
indentation = 0
# Dedent the lines:
dedented_lines = []
for i, line in enumerate(lines):
if i == 0:
dedented_line = line.lstrip(' ')
else:
dedented_line = line[indentation:]
dedented_lines.append(dedented_line)
# Then add newline characters where we are going to keep them:
unwrapped_lines = []
for i, line in enumerate(dedented_lines):
if i == 0:
unwrapped_lines.append(line)
else:
previous_line = dedented_lines[i - 1]
# If either this line or the previous line is blank or starts with custom
# indentation, put this line on a newline rather than unwrapping it:
if any(not l or l.startswith(' ') for l in [line, previous_line]):
unwrapped_lines.append('\n' + line)
else:
unwrapped_lines.append(' ' + line)
return ''.join(unwrapped_lines)
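# Illustrative example (not part of the original file): a triple-quoted message
#
#   msg = dedent("""Wrapped lines
#       are joined into one,
#
#       while blank lines keep paragraphs separate.""")
#
# comes back with the single newlines unwrapped into spaces and the blank-line
# paragraph break preserved.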
# Enforce that the same file can't be imported under multiple names, to help
# prevent subtle bugs:
import labscript_utils.double_import_denier
labscript_utils.double_import_denier.enable()
# Disable the 'quick edit' feature of Windows' cmd.exe, which causes console applicatons
# to freeze if their console windows are merely clicked on. This causes all kinds of
# headaches, so we disable it in all labscript programs:
import zprocess
zprocess.disable_quick_edit()
``` |