blob_id (stringlengths 40-40) | directory_id (stringlengths 40-40) | path (stringlengths 2-616) | content_id (stringlengths 40-40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40-40) | revision_id (stringlengths 40-40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, ⌀) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1-1) | author_id (stringlengths 0-212)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6f0940e31730d133d2c2e850edd277b34585500 | 8910c318bb18337ffc206e598ef6f1246f25d671 | /ghRepo/ghRepo/views.py | 7da3522081a7cf5f18372888f60289979e810e85 | []
| no_license | hemantjadon/gh-repo | 155c3df97b9f0528b3c2950dc6f8d7be926b8085 | 4ef1dea61c46bb21ab8feb571d50dddb4a62dd9c | refs/heads/master | 2021-01-10T10:39:28.762048 | 2016-04-05T09:36:39 | 2016-04-05T09:36:39 | 55,286,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | from django.shortcuts import render,redirect
from django.http import HttpResponse,JsonResponse
from django.core.urlresolvers import reverse
def Home(request):
    return render(request,'index.html',{}) | [
"[email protected]"
]
| |
cda847c5e07cc1abb7c228936cccb2176999d210 | 58447c19379766653c8c9ec6800ac341a932bb0f | /micro_dl/deprecated/image_validator.py | ee07a421874b9f51aa247a600b26c7ca4b13a0cd | []
| no_license | bryantChhun/microDL | 70eed48011092f587b4c1cfe6616a68ae26656c1 | cb47f7c3adcdc7c82d8de89444453e13bdcd2543 | refs/heads/master | 2022-10-18T20:11:05.823516 | 2018-11-20T01:15:33 | 2018-11-20T01:15:33 | 158,441,077 | 0 | 0 | null | 2019-01-15T23:12:03 | 2018-11-20T19:30:12 | Python | UTF-8 | Python | false | false | 10,325 | py | """Classes for handling microscopy data in image file format, NOT LIF!
Uses dir structure:
input_dir
|-image_volume, image_volumes_info.csv
|-tp0
|-channel0
|-img_512_512_8_.., cropped_images_info.csv
|-tp-0
|-channel0: contains all npy files for cropped images from channel0
|-channel1: contains all npy files for cropped images from channel1..
and so on
"""
import cv2
import natsort
import numpy as np
import os
import pandas as pd
import re
from micro_dl.utils.aux_utils import init_logger
class ImageValidator:
"""Class for verifying image folder structure and writing metadata"""
def __init__(self, input_dir, meta_name, verbose=0):
"""
:param str input_dir: Input directory, containing time directories,
which in turn contain all channels (inputs and target) directories
:param str meta_name: Name of csv file containing image paths and metadata
which will be written in input_dir
:param int verbose: specifies the logging level: NOTSET:0, DEBUG:10,
INFO:20, WARNING:30, ERROR:40, CRITICAL:50
"""
self.input_dir = input_dir
self.time_dirs = self._get_subdirectories(self.input_dir)
assert len(self.time_dirs) > 0,\
"Input dir must contain at least one timepoint folder"
# Check to make sure first timepoint folder contains channel folders
self.channel_dirs = self._get_subdirectories(
os.path.join(self.input_dir, self.time_dirs[0]))
assert len(self.channel_dirs) > 0, \
"Must be at least one channel folder"
# Metadata will be written in input folder
self.meta_name = os.path.join(self.input_dir,
meta_name)
# Validate and instantiate logging
log_levels = [0, 10, 20, 30, 40, 50]
if verbose in log_levels:
self.verbose = verbose
else:
self.verbose = 10
self.logger = self._init_logger()
def _init_logger(self):
"""
Initialize logger for pre-processing
Logger outputs to console and log_file
"""
logger_fname = os.path.join(self.input_dir, 'preprocessing.log')
logger = init_logger('preprocessing', logger_fname, self.verbose)
return logger
def _log_info(self, msg):
"""Log info"""
if self.verbose > 0:
self.logger.info(msg)
def _get_subdirectories(self, dir_name):
subdirs = [subdir_name
for subdir_name in
os.listdir(dir_name)
if os.path.isdir(os.path.join(dir_name, subdir_name))
]
return natsort.natsorted(subdirs)
def folder_validator(self):
"""
Input directory should contain subdirectories consisting of timepoints,
which in turn should contain channel folders numbered 0, ...
This function makes sure images have matching shapes and unique indices
in each folder and writes a csv containing relevant image information.
:return list of ints channel_nrbs: Channel numbers determined by searching
input_dir subfolder names for ints
:return list of ints im_indices: Unique image indices. Must be matching
in all the subfolders of input_dir
"""
# Make sure all input directories contain images with the same indices and shape
# Collect all timepoint indices
time_indices = []
for dir_name in self.time_dirs:
time_indices.append(self.get_idx_from_dir(dir_name))
# Collect all channel indices from first timepoint
channel_indices = []
for dir_name in self.channel_dirs:
channel_indices.append(self.get_idx_from_dir(dir_name))
# Collect all image indices from first channel directory
im_shape, im_indices, _ = self.image_validator(os.path.join(
self.input_dir,
self.time_dirs[0],
self.channel_dirs[0]))
# Skipping these records for now
z_idx = 0
size_x_um = 0
size_y_um = 0
size_z_um = 0
# Make sure image shapes and indices match across channels
# and write csv containing relevant metadata
nbr_idxs = len(im_indices)
records = []
for time_idx, time_dir in zip(time_indices, self.time_dirs):
for channel_idx, channel_dir in zip(channel_indices, self.channel_dirs):
cur_dir = os.path.join(
self.input_dir,
time_dir,
channel_dir)
assert os.path.exists(cur_dir), \
"Directory doesn't exist: {}".format(cur_dir)
cur_shape, cur_indices, cur_names = self.image_validator(cur_dir)
# Assert image shape and indices match
idx_overlap = set(im_indices).intersection(cur_indices)
assert len(idx_overlap) == nbr_idxs, \
"Index mismatch in folder {}".format(cur_dir)
assert im_shape == cur_shape, \
"Image shape mismatch in folder {}".format(cur_dir)
for cur_idx, cur_name in zip(cur_indices, cur_names):
full_name = os.path.join(self.input_dir, time_dir, channel_dir, cur_name)
records.append((time_idx,
channel_idx,
cur_idx,
z_idx,
full_name,
size_x_um,
size_y_um,
size_z_um))
# Create pandas dataframe
df = pd.DataFrame.from_records(
records,
columns=['timepoint', 'channel_num', 'sample_num', 'slice_num',
'fname', 'size_x_microns', 'size_y_microns',
'size_z_microns']
)
df.to_csv(self.meta_name, sep=',')
self._log_info("Writing metadata in: {}".format(self.input_dir,
'image_volumes_info.csv'))
self._log_info("found timepoints: {}".format(time_indices))
self._log_info("found channels: {}".format(channel_indices))
self._log_info("found image indices: {}".format(im_indices))
def _get_sorted_names(self, image_dir):
"""
Get image names in directory and sort them by their indices
:param str image_dir: Image directory name
:return list of strs im_names: Image names sorted according to indices
"""
im_names = [f for f in os.listdir(image_dir) if not f.startswith('.')]
# Sort image names according to indices
return natsort.natsorted(im_names)
def _read_or_catch(self, dir_name, im_name):
"""
Checks the file extension for npy and loads the array if so. Otherwise
reads a regular image (png, tif, jpg, see OpenCV for supported files)
of any bit depth.
:param str dir_name: Directory name
:param str im_name: Image name
:return array im: image
:throws IOError if image can't be opened
"""
if im_name[-3:] == 'npy':
im = np.load(os.path.join(dir_name, im_name))
else:
try:
im = cv2.imread(os.path.join(dir_name, im_name), cv2.IMREAD_ANYDEPTH)
except IOError as e:
print(e)
return im
def image_validator(self, image_dir):
"""
Make sure all images in a directory have unique indexing and the same
shape.
:param str image_dir: Directory containing opencv readable images
:return tuple im_shape: image shape if all images have the same shape
:return list im_indices: Unique indices for the images
:return list im_names: list of fnames for images in a channel dir
:throws IOError: If images can't be read
"""
im_names = self._get_sorted_names(image_dir)
assert len(im_names) > 1, "Only one or less images in directory " + image_dir
# Read first image to determine shape
im = self._read_or_catch(image_dir, im_names[0])
im_shape = im.shape
# Determine indexing
idx0 = re.findall("\d+", im_names[0])
idx1 = re.findall("\d+", im_names[1])
assert len(idx0) == len(idx1), \
"Different numbers of indices in file names {} {}".format(
im_names[0], im_names[1])
potential_idxs = np.zeros(len(idx0))
for idx, (i, j) in enumerate(zip(idx0, idx1)):
potential_idxs[idx] = abs(int(j) - int(i))
idx_pos = np.where(potential_idxs > 0)[0]
# There should only be one index (varying integer) in filenames
assert len(idx_pos) == 1, ("Unclear indexing,"
"more than one varying int in file names")
# Loop through all images
# check that shape is constant and collect indices
im_indices = np.zeros(len(im_names), dtype=int)
for i, im_name in enumerate(im_names):
im = self._read_or_catch(image_dir, im_name)
assert im.shape == im_shape, "Mismatching image shape in " + im_name
im_indices[i] = int(re.findall("\d+", im_name)[idx_pos[0]])
# Make sure there's a unique index for each image
assert len(im_indices) == len(np.unique(im_indices)), \
"Images don't have unique indexing"
msg = '{} contains indices: {}'.format(image_dir, im_indices)
self._log_info(msg)
return im_shape, im_indices, im_names
def get_idx_from_dir(self, dir_name):
"""
Get directory index, assuming it's an int in the last part of the
image directory name.
:param str dir_name: Directory name containing one int
:return int idx_nbr: Directory index
"""
strs = dir_name.split("/")
pos = -1
if len(strs[pos]) == 0 and len(strs) > 1:
pos = -2
idx_nbr = re.findall("\d+", strs[pos])
assert len(idx_nbr) == 1, ("Couldn't find index in {}".format(dir_name))
return int(idx_nbr[0])
| [
"[email protected]"
]
| |
47b6667c47136aac7d50731f707df679abe1acb2 | 87dc44f5ae9a5e8fea4f19130bdb84aabc4c27a1 | /sf_hii_interface/sf_hii_interface/urls.py | dbc9503a5d1db1fa13aa97f7380e0373a9aecb04 | []
| no_license | yanspineiro/HII | 2cece07fe0912292ad1c2d39e6199241212c022b | b9684080db733fe81594b70ecd6cb9e4f883c5d8 | refs/heads/master | 2021-01-22T07:13:34.339727 | 2015-08-02T20:02:12 | 2015-08-02T20:02:12 | 40,090,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | import debug_toolbar
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^sf_bridge/', include('sf_bridge.urls')),
    url(r'^api-auth/', include('rest_framework.urls'), name='rest_framework'),
    url(r'^__debug__/', include(debug_toolbar.urls)),
)
| [
"[email protected]"
]
| |
4a9f201bd46ed928be0fe13e3c58b666905774ad | a37dd35ca10458424bc694c3c1d82dc36397f7c4 | /faux_craigslist/urls.py | d34d9490bc78fd0a3b214ed479b0912b6aa05ce3 | []
| no_license | eileendwyer/faux_craigslist | 6675c5464d4dac9d6d2add1b19f86386fd5bf1ab | 9c17091927a06ca331bf66e7d6723e000a916369 | refs/heads/master | 2021-01-20T17:12:35.331043 | 2016-06-25T18:28:56 | 2016-06-25T18:28:56 | 61,836,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,215 | py | """faux_craigslist URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls import static
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import logout
from tootieapp.views import IndexView, ProfileView
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^$', IndexView.as_view(), name="index"),
    #url(r'^logout/$', logout, name="logout"),
    #url(r'^accounts/profile/$', login_required(ProfileView.as_view()), name="profile_view"),
    #url('^', include('django.contrib.auth.urls')),
]
| [
"[email protected]"
]
| |
8586efce94570fd36dc6376165ffc8e9a732f066 | f0c4b4fde64863817a476667448ee25d6bbada32 | /master_thesis_version/experiment4.py | a6037e9ae53b9a71631139768ee768aad6688bb5 | [
"MIT"
]
| permissive | tobias-freidling/hsic-lasso-psi | 18d67eb9beed738aff824c71e51b01d97291cef1 | 01f32f6ed1c1514539b6bc79f0e3aab7566f56e3 | refs/heads/master | 2023-06-02T05:24:14.370549 | 2021-06-17T15:20:24 | 2021-06-17T15:20:24 | 298,973,860 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,597 | py | from psi.simulation import Data_Generator, Evaluator, Visualisation
from psi.linear import Linear_Model
from psi.poly_multi import Poly_Multi_HSIC
from psi.hsic_lasso_psi import Split_HSIC_Lasso
"""
Comparison with other method for (model-free) post-selection inference
"""
dg_id = Data_Generator(p = 500, experiment = 'non_linear', rho = 0, decay = False,
customize = False, noise_signal_ratio = 0.2)
dg_const = Data_Generator(p = 500, experiment = 'non_linear', rho = 0.1, decay = False,
customize = False, noise_signal_ratio = 0.2)
dg_logistic = Data_Generator(p = 500, experiment = 'logistic', rho = 0, decay = False)
dg_linear = Data_Generator(p = 500, experiment = 'linear', rho = 0, decay = False, customize = False,
noise_signal_ratio = 0.3)
targets = ['partial', 'full', 'carved', 'H']
split_b10 = Split_HSIC_Lasso(targets, split_ratio = 0.25, n_screen = 50, H_estimator = 'block', H_B = 10)
split_inc1 = Split_HSIC_Lasso(targets, split_ratio = 0.25, n_screen = 50, H_estimator = 'inc', H_l = 1)
multi_b10 = Poly_Multi_HSIC(30, poly = False, estimator = 'block', B = 10)
multi_inc1 = Poly_Multi_HSIC(30, poly = False, estimator = 'inc', l = 1)
linear = Linear_Model(sigma = 5 * 0.3, reg_factor = 3)
models1 = [split_b10, split_inc1, multi_b10, multi_inc1]
names1 = ['split_b10', 'split_inc1', 'multi_b10', 'multi_inc1']
models2 = [split_b10, split_inc1, multi_b10, multi_inc1, linear]
names2 = ['split_b10', 'split_inc1', 'multi_b10', 'multi_inc1', 'linear']
sample_sizes = [250, 500, 1000, 1500, 2000]
eval_id = Evaluator(models1, names1, rep = 100, dg = dg_id, n_record_variables = 4,
sample_sizes = sample_sizes, alpha = 0.05, start_seed = 2020)
eval_const = Evaluator(models1, names1, rep = 100, dg = dg_const, n_record_variables = 4,
sample_sizes = sample_sizes, alpha = 0.05, start_seed = 2020)
eval_logistic = Evaluator(models1, names1, rep = 100, dg = dg_logistic, n_record_variables = 5,
sample_sizes = sample_sizes, alpha = 0.05, start_seed = 2020)
eval_linear = Evaluator(models2, names2, rep = 100, dg = dg_linear, n_record_variables = 5,
sample_sizes = sample_sizes, alpha = 0.05, start_seed = 2020)
eval_id.simulation_parallel()
eval_const.simulation_parallel()
eval_logistic.simulation_parallel()
eval_linear.simulation_parallel()
vis4 = Visualisation({'eval_id': eval_id, 'eval_const': eval_const,
'eval_logistic': eval_logistic, 'eval_linear': eval_linear})
# Visualisation of TPR and FPR for HSIC-target
labels1 = ['Proposal, Block, B=10', 'Proposal, inc., l=1',
'Multi, Block, B=10', 'Multi, inc., l=1']
labels2 = ['Proposal, Block, B=10', 'Proposal, inc., l=1',
'Multi, Block, B=10', 'Multi, inc., l=1', 'Linear']
titles = ['Non-linear, Identity', 'Non-linear, Const. corr.', 'Logistic', 'Linear']
subdict_model_comp1 = {'split_b10': ['H'], 'split_inc1': ['H'],
'multi_b10': ['H'], 'multi_inc1': ['H']}
v_dict12 = {'eval_id': subdict_model_comp1,
'eval_const': subdict_model_comp1,
'eval_logistic': subdict_model_comp1,
'eval_linear': subdict_model_comp1}
label_dict12 = {'split_b10': ['Proposal, Block, B=10'], 'split_inc1': ['Proposal, inc., l=1'],
'multi_b10': ['Multi, Block, B=10'], 'multi_inc1': ['Multi, inc., l=1']}
vis4.visualise_rates('fpr', v_dict12, titles, label_dict12, width = 12, height = 3)
vis4.visualise_rates('tpr', v_dict12, titles, label_dict12, width = 12, height = 3)
# Visualisation of TPR and FPR for other targets
subdict_model_comp3 = {'split_b10': ['partial', 'full', 'carved'],
'split_inc1': ['partial', 'full', 'carved'],
'linear': ['beta']}
subdict_model_comp2 = {'split_b10': ['partial', 'full', 'carved'],
'split_inc1': ['partial', 'full', 'carved']}
v_dict13 = {'eval_id': subdict_model_comp2,
'eval_const': subdict_model_comp2,
'eval_logistic': subdict_model_comp2,
'eval_linear': subdict_model_comp3}
label_dict13 = {'split_b10': ['Proposal, B=10, partial', 'Proposal, B=10, full', 'Proposal, B=10, carved'],
'split_inc1': ['Proposal, l=1, partial', 'Proposal, l=1, full', 'Proposal, l=1, carved'],
'linear': ['Linear, partial']}
vis4.visualise_rates('fpr', v_dict13, titles, label_dict13, width = 12, height = 3)
vis4.visualise_rates('tpr', v_dict13, titles, label_dict13, width = 12, height = 3) | [
"[email protected]"
]
| |
7983e4ca4e3edbea12d64c6439b721c3e33251bc | d2cadd78166781f0bddc0ea10ed737d28341d3ab | /venv/bin/pip3.7 | 74022e3ffe951acd468bd2400aa4bdabdeb63e85 | []
| no_license | fernndzaky/FP_IS | 45c40deec74773fc644796e7cef80c6ee4737450 | a89c9f3bd442467dba8ac3b11792b4e7219b8b32 | refs/heads/master | 2022-11-09T08:07:27.029094 | 2020-06-15T19:57:07 | 2020-06-15T19:57:07 | 272,520,855 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | 7 | #!/Applications/XAMPP/xamppfiles/htdocs/php/FP_IS/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
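# Auto-generated setuptools console-script wrapper: it strips any
# "-script.py"/".exe" suffix from argv[0] and dispatches to the 'pip3.7'
# console_scripts entry point of the pinned pip==10.0.1 distribution.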
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"[email protected]"
]
| |
f40501420b9dfbaed6985ce1eb838f5072a0e8cd | 4afe7bd69a030cf2c51045cf8dc808d4ac5d5ef2 | /maoyan(vue+django)/maoyan-Django/venv/Lib/site-packages/pytz/tzinfo.py | 0d0f62081d53293440fd73b0a5631a42bc4eba73 | []
| no_license | jbxdlele/python | e19aa83f8eca45cbd64c79fe302fba88e101f646 | b5444fa759168e4deed6e472f558f522390bf879 | refs/heads/master | 2020-09-02T07:15:19.849700 | 2019-11-02T17:37:04 | 2019-11-02T17:37:04 | 219,164,400 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,273 | py | '''Base classes and helpers for building zone specific tzinfo classes'''
from bisect import bisect_right
from datetime import datetime, timedelta, tzinfo
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
_timedelta_cache = {}
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
_ttinfo_cache = {}
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
# Overridden in subclass
_utcoffset = None
_tzname = None
zone = None
def __str__(self):
return self.zone
class StaticTzInfo(BaseTzInfo):
'''A timezone that has a constant offset from UTC
These timezones are rare, as most locations have changed their
offset at some point in their history
'''
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo is not self:
raise ValueError('fromutc: dt.tzinfo is not self')
return (dt + self._utcoffset).replace(tzinfo=self)
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return _notime
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._tzname
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime.
This is normally a no-op, as StaticTzInfo timezones never have
ambiguous cases to correct:
>>> from pytz import timezone
>>> gmt = timezone('GMT')
>>> isinstance(gmt, StaticTzInfo)
True
>>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
>>> gmt.normalize(dt) is dt
True
The supported method of converting between timezones is to use
datetime.astimezone(). Currently normalize() also works:
>>> la = timezone('America/Los_Angeles')
>>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> gmt.normalize(dt).strftime(fmt)
'2011-05-07 08:02:03 GMT (+0000)'
'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return '<StaticTzInfo %r>' % (self.zone,)
def __reduce__(self):
# Special pickle to zone remains a singleton and to cope with
# database changes.
return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight saving time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
# Sorted list of DST transition times, UTC
_utc_transition_times = None
# [(utcoffset, dstoffset, tzname)] corresponding to
# _utc_transition_times entries
_transition_info = None
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = (
self._transition_info[0])
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if (dt.tzinfo is not None and
getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
is_dst is used to determine the correct timezone in the ambigous
period at the end of daylight saving time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight saving time
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight saving time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6),
is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
# is an ambiguous case occuring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
# Filter out the possiblilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
# If we get this far, we have in a wierd timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone if is_dst=True
# Choose the latest (by UTC) applicable timezone if is_dst=False
# i.e., behave like end-of-DST transition
dates = {} # utc -> local
for local_dt in filtered_possible_loc_dt:
utc_time = (
local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset)
assert utc_time not in dates
dates[utc_time] = local_dt
return dates[[min, max][not is_dst](dates)]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.utcoffset(ambiguous, is_dst=False))
'-1 day, 20:30:00'
>>> str(tz.utcoffset(ambiguous, is_dst=True))
'-1 day, 21:30:00'
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> str(tz.dst(normal))
'1:00:00'
>>> str(tz.dst(normal, is_dst=False))
'1:00:00'
>>> str(tz.dst(normal, is_dst=True))
'1:00:00'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> str(tz.dst(ambiguous, is_dst=False))
'0:00:00'
>>> str(tz.dst(ambiguous, is_dst=True))
'1:00:00'
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
def __reduce__(self):
# Special pickle to zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
database changes could cause a zones implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset and
localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
# doubt this will ever been needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
| [
"[email protected]"
]
| |
4833f5befa09e074405d298fb725b42817e4fc7d | d83fde3c891f44014f5339572dc72ebf62c38663 | /_bin/google-cloud-sdk/.install/.backup/lib/surface/emulators/bigtable/__init__.py | ccb697c875b01fa3825b38808c007f7b95f64c41 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | gyaresu/dotfiles | 047cc3ca70f4b405ba272856c69ee491a79d2ebe | e5e533b3a081b42e9492b228f308f6833b670cfe | refs/heads/master | 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 | Python | UTF-8 | Python | false | false | 1,581 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The gcloud bigtable emulator group."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.emulators import bigtable_util
from googlecloudsdk.command_lib.emulators import util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core.util import platforms
class UnsupportedPlatformError(exceptions.Error):
  pass
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA)
class Bigtable(base.Group):
  """Manage your local Bigtable emulator.
  This set of commands allows you to start and use a local Bigtable emulator.
  """
  detailed_help = {
      'EXAMPLES':
          """\
          To start a local Bigtable emulator, run:
            $ {command} start
          """,
  }
  # Override
  def Filter(self, context, args):
    util.EnsureComponentIsInstalled(bigtable_util.BIGTABLE,
                                    bigtable_util.BIGTABLE_TITLE)
| [
"[email protected]"
]
| |
986e0045bf977f4349d5a6d36279edec5d1f8e06 | e200208aa8fc42f48e7e0f3abaa0bea28f46eca1 | /main.py | 67668159fe0463b13633deec4f1d64a746dc5596 | []
| no_license | sahil150395/Higher-Lower-Game | 2580c7c1fe5f12ddcc6755db7a9905d259774977 | 3d2b5b5a329be637f743bd65ab0909458dd79f85 | refs/heads/master | 2023-07-19T04:04:29.132494 | 2021-09-02T04:55:28 | 2021-09-02T04:55:28 | 401,937,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | from game_data import data
import random
import art
from replit import clear
def format_data(account):
"""Format the account data in printable form"""
accountName = account["name"]
accountDesc = account["description"]
accountCountry = account["country"]
return f'{accountName}, a {accountDesc}, from {accountCountry}.'
def check_answer(guess, cFollower, aFollower):
"""Use if statement to check if user is correct and returns is they got it right"""
if cFollower > aFollower:
return guess == "a"
else:
return guess == "b"
print(art.logo)
score = 0
continueGame = True
against = random.choice(data)
while continueGame:
comparison = against
against = random.choice(data)
while comparison == against:
against = random.choice(data)
print(f"Compare A: {format_data(comparison)}")
print(art.vs)
print(f"Against B: {format_data(against)}")
guess = input("Who has more followers? Type 'A' or 'B': ").lower()
cFollower = comparison["follower_count"]
aFollower = against["follower_count"]
is_correct = check_answer(guess, cFollower, aFollower)
clear()
print(art.logo)
if is_correct:
score += 1
print(f"You're right! Current score: {score}.")
else:
continueGame = False
print(f"Sorry, that's wrong. Final score: {score}")
| [
"[email protected]"
]
| |
e6e15fb040527dcb46faee488520b93fbfbcb714 | 5b99825807822a7483c69a38feddf240543d1d1a | /CreateSendingProfiles.py | 17abbb6a80559588e8f70c78ebc89379f1981d41 | []
| no_license | lucjb/skillmatch | 3143d98dfab420d3966ba839dc365ef134939978 | e41e64bc805fa9dc7b32c298c55b4fd0964d5839 | refs/heads/master | 2021-06-26T07:02:57.000877 | 2017-09-11T20:40:17 | 2017-09-11T20:40:17 | 103,062,918 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,005 | py |
from email.parser import Parser
from email_reply_parser import EmailReplyParser
from os import listdir
from os.path import isfile, join
import sys
import numpy as np
import six
import operator
import io
import os
import talon
import re
import nltk
from talon import quotations
from talon.signature.bruteforce import extract_signature
from nltk.corpus import stopwords
talon.init()
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
root_path = '../skillmatch/maildir'
mentor_dirs = [join(root_path, f) for f in listdir(root_path) if not isfile(join(root_path, f))]
email2mentor = {}
for mentor_path in mentor_dirs:
folders = [join(mentor_path, f) for f in listdir(mentor_path) if not isfile(join(mentor_path, f))]
mentor_email_address=None
for folder in folders:
if folder.endswith('sent') or folder.endswith('sent_items') or folder.endswith('sent_mail'):
with open(join(folder, '1.'), "r") as f:
data = f.read()
email = Parser().parsestr(data)
mentor_email_address = email['from']
email2mentor[mentor_email_address] = mentor_path
c=0
mentor2text = {}
email_ids = set()
for mentor_path in mentor_dirs:
folders = [join(mentor_path, f) for f in listdir(mentor_path) if not isfile(join(mentor_path, f))]
for folder in folders:
mail_files = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f))]
for mail_file in mail_files:
with open(mail_file, "r") as f:
data = f.read()
email = Parser().parsestr(data)
email_id = email['Message-ID']
if email_id in email_ids:
continue
email_ids.add(email_id)
sender = email['from']
if sender in email2mentor:
mentor = email2mentor[sender]
try:
mentor2text[mentor].append(email)
except KeyError:
mentor2text[mentor] = [email]
c+=1
if c % 1000==0:
print c
print len(email_ids)
word2vec = {}
embeddings = open('word_embeddings.csv')
_, dim = embeddings.next().split()
dim = int(dim)
for line in embeddings:
wordvec = line.split()
word = wordvec[0]
vec = np.array(map(float, wordvec[1:]))
word2vec[word]=vec
profiles_file = open('profiles.csv', 'w')
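# Each mentor profile is the (unnormalised) sum of the word embeddings of every
# non-stopword token found in the plain-text bodies of that mentor's emails,
# written to profiles.csv as "<mentor dir> <dim floats>".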
for mentor, emails in mentor2text.iteritems():
print mentor, len(emails)
mentor_vec = np.zeros(dim)
for email in emails:
pl = email.get_payload()
message = EmailReplyParser.parse_reply(pl)
message, signature = extract_signature(quotations.extract_from_plain(message))
message = message.lower()
message = re.sub(r'https?:\/\/.*[\r\n]*', '', message, flags=re.MULTILINE)
message = message.replace('=', ' ')
sentences = sent_detector.tokenize(message)
for s in sentences:
words = nltk.word_tokenize(s)
words = [word for word in words if word not in stopwords.words('english')]
for w in words:
if re.search('[a-z]', w):
try:
mentor_vec +=word2vec[w]
except KeyError:
pass
profiles_file.write(mentor.split('/')[-1] + ' ')
mentor_vec.tofile(profiles_file, sep=' ')
profiles_file.write('\n')
| [
"[email protected]"
]
| |
6b3a38302b6169e357bf72c55bb8031402624ea0 | 4d6a73019413ed4a9eae15277b780125fa4c3e60 | /보물상자비밀번호.py | e200285fd5665ed609c6192b99bb5d77f3143404 | []
| no_license | jungjy14/Algorithms | f72187efb9904dfac7c1f692424d6153fe046b43 | 678cbf3600f56ccf8ed7fa4be7b67a67dd2b946a | refs/heads/master | 2020-04-01T14:54:31.067918 | 2018-10-20T15:05:32 | 2018-10-20T15:05:32 | 153,313,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | T = int(input())
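# Approach: rotate the N-character hex string one character at a time; N/4
# rotations are enough to see every distinct split into 4 equal substrings.
# Collect the distinct base-16 values in a set and report the K-th largest.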
for i in range(T):
    N, K = map(int, input().split())
    num = input().strip()
    line = int(N / 4)
    sum = set()
    for j in range(line):
        for k in range(4):
            sum.add(int(num[k * line:(k + 1) * line], 16))
        num = num[-1] + num[:-1]
    answer = list(sum)
    answer.sort(reverse=True)
    print('#{} {}'.format(i + 1, answer[K-1]))
| [
"[email protected]"
]
| |
ee2bcf56d3c065ddadaae8981da37e2e8bfca510 | 95f504c2ee61f09df786e7a0a403b9972f574af3 | /python/Practice/TraverseAllGraphertices.py | 09076dde5973b0e245993e57f4830a667201dd3b | []
| no_license | Sumedh31/algos | 16d2eee260718fedda276976b8e4f7ea5a6fe414 | f5e349ea26d95a1cb45ba019fb39e2d3dd9b69cf | refs/heads/master | 2023-08-11T18:42:59.609503 | 2021-09-22T04:33:24 | 2021-09-22T04:33:24 | 304,841,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | '''
Created on 11-Jul-2019
@author: Sumedh.Tambe
'''
from collections import defaultdict
class graph():
    def __init__(self):
        self.graph=defaultdict(list)
    def addEdge(self,vertice,edge):
        self.graph[vertice].append(edge)
    def DFSTraverse(self,vertice,visited):
        visited[vertice]=True
        print(vertice)
        for i in self.graph[vertice]:
            if(visited[i]==False):
                self.DFSTraverse(i, visited)
    def InitTraverse(self,vertice):
        noOfVertices=len(self.graph)
        visited=[False]*noOfVertices
        # for i in range(noOfVertices):
        #     if(visited[i]==False):
        self.DFSTraverse(vertice,visited)
if __name__ == '__main__':
    g = graph()
    g.addEdge(0, 1)
    g.addEdge(0, 2)
    g.addEdge(1, 2)
    g.addEdge(2, 0)
    g.addEdge(2, 3)
    g.addEdge(3, 3)
    #print "Following is Depth First Traversal"
    g.InitTraverse(2)  # DFS from vertex 2; InitTraverse requires a start vertex | [
"[email protected]"
]
| |
99e0dd19851d2e57e9d43a3cab22f452775b2b89 | f7bae15820d3615cd6dd3828f8196a064db955bf | /MPI/mpi_cbet.py | 8f1900a5660f40a13f068bb5091f1ed3d9f5f953 | []
| no_license | cepheid42/CBET_python | c3169fd8b8036553e2d034e282f8e76509fd59c9 | 0d92a39dd17ada66553801e825bff0d675f80c83 | refs/heads/master | 2022-07-16T20:00:12.363291 | 2020-05-11T17:25:38 | 2020-05-11T17:25:38 | 256,596,588 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,798 | py | from mpi4py import MPI
from mpi_constants import *
import mpi_launch_ray as lr
# from plotter import plot_everything
import numpy as np
from time import monotonic
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
if rank == 0:
start_time = monotonic()
x = np.zeros((nx, nz), dtype=np.float32, order='F')
z = np.zeros((nx, nz), dtype=np.float32, order='F')
dedendz = np.zeros((nx, nz), dtype=np.float32, order='F')
dedendx = np.zeros((nx, nz), dtype=np.float32, order='F')
machnum = np.zeros((nx, nz), dtype=np.float32, order='F')
eden = np.zeros((nx, nz), dtype=np.float32, order='F')
finalts = np.zeros((nrays, nbeams), dtype=np.int32, order='F')
mysaved_x = np.zeros((nt, nrays, nbeams), dtype=np.float32, order='F')
mysaved_z = np.zeros((nt, nrays, nbeams), dtype=np.float32, order='F')
edep = np.zeros((nx + 2, nz + 2, nbeams), dtype=np.float32, order='F')
marked = np.zeros((nx, nz, numstored, nbeams), dtype=np.int32, order='F')
crosses_x = np.zeros((nbeams, nrays, ncrossings), dtype=np.float32, order='F')
crosses_z = np.zeros((nbeams, nrays, ncrossings), dtype=np.float32, order='F')
boxes = np.zeros((nbeams, nrays, ncrossings, 2), dtype=np.int32, order='F')
present = np.zeros((nx, nz, nbeams), dtype=np.int32, order='F')
loc_edep = np.zeros((nx + 2, nz + 2), dtype=np.float32, order='F')
loc_marked = np.zeros((nx, nz, numstored), dtype=np.int32, order='F')
loc_crosses_x = np.zeros((nrays, ncrossings), dtype=np.float32, order='F')
loc_crosses_z = np.zeros((nrays, ncrossings), dtype=np.float32, order='F')
loc_boxes = np.zeros((nrays, ncrossings, 2), dtype=np.int32, order='F')
loc_present = np.zeros((nx, nz), dtype=np.int32, order='F')
loc_finalts = np.zeros(nrays, dtype=np.int32, order='F')
loc_savedx = np.zeros((nt, nrays), dtype=np.float32, order='F')
loc_savedz = np.zeros((nt, nrays), dtype=np.float32, order='F')
if rank == 0:
for zz in range(nz):
x[:, zz] = np.linspace(xmin, xmax, nx, dtype=np.float32)
for xx in range(nx):
z[xx, :] = np.linspace(zmin, zmax, nz, dtype=np.float32)
# print('More initialization...')
for xx in range(nx):
for zz in range(nz):
eden[xx, zz] = max(0.0, ((0.3 * ncrit - 0.1 * ncrit) / (xmax - xmin)) * (x[xx, zz] - xmin) + (0.1 * ncrit))
machnum[xx, zz] = max(0.0, (((-0.4) - (-2.4)) / (xmax - xmin)) * (x[xx, zz] - xmin)) + (-2.4)
for xx in range(nx - 1):
for zz in range(nz - 1):
dedendz[xx, zz] = (eden[xx, zz + 1] - eden[xx, zz]) / (z[xx, zz + 1] - z[xx, zz])
dedendx[xx, zz] = (eden[xx + 1, zz] - eden[xx, zz]) / (x[xx + 1, zz] - x[xx, zz])
dedendz[:, nz - 1] = dedendz[:, nz - 2] # sets last column equal to second to last column
dedendx[nx - 1, :] = dedendz[nx - 2, :] # sets last row equal to second to last row
# print('Setting initial conditions for ray tracker')
# print('nrays per beam is ', nrays)
comm.Bcast(x, root=0)
comm.Bcast(z, root=0)
comm.Bcast(eden, root=0)
comm.Bcast(machnum, root=0)
comm.Bcast(dedendx, root=0)
comm.Bcast(dedendz, root=0)
uray = np.ones(nt, dtype=np.float32, order='F')
x0 = np.zeros(nrays, dtype=np.float32, order='F')
z0 = np.zeros(nrays, dtype=np.float32, order='F')
kx0 = np.zeros(nrays, dtype=np.float32, order='F')
kz0 = np.zeros(nrays, dtype=np.float32, order='F')
uray_mult = intensity * courant_mult * rays_per_zone**-1.0
wpe = np.sqrt(eden * 1e6 * e_c ** 2.0 / (m_e * e_0))
# print("Tracking Rays...")
if rank == 0:
x0[:] = xmin - (dt / courant_mult * c * 0.5)
z0[:] = np.linspace(beam_min_z, beam_max_z, nrays, dtype=np.float32) + offset - (dz / 2) - (dt / courant_mult * c * 0.5)
kx0[:nrays] = np.float32(1.0)
kz0[:nrays] = np.float32(-0.1)
# print('BEAMNUM is ', rank + 1)
for n in range(nrays): # loop over rays
uray[0] = uray_mult * np.interp(z0[n], phase_x + offset, pow_x) # determines initial power weighting
dummy = lr.Ray_XZ(n, uray, loc_boxes, loc_marked, loc_present,
x, z, loc_crosses_x, loc_crosses_z, loc_edep, wpe, dedendx, dedendz,
x0[n], z0[n], kx0[n], kz0[n])
finalt = dummy.get_finalt()
rayx = dummy.get_rayx()
rayz = dummy.get_rayz()
loc_finalts[n] = finalt
loc_savedx[:finalt, n] = rayx
loc_savedz[:finalt, n] = rayz
# if n % 20 == 0:
# print(f' ...{int(100 * (1 - (n / nrays)))}% remaining...')
if rank == 1:
x0[:] = np.linspace(beam_min_z, beam_max_z, nrays, dtype=np.float32) - (dx / 2) - (dt / courant_mult * c * 0.5)
z0[:] = zmin - (dt / courant_mult * c * 0.5)
kx0[:nrays] = np.float32(0.0)
kz0[:nrays] = np.float32(1.0)
# print('BEAMNUM is ', rank + 1)
for n in range(nrays): # loop over rays
uray[0] = uray_mult * np.interp(x0[n], phase_x, pow_x) # determines initial power weighting
dummy = lr.Ray_XZ(n, uray, loc_boxes, loc_marked, loc_present,
x, z, loc_crosses_x, loc_crosses_z, loc_edep, wpe, dedendx, dedendz,
x0[n], z0[n], kx0[n], kz0[n])
finalt = dummy.get_finalt()
rayx = dummy.get_rayx()
rayz = dummy.get_rayz()
loc_finalts[n] = finalt
loc_savedx[:finalt, n] = rayx
loc_savedz[:finalt, n] = rayz
# if n % 20 == 0:
# print(f' ...{int(100 * (1 - (n / nrays)))}% remaining...')
if rank == 1:
comm.Send(loc_crosses_x, dest=0, tag=15)
comm.Send(loc_crosses_z, dest=0, tag=16)
comm.Send(loc_edep, dest=0, tag=17)
comm.Send(loc_marked, dest=0, tag=18)
comm.Send(loc_boxes, dest=0, tag=19)
comm.Send(loc_present, dest=0, tag=20)
comm.Send(loc_finalts, dest=0, tag=21)
comm.Send(loc_savedx, dest=0, tag=22)
comm.Send(loc_savedz, dest=0, tag=23)
if rank == 0:
temp_crossx = np.empty((nrays, ncrossings), dtype=np.float32, order='F')
temp_crossz = np.empty((nrays, ncrossings), dtype=np.float32, order='F')
temp_edep = np.empty((nx + 2, nz + 2), dtype=np.float32, order='F')
temp_marked = np.empty((nx, nz, numstored), dtype=np.int32, order='F')
temp_boxes = np.empty((nrays, ncrossings, 2), dtype=np.int32, order='F')
temp_present = np.empty((nx, nz), dtype=np.int32, order='F')
temp_finalts = np.empty(nrays, dtype=np.int32, order='F')
temp_savedx = np.empty((nt, nrays), dtype=np.float32, order='F')
temp_savedz = np.empty((nt, nrays), dtype=np.float32, order='F')
comm.Recv(temp_crossx, source=1, tag=15)
comm.Recv(temp_crossz, source=1, tag=16)
comm.Recv(temp_edep, source=1, tag=17)
comm.Recv(temp_marked, source=1, tag=18)
comm.Recv(temp_boxes, source=1, tag=19)
comm.Recv(temp_present, source=1, tag=20)
comm.Recv(temp_finalts, source=1, tag=21)
comm.Recv(temp_savedx, source=1, tag=22)
comm.Recv(temp_savedz, source=1, tag=23)
crosses_x[1, :, :] = temp_crossx
crosses_z[1, :, :] = temp_crossz
edep[:, :, 1] = temp_edep
marked[:, :, :, 1] = temp_marked
boxes[1, :, :, :] = temp_boxes
present[:, :, 1] = temp_present
finalts[:, 1] = temp_finalts
mysaved_x[:, :, 1] = temp_savedx
mysaved_z[:, :, 1] = temp_savedz
crosses_x[0, :, :] = loc_crosses_x
crosses_z[0, :, :] = loc_crosses_z
edep[:, :, 0] = loc_edep
marked[:, :, :, 0] = loc_marked
boxes[0, :, :, :] = loc_boxes
present[:, :, 0] = loc_present
finalts[:, 0] = loc_finalts
mysaved_x[:, :, 0] = loc_savedx
mysaved_z[:, :, 0] = loc_savedz
comm.Bcast(edep, root=0)
comm.Bcast(crosses_x, root=0)
comm.Bcast(crosses_z, root=0)
comm.Bcast(boxes, root=0)
comm.Bcast(present, root=0)
i_b1 = np.copy(edep[:nx, :nz, 0], order='F')
i_b2 = np.copy(edep[:nx, :nz, 1], order='F')
# if rank == 0:
# print("Finding ray intersections with rays from opposing beams.")
intersections = np.zeros((nx, nz), dtype=np.float32, order='F')
if rank == 0:
for xx in range(1, nx): # loops start from 1, the first zone
for zz in range(1, nz):
for ss in range(numstored):
if marked[xx, zz, ss, 0] == 0:
break
else:
# iray1 = marked[xx, zz, ss, 0]
for sss in range(numstored):
if marked[xx,zz, sss, 1] == 0:
break
else:
intersections[xx, zz] += 1.0
comm.Bcast(intersections, root=0)
# if rank == 0:
# print('Calculating CBET gains...')
dkx = crosses_x[:, :, 1:] - crosses_x[:, :, :-1]
dkz = crosses_z[:, :, 1:] - crosses_z[:, :, :-1]
dkmag = np.sqrt(dkx ** 2 + dkz ** 2)
u_flow = machnum * cs
W1 = np.sqrt(1 - eden / ncrit) / rays_per_zone
W2 = np.sqrt(1 - eden / ncrit) / rays_per_zone
W1_init = np.copy(W1, order='F')
W1_new = np.copy(W1_init, order='F')
W2_init = np.copy(W2, order='F')
W2_new = np.copy(W2_init, order='F')
for bb in range(nbeams - 1):
''' rr1 loop is done cyclicly by all processes
so 2 processes will calculate every other loop'''
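# Cyclic (round-robin) decomposition: rank r handles rays r, r + size,
# r + 2*size, ...; e.g. with 2 ranks, rank 0 takes rr1 = 0, 2, 4, ... and
# rank 1 takes rr1 = 1, 3, 5, ...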
for rr1 in range(rank, nrays, size):
for cc1 in range(ncrossings):
if boxes[bb, rr1, cc1, 0] == 0 or boxes[bb, rr1, cc1, 1] == 0:
break
ix = boxes[bb, rr1, cc1, 0]
iz = boxes[bb, rr1, cc1, 1]
if intersections[ix, iz] != 0:
nonzeros1 = marked[ix, iz, :, 0].nonzero()
numrays1 = np.count_nonzero(marked[ix, iz, :, 0])
nonzeros2 = marked[ix, iz, :, 1].nonzero()
numrays2 = np.count_nonzero(marked[ix, iz, :, 1])
marker1 = marked[ix, iz, nonzeros1, 0].flatten()
marker2 = marked[ix, iz, nonzeros2, 1].flatten()
rr2 = marker2
cc2 = marker2
for rrr in range(numrays1):
if marker1[rrr] == rr1:
ray1num = rrr
break
for n2 in range(numrays2):
for ccc in range(ncrossings):
ix2 = boxes[bb + 1, rr2[n2], ccc, 0]
iz2 = boxes[bb + 1, rr2[n2], ccc, 1]
if ix == ix2 and iz == iz2:
cc2[n2] = ccc
break
n2limit = int(min(present[ix, iz, 0], numrays2))
for n2 in range(n2limit):
ne = eden[ix, iz]
epsilon = 1.0 - ne / ncrit
kmag = (omega / c) * np.sqrt(epsilon) # magnitude of wavevector
kx1 = kmag * (dkx[bb, rr1, cc1] / (dkmag[bb, rr1, cc1] + 1.0e-10))
kx2 = kmag * (dkx[bb + 1, rr2[n2], cc2[n2]] / (dkmag[bb + 1, rr2[n2], cc2[n2]] + 1.0e-10))
kz1 = kmag * (dkz[bb, rr1, cc1] / (dkmag[bb, rr1, cc1] + 1.0e-10))
kz2 = kmag * (dkz[bb + 1, rr2[n2], cc2[n2]] / (dkmag[bb + 1, rr2[n2], cc2[n2]] + 1.0e-10))
kiaw = np.sqrt((kx2 - kx1) ** 2 + (kz2 - kz1) ** 2) # magnitude of the difference between the two vectors
ws = kiaw * cs # acoustic frequency, cs is a constant
omega1 = omega
omega2 = omega # laser frequency difference. zero to start
eta = ((omega2 - omega1) - (kx2 - kx1) * u_flow[ix, iz]) / (ws + 1.0e-10)
efield1 = np.sqrt(8.0 * np.pi * 1.0e7 * i_b1[ix, iz] / c) # initial electric field of ray
efield2 = np.sqrt(8.0 * np.pi * 1.0e7 * i_b2[ix, iz] / c) # initial electric field of ray
P = (iaw ** 2 * eta) / ((eta ** 2 - 1.0) ** 2 + iaw ** 2 * eta ** 2) # from Russ's paper
# gain1 = constant1 * efield2 ** 2 * (ne / ncrit) * (1 / iaw) * P # L^-1 from Russ's paper
gain2 = constant1 * efield1 ** 2 * (ne / ncrit) * (1 / iaw) * P # L^-1 from Russ's paper
if dkmag[bb + 1, rr2[n2], cc2[n2]] >= 1.0 * dx:
W2_new_ix_iz = W2[ix, iz] * np.exp(-1 * W1[ix, iz] * dkmag[bb + 1, rr2[n2], cc2[n2]] * gain2 / np.sqrt(epsilon))
W1_new_ix_iz = W1[ix, iz] * np.exp(1 * W2[ix, iz] * dkmag[bb, rr1, cc1] * gain2 / np.sqrt(epsilon))
if rank != 0:
comm.send(ix, dest=0, tag=10)
comm.send(iz, dest=0, tag=11)
comm.send(W2_new_ix_iz, dest=0, tag=12)
comm.send(W1_new_ix_iz, dest=0, tag=13)
else:
W2_new[ix, iz] = W2_new_ix_iz
W1_new[ix, iz] = W1_new_ix_iz
for r in range(1, size):
other_ix = comm.recv(source=r, tag=10)
other_iz = comm.recv(source=r, tag=11)
W2_new[other_ix, other_iz] = comm.recv(source=r, tag=12)
W1_new[other_ix, other_iz] = comm.recv(source=r, tag=13)
# if rank == 0 and rr1 % 20 == 0:
# print(f' ...{int(100 * (1 - (rr1 / nrays)))}% remaining...')
comm.Bcast(W1_new, root=0)
comm.Bcast(W2_new, root=0)
# print("Updating intensities due to CBET gains...")
i_b1_new = np.copy(i_b1, order='F')
i_b2_new = np.copy(i_b2, order='F')
for bb in range(nbeams - 1):
''' rr1 loop is done cyclicly by all processes
so 2 processes will calculate every other loop'''
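# Same round-robin split over rays as the gain loop above; non-root ranks send
# each (ix, iz) update to rank 0, which applies it to i_b1_new / i_b2_new.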
for rr1 in range(rank, nrays, size):
for cc1 in range(ncrossings):
if boxes[bb, rr1, cc1, 0] == 0 or boxes[bb, rr1, cc1, 1] == 0:
break
ix = boxes[bb, rr1, cc1, 0]
iz = boxes[bb, rr1, cc1, 1]
if intersections[ix, iz] != 0:
nonzeros1 = marked[ix, iz, :, 0].nonzero()
numrays1 = np.count_nonzero(marked[ix, iz, :, 0])
nonzeros2 = marked[ix, iz, :, 1].nonzero()
numrays2 = np.count_nonzero(marked[ix, iz, :, 1])
marker1 = marked[ix, iz, nonzeros1, 0].flatten()
marker2 = marked[ix, iz, nonzeros2, 1].flatten()
rr2 = marker2
cc2 = marker2
for rrr in range(numrays1):
if marker1[rrr] == rr1:
ray1num = rrr
break
for n2 in range(numrays2):
for ccc in range(ncrossings):
ix2 = boxes[bb + 1, rr2[n2], ccc, 0]
iz2 = boxes[bb + 1, rr2[n2], ccc, 1]
if ix == ix2 and iz == iz2:
cc2[n2] = ccc
break
fractional_change_1 = -1.0 * (1.0 - (W1_new[ix, iz] / W1_init[ix, iz])) * i_b1[ix, iz]
fractional_change_2 = -1.0 * (1.0 - (W2_new[ix, iz] / W2_init[ix, iz])) * i_b2[ix, iz]
if rank != 0:
comm.send(ix, dest=0, tag=10)
comm.send(iz, dest=0, tag=11)
comm.send(fractional_change_1, dest=0, tag=12)
comm.send(fractional_change_2, dest=0, tag=13)
else:
i_b1_new[ix, iz] += fractional_change_1
i_b2_new[ix, iz] += fractional_change_2
for r in range(1, size):
other_ix = comm.recv(source=r, tag=10)
other_iz = comm.recv(source=r, tag=11)
i_b1_new[other_ix, other_iz] += comm.recv(source=r, tag=12)
i_b2_new[other_ix, other_iz] += comm.recv(source=r, tag=13)
x_prev_1 = x[ix, iz]
z_prev_1 = z[ix, iz]
x_prev_2 = x[ix, iz]
z_prev_2 = z[ix, iz]
# Now we need to find and increment/decrement the fractional_change for the rest of the beam 1 ray
for ccc in range(cc1 + 1, ncrossings):
ix_next_1 = boxes[0, rr1, ccc, 0]
iz_next_1 = boxes[0, rr1, ccc, 1]
x_curr_1 = x[ix_next_1, iz_next_1]
z_curr_1 = z[ix_next_1, iz_next_1]
if ix_next_1 == 0 or iz_next_1 == 0:
break
else:
# Avoid double deposition if the (x,z) location doesn't change with incremented crossing number
if x_curr_1 != x_prev_1 or z_curr_1 != z_prev_1:
new_val = fractional_change_1 * (present[ix, iz, 0] / present[ix_next_1, iz_next_1, 0])
if rank != 0:
comm.send(ix_next_1, dest=0, tag=10)
comm.send(iz_next_1, dest=0, tag=11)
comm.send(new_val, dest=0, tag=12)
else:
i_b1_new[ix_next_1, iz_next_1] += new_val
for r in range(1, size):
other_ix = comm.recv(source=r, tag=10)
other_iz = comm.recv(source=r, tag=11)
other_new_val = comm.recv(source=r, tag=12)
i_b1_new[other_ix, other_iz] += other_new_val
x_prev_1 = x_curr_1
z_prev_1 = z_curr_1
n2 = min(ray1num, numrays2)
for ccc in range(cc2[n2] + 1, ncrossings):
ix_next_2 = boxes[1, rr2[n2], ccc, 0]
iz_next_2 = boxes[1, rr2[n2], ccc, 1]
x_curr_2 = x[ix_next_2, iz_next_2]
z_curr_2 = z[ix_next_2, iz_next_2]
if ix_next_2 == 0 or iz_next_2 == 0:
break
else:
if x_curr_2 != x_prev_2 or z_curr_2 != z_prev_2:
new_val = fractional_change_2 * (present[ix, iz, 0] / present[ix_next_2, iz_next_2, 1])
if rank != 0:
comm.send(ix_next_2, dest=0, tag=10)
comm.send(iz_next_2, dest=0, tag=11)
comm.send(new_val, dest=0, tag=12)
else:
i_b2_new[ix_next_2, iz_next_2] += new_val
for r in range(1, size):
other_ix = comm.recv(source=r, tag=10)
other_iz = comm.recv(source=r, tag=11)
other_new_val = comm.recv(source=r, tag=12)
i_b2_new[other_ix, other_iz] += other_new_val
x_prev_2 = x_curr_2
z_prev_2 = z_curr_2
# if rank == 0 and rr1 % 20 == 0:
# print(f' ...{int(100 * (1 - (rr1 / nrays)))}% remaining...')
if rank == 0:
intensity_sum = np.sum(edep[:nx, :nz, :], axis=2)
variable1 = 8.53e-10 * np.sqrt(i_b1 + i_b2 + 1.0e-10) * (1.053 / 3.0)
i_b1_new[i_b1_new < 1.0e-10] = 1.0e-10
i_b2_new[i_b2_new < 1.0e-10] = 1.0e-10
a0_variable = 8.53e-10 * np.sqrt(i_b1_new + i_b2_new + 1.0e-10) * (1.053 / 3.0)
# plot_everything(z, x, eden, mysaved_x, mysaved_z, finalts, intensity_sum, variable1, a0_variable)
'''==================== TIMER REPORTS ============================================================='''
print("FINISHED! Reporting ray timings now...")
print('___________________________________________________________________')
print(f'Total time: {monotonic() - start_time}')
| [
"[email protected]"
]
| |
7b9fa5a24352753ce2cea24c1f44d6212d0ee88f | b588151eff85d9dec096e65980d0eec9ff353973 | /course_gather/viewsets.py | 1f6b20541ba7298d4110d20bd5e364fdf69094b4 | []
| no_license | Blu3spirits/MTU-Transfer-Course-Gatherer | 86d668cc61c2ff5109970c3ea7c427deacab6a5f | b4a419492feb6bfb507d63b849eb0b8846b233d1 | refs/heads/master | 2021-05-22T00:40:42.211029 | 2020-05-03T17:45:49 | 2020-05-03T17:45:49 | 219,623,368 | 1 | 0 | null | 2019-11-05T00:32:56 | 2019-11-05T00:32:56 | null | UTF-8 | Python | false | false | 1,644 | py | from course_gather.models import (
College,
Course,
MTUCourse,
State,
)
from course_gather.serializers import (
CollegeSerializer,
CourseSerializer,
MTUCourseSerializer,
StateSerializer
)
from course_gather.filters import (
CollegeFilter,
CourseFilter,
MTUCourseFilter,
StateFilter
)
from rest_framework import viewsets
from django_filters import rest_framework as filters
class StateViewSet(viewsets.ModelViewSet):
serializer_class = StateSerializer
queryset = State.objects.all()
filterset_class = StateFilter
filter_backends = (filters.DjangoFilterBackend,)
class CollegeViewSet(viewsets.ModelViewSet):
serializer_class = CollegeSerializer
queryset = College.objects.all()
filterset_class = CollegeFilter
filter_backends = (filters.DjangoFilterBackend,)
class CourseViewSet(viewsets.ModelViewSet):
serializer_class = CourseSerializer
queryset = Course.objects.all().select_related(
'mtu_equiv',
'transfer_course_college_code',
        'transfer_course_state_code')
filterset_class = CourseFilter
filter_backends = (filters.DjangoFilterBackend,)
class MTUCourseViewSet(viewsets.ModelViewSet):
serializer_class = MTUCourseSerializer
queryset = MTUCourse.objects.all()
filterset_class = MTUCourseFilter
filter_backends = (filters.DjangoFilterBackend,)
| [
"[email protected]"
]
| |
90d236ea0df30a603b84446a3b97662b75e3c27f | 6c1d75591aeece9902b201ae1a902eee2de93725 | /CS 1064/Code Comprehension Lab/mystery-hard1.py | 0e7fb5bb4f90d86f96f05b0870387bb7f364ba1c | []
| no_license | pahuja-gor/Python-Lectures | 85a8630bb1a707daf98b621dd105c5163155efc0 | 2d16dba4b39703c255a4059318da6866028595bb | refs/heads/master | 2022-11-27T03:45:42.254849 | 2020-08-01T23:59:09 | 2020-08-01T23:59:09 | 256,677,210 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | # TODO:
# 1. Write documentation for the function -- what does it do, what is the input meaning and type, what is the output meaning and type
# 2. Make any changes needed for readability -- update variable names, update function names,
# add extra documentation within the function if necessary
# 2.1. The logical structure of the code does not need to be changed; only the variable/function names and the documentation
def list_equals_array(x, y):
    '''Checks that every [key, value] pair in the list appears in the dictionary.
    In other words, the function verifies that each entry of the list matches an entry of the dictionary.
    Arguments:
        x: List of [key, value] pairs
        y: Dictionary mapping keys to values
    Returns:
        None. Raises an AssertionError if any pair in the list is missing from the dictionary.'''
for a, b in x:
assert (a in y)
assert (b == y[a])
# Don't edit anything below this point; these are the test cases
l = [
['cow', 'moo'],
['duck', 'quack'],
['sheep', 'baa'],
['cat', 'meow'],
['dog', 'bark']
]
d = {
'cow': 'moo',
'duck': 'quack',
'sheep': 'baa',
'cat': 'meow',
'dog': 'bark'
}
list_equals_array(l, d)
print('Tests passed')
| [
"[email protected]"
]
| |
5242e0cfc2df3a36ba6e3483685cda8f090d40a4 | 5b166c31744ee792d9c1ca1b3c14cfb5e0e4f591 | /examples/01_integer_var2.py | 3acede0107edca3a2547e13decbed726f88e985e | []
| no_license | DoOR-Team/ortools_examples | 8a286f8b824bc5a6183eaa3d6f1fcf8283170557 | 2a4fb54de931ca83a26d8afe2fc8358dfc2fa7a6 | refs/heads/master | 2022-11-27T04:14:25.426232 | 2020-08-07T02:45:27 | 2020-08-07T02:45:27 | 284,651,170 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,193 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ortools.sat.python import cp_model
# 鸡兔同笼问题
def RabbitsAndPheasantsSat():
model = cp_model.CpModel()
r = model.NewIntVar(0, 100, 'r')
p = model.NewIntVar(0, 100, 'p')
# 20 heads.
model.Add(r + p == 20)
# 56 legs.
model.Add(4 * r + 2 * p == 56)
# Solves and prints out the solution.
solver = cp_model.CpSolver()
status = solver.Solve(model)
if status == cp_model.FEASIBLE:
print('%i rabbits and %i pheasants' % (solver.Value(r),
solver.Value(p)))
# 有物不知其数,三三数之剩二,五五数之剩三,七七数之剩二。问物几何?
def NumberMod():
# x = k1 * 3 + 2
# x = k2 * 5 + 3
# x = k3 * 7 + 2
model = cp_model.CpModel()
k1 = model.NewIntVar(0, 5000, 'k1')
k2 = model.NewIntVar(0, 5000, 'k2')
k3 = model.NewIntVar(0, 5000, 'k3')
x = model.NewIntVar(0, 5000, 'x')
model.Add(k1 * 3 + 2 == x)
model.Add(k2 * 5 + 3 == x)
model.Add(k3 * 7 + 2 == x)
# model.Minimize(x)
model.Maximize(x)
# Solves and prints out the solution.
solver = cp_model.CpSolver()
status = solver.Solve(model)
print('max x = %d' % (solver.Value(x)))
# 有物不知其数,三三数之剩二,五五数之剩三,七七数之剩二。问物几何?
def NumberMod2():
model = cp_model.CpModel()
x = model.NewIntVar(0, 5000, 'x')
xmod3 = model.NewIntVar(0, 1000, 'xmod3')
xmod5 = model.NewIntVar(0, 1000, 'xmod5')
xmod7 = model.NewIntVar(0, 1000, 'xmod7')
model.AddModuloEquality(xmod3, x, 3)
model.AddModuloEquality(xmod5, x, 5)
model.AddModuloEquality(xmod7, x, 7)
model.Add(xmod3 == 2)
model.Add(xmod5 == 3)
model.Add(xmod7 == 2)
# model.Minimize(x)
model.Maximize(x)
# Solves and prints out the solution.
solver = cp_model.CpSolver()
status = solver.Solve(model)
print('max x = %d' % (solver.Value(x)))
# RabbitsAndPheasantsSat()
NumberMod()
NumberMod2()
| [
"[email protected]"
]
| |
b718a949f2f549d464c3f8126f3a669decc1f4f0 | 47654f8eb4402b39d8c1b541a93e9518796ed9f5 | /example/test_ctp2.py | 9f332a7e9c8123fa9ee393666ce951957d2873dc | []
| no_license | eelxpeng/pyLTM | d588aeec77959d71146aa343a7ec07bf6ca64edc | f30ddcdb443646ce830ab6dfc34c92c4c16db24e | refs/heads/master | 2020-03-13T20:31:45.977958 | 2018-09-18T03:18:59 | 2018-09-18T03:18:59 | 131,276,442 | 3 | 2 | null | 2018-04-28T04:02:29 | 2018-04-27T09:35:26 | Python | UTF-8 | Python | false | false | 1,041 | py | '''
Created on 14 Feb 2018
@author: Bryan
'''
import sys
sys.path.append("..")
from pyltm.model import Gltm
from pyltm.model import DiscreteVariable, SingularContinuousVariable
from pyltm.reasoner import NaturalCliqueTreePropagation, Evidence
from pyltm.io import BifParser
from pyltm.data import ContinuousDatacase
import numpy as np
if __name__ == '__main__':
modelfile = "glass.bif"
varNames = ["RI","Na","Mg","Al","Si","K","Ca","Ba","Fe"]
data = [1.51793,12.79,3.5,1.12,73.03,0.64,8.77,0,0]
# modelfile = "continuoustoy.bif"
# varNames = ["x"]
# data = [0]
bifparser = BifParser()
net = bifparser.parse(modelfile)
print(net)
# set up evidence
datacase = ContinuousDatacase.create(varNames)
datacase.synchronize(net)
datacase.putValues(data)
evidence = datacase.getEvidence()
ctp = NaturalCliqueTreePropagation(net)
print(ctp._tree)
ctp.use(evidence)
ctp.propagate()
loglikelihood = ctp.loglikelihood
print("Loglikelihood: ", loglikelihood) | [
"[email protected]"
]
| |
f30d969d62567611dd3d15370564683a645439e0 | 4544f04da8dd6faf602af706e523f4929941ba41 | /tests/test_orders.py | bdd3bba77db83483c9ff9e41fa4dcf0da87443e1 | [
"MIT"
]
| permissive | hpsilva/oanda-api-v20 | 78febf98384945ded213f517ed1bf00d696e5dcb | a8d82dff91787bde08a4d8733283a62ecd0ef7f3 | refs/heads/master | 2021-01-12T11:43:45.763974 | 2016-10-22T16:34:46 | 2016-10-22T16:34:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,625 | py | import sys
import unittest
import json
from . import unittestsetup
from .unittestsetup import environment as environment
import requests_mock
try:
from nose_parameterized import parameterized
except:
print("*** Please install 'nose_parameterized' to run these tests ***")
exit(0)
import oandapyV20
from oandapyV20 import API
from oandapyV20.exceptions import V20Error
import oandapyV20.endpoints.orders as orders
from oandapyV20.endpoints.orders import responses
access_token = None
accountID = None
account_cur = None
api = None
class TestOrders(unittest.TestCase):
"""Tests regarding the orders endpoints."""
def setUp(self):
"""setup for all tests."""
global access_token
global accountID
global account_cur
global api
# self.maxDiff = None
try:
accountID, account_cur, access_token = unittestsetup.auth()
setattr(sys.modules["oandapyV20.oandapyV20"],
"TRADING_ENVIRONMENTS",
{"practice": {
"stream": "https://test.com",
"api": "https://test.com",
}})
api = API(environment=environment,
access_token=access_token,
headers={"Content-Type": "application/json"})
api.api_url = 'https://test.com'
except Exception as e:
print("%s" % e)
exit(0)
def test__orders_base_exception(self):
"""test for the exception when using the baseclass."""
with self.assertRaises(TypeError) as bcErr:
r = orders.Orders(accountID)
bcErr = bcErr.exception
self.assertTrue("Use of abstract base class" in "{}".format(bcErr))
@requests_mock.Mocker()
def test__orders_list(self, mock_get):
"""get the orders information for an account."""
uri = 'https://test.com/v3/accounts/{}/orders'.format(accountID)
resp = responses["_v3_accounts_accountID_orders"]['response']
text = json.dumps(resp)
mock_get.register_uri('GET',
uri,
text=text)
r = orders.OrderList(accountID)
result = api.request(r)
self.assertTrue(len(result['orders']) == 1 and
result['orders'][0]['instrument'] == "EUR_USD")
@requests_mock.Mocker()
def test__order_replace(self, mock_get):
"""test replacing an order."""
orderID = "2125"
# to replace with
tmp = {"order": {
"units": "-50000",
"type": "LIMIT",
"instrument": "EUR_USD",
"price": "1.25",
}
}
uri = 'https://test.com/v3/accounts/{}/orders/{}'.format(accountID,
orderID)
resp = responses["_v3_accounts_accountID_order_replace"]['response']
text = json.dumps(resp)
r = orders.OrderReplace(accountID, orderID, data=tmp)
mock_get.register_uri('PUT',
uri,
text=text,
status_code=r._expected_status)
result = api.request(r)
self.assertTrue(len(result['orders']) == 1 and
result['orders'][0]['units'] == tmp["order"]["units"])
@requests_mock.Mocker()
def test__order_replace_wrong_status_exception(self, mock_get):
"""test replacing an order with success but wrong status_code."""
orderID = "2125"
# to replace with
tmp = {"order": {
"units": "-50000",
"type": "LIMIT",
"instrument": "EUR_USD",
"price": "1.25",
}
}
uri = 'https://test.com/v3/accounts/{}/orders/{}'.format(accountID,
orderID)
resp = responses["_v3_accounts_accountID_order_replace"]['response']
text = json.dumps(resp)
r = orders.OrderReplace(accountID, orderID, data=tmp)
# force the wrong status code
mock_get.register_uri('PUT',
uri,
text=text,
status_code=200)
with self.assertRaises(ValueError) as err:
result = api.request(r)
self.assertTrue("200" in "{}".format(err.exception) and
r.status_code is None)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
dc26c0c524546f6abd875e3e66f4199c9995bb59 | 5e8dd0f82de8777250fa7e3e64d9f77f8a66b37d | /flaggy/doppio/api/responses.py | 433790723c4c0512ac031efb7d9a3fc35fb0871c | []
| no_license | mihirpandya/flaggy-server | d461bbee75d2ab9c6c419c2f4fdc1a3299118ab2 | 30570df5d724ceca29a64daf4f98164ee9653404 | refs/heads/master | 2020-05-30T18:23:15.358268 | 2014-12-28T04:11:59 | 2014-12-28T04:11:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | ## SUCCESS/ERROR RESPONSES ##
def success(msg):
return {'status': 'success', 'msg': msg}
def error(msg):
return {'status': 'error', 'msg': msg}
def is_Success(obj):
return (obj['status'] == "success")
def is_Error(obj):
return (obj['status'] == "error")
def get_Msg(obj):
return obj['msg']
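# Example: is_Error(error("bad input")) is True and get_Msg(error("bad input")) == "bad input"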
| [
"[email protected]"
]
| |
0d7ecae510ab892edb50401c8c1b872ed79e8e91 | 48015ac20ec07ff1bf8a1989321efe180376773e | /hello.ppy.py | f8c151a7e1b05bad4ff010ca50d6a5892b42776f | []
| no_license | hmangal1/IntroProgramming-Labs | c9f821a9dfc1e1a9a2bf31ee362acd4c2cbb151e | 72fe115393be76ab287a6be9ce90d8af9733ee73 | refs/heads/master | 2021-01-21T13:29:37.014796 | 2017-12-01T17:10:49 | 2017-12-01T17:10:49 | 102,128,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | # Introduction to Programming
# Author: Herrick Mangal
# Date: 9/1/17
def main():
print("Hello, instructor!")
print("Good-bye!")
main()
| [
"[email protected]"
]
| |
ac84c45542a5d9969ce52a966acd81916c428d2b | fddcc8c32861626ca84c1dd501729e6c73f67c68 | /tests/functional/test_customer_debit.py | a080814bcac335566cf53d0d1e9c11bf1e6adb4f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | Vantiv/vantiv-sdk-for-python | 773baf42b2a54bf447481f0c4228f153f8012c29 | 52f1cbf90e9b19ba817ec19818d46276059195f0 | refs/heads/12.x | 2023-08-08T13:41:39.197289 | 2023-07-27T13:21:51 | 2023-07-27T13:21:51 | 85,575,691 | 5 | 17 | MIT | 2023-07-27T13:21:52 | 2017-03-20T12:44:52 | Python | UTF-8 | Python | false | false | 2,395 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the 'Software'), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import os
import sys
import unittest
package_root = os.path.dirname(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
sys.path.insert(0, package_root)
from vantivsdk import *
conf = utils.Configuration()
# vendorCredit transaction is also getting tested in 'tests/functional/test_funding_instruction.py'
class TestCustomerDebit(unittest.TestCase):
def test_customer_debit(self):
transaction = fields.customerDebit()
transaction.id = 'ThisIsID'
transaction.reportGroup = 'Default Report Group'
transaction.fundingCustomerId = "customerId"
transaction.customerName = "temp1200"
transaction.fundsTransferId = "value for fundsTransferId"
transaction.amount = 1512
account_info = fields.echeckTypeCtx()
account_info.accType = 'Savings'
account_info.accNum = "1234"
account_info.routingNum = "12345678"
transaction.accountInfo = account_info
transaction.customIdentifier = '123'
response = online.request(transaction, conf)
self.assertEquals('000', response['customerDebitResponse']['response'])
self.assertEquals('sandbox', response['customerDebitResponse']['location'])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
54f8110f7ff5bffedaa4751f92a398e1a582e99a | 4282a63e4800be65a39dedb0b05864525912ae19 | /codegen/architectures/arm/operands.py | 732409f9c187a634ee8a4eb438933edb83234037 | [
"BSD-3-Clause"
]
| permissive | peterwauligmann/PSpaMM | 2e652ed57425c665cf0b0254a44bc5090d2d6537 | 344c06c183854f72224c1e88ad2ced2e092d4efb | refs/heads/master | 2020-03-24T07:44:05.958222 | 2019-07-16T15:34:33 | 2019-07-16T15:34:33 | 142,573,247 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | from codegen.operands import *
class Operand_ARM:
@property
def ugly(self):
raise NotImplementedError()
# TODO: Rename this 'Immediate'
class Constant_ARM(Constant):
@property
def ugly(self):
return "#{}".format(self.value)
def c(n):
"""Sugar for conveniently defining integer constants"""
return Constant_ARM(value=int(n))
class Label_ARM(Label):
@property
def ugly(self):
#return self.ordinal
return self.value.upper() + "_%="
def l(label: str):
return Label_ARM(label)
class Register_ARM(Register):
@property
def ugly(self):
return self.value
@property
def clobbered(self):
return (self.value.split(".")[0]).replace("x", "r")
@property
def ugly_scalar(self):
return (self.value.split(".")[0]).replace("v", "q")
@property
def ugly_scalar_1d(self):
return (self.value.split(".")[0]).replace("v", "d")
@property
def ugly_1d(self):
return self.value.replace("2d", "1d")
r = lambda n: Register_ARM(AsmType.i64, "x"+str(n))
xzr = Register_ARM(AsmType.i64, "xzr")
v = lambda n: Register_ARM(AsmType.f64x8, "v"+str(n) + ".2d")
class MemoryAddress_ARM(MemoryAddress):
@property
def ugly(self):
return "[{}, {}]".format(self.base.ugly,self.disp)
def mem(base, offset):
return MemoryAddress_ARM(base, offset)
| [
"[email protected]"
]
| |
f666eb4210fdf3708d5784fe65261d8bbd9b2510 | d1742451b25705fc128acc245524659628ab3e7d | /Codeforces with Python/F - Annoying Present.py | c7e3cd5fbc6ac5c66e92713582ac07328200ab12 | []
| no_license | Shovon588/Programming | ebab793a3c97aedddfcad5ea06e7e22f5c54a86e | e4922c9138998358eed09a1be7598f9b060c685f | refs/heads/master | 2022-12-23T18:29:10.141117 | 2020-10-04T17:29:32 | 2020-10-04T17:29:32 | 256,915,133 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 139 | py | n,m=map(int,input().split())
result=0
for i in range(m):
x,d=map(int,input().split())
    # d >= 0: apply the operation at an end of the array, where the total distance n*(n-1)/2 is maximal;
    # d < 0: apply it at the middle index, where the total distance is minimal
    left = (n - 1) // 2
    right = n - 1 - left
    dist = n * (n - 1) // 2 if d >= 0 else left * (left + 1) // 2 + right * (right + 1) // 2
    result += n * x + d * dist
print(result/n)
| [
"[email protected]"
]
| |
7a0506344152b79bf3e9b1671a36b2cdb326497b | f5d40767fa1fd0a090b98a43015cbb65628ed0fd | /basic_password/basic_password/urls.py | 4ffb8e738b4055ebd9152f4ac31501308fac1424 | []
| no_license | Akashshere19/social | a5fda2b51cfc3c500f526b41b7601831c4188ca4 | 75d78fa6b9b7b8e6b09d25e20aa663c36a912d93 | refs/heads/main | 2023-08-27T02:22:49.025973 | 2021-11-06T14:44:30 | 2021-11-06T14:44:30 | 425,266,915 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | """basic_password URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from password_app import views
urlpatterns = [
path('',views.index,name='index'),
path('admin/', admin.site.urls),
path('password_app/',include('password_app.urls')),
path('logout/',views.user_logout,name='logout'),
path('special/',views.special,name='special')
]
| [
"[email protected]"
]
| |
8b6c4f8c0db588496506a3f861ba6e7cd27ca2aa | 70b03133176e54db8856881569d33fa969159a77 | /users/api/serializers.py | da92e6afe209503848813fed3488393631d7b17a | []
| no_license | BrenoOsvaldoFunicheli/naveapi | 504102d3fe79acd866daca08094b895336018710 | 7929991eeb84c64cb3fefdf76775ce7d92c57734 | refs/heads/master | 2022-12-03T18:02:32.036049 | 2020-08-17T20:55:28 | 2020-08-17T20:55:28 | 279,436,135 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | from django.contrib.auth.models import User, Group
from rest_framework import serializers
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ['username', 'email']
| [
"[email protected]"
]
| |
f26251d0d88cabf18cbd783821f5e00d3ef4eac8 | 5c7cfebc992873adce8eff71f24a632555106deb | /challenges/python/bicalc.py | 2f52a3d54e4f745fbcb0fa9984e7760337c72066 | []
| no_license | bledidalipaj/codefights | bf103d7a05a16a436b68264b4610acd8e8b76bfe | e52ebf22b7eb018375adef7b4738a5b7eee0355d | refs/heads/master | 2021-01-09T20:42:01.659494 | 2019-03-05T20:02:15 | 2019-03-05T20:02:15 | 62,514,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | """
You have a calculator called BiCalc, which is very strange, as it can only perform two operations: multiply by 2, denoted as x2, and subtract
by 1, denoted -1. You have a lot of time on your hands, so write a program that will help you determine the minimum number of button presses
required to get from the start value to the end value.
Other important information
The calculator can only display numbers up to 999999999 = 109 - 1, so if at any time the current value of the number becomes greater than
999999999, the calculator will display "Error".
It is guaranteed that there is no need to use negative numbers to solve this challenge.
Examples
For start = 1 and end = 16, the output should be
BiCalc(start, end) = 4.
You can multiply 1 by 2 four times to get 16 (i.e. press x2 4 times).
For start = 8 and end = 2, the output should be
BiCalc(start, end) = 6.
You can subtract 1 from 8 six times to get 2 (i.e. press -1 6 times).
Input/Output
[time limit] 4000ms (js)
[input] integer start
The starting number that is displayed on the calculator.
Constraints:
1 ≤ start ≤ 109 - 1.
[input] integer end
The number that you wish to achieve with as few button presses as possible.
Constraints:
1 ≤ end ≤ 109 - 1.
[output] integer
The minimum number of button presses required to get from start to end or return -1 (the integer, not the button) if it isn't possible.
# Challenge's link: https://codefights.com/challenge/4qomB9ThTehgbra72/main #
"""
def BiCalc(start, end):
result = 0
while start != end:
if end >= 10**9:
return -1
if start >= end:
result += start - end
end = start
elif end % 2 == 1:
end += 1
result += 1
else:
end /= 2
result += 1
return result
def BiCalc(start, end):
res = 0
while end > start:
if end % 2 != 0:
end += 1
else:
end /= 2
if end >= 10**9: return -1
res += 1
    return res + start - end
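# Quick sanity check against the examples in the problem statement above
# (hypothetical usage; calls the second definition of BiCalc, which shadows the first):
if __name__ == '__main__':
    print(BiCalc(1, 16))  # expected 4: press x2 four times
    print(BiCalc(8, 2))   # expected 6: press -1 six times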
"[email protected]"
]
| |
96f8bb2b0b7cac97ab2c14732709d42e18eac054 | df2f413970cb95bbfc294ae15df7237ae28d985a | /twd/rango/migrations/0003_category_slug.py | c9ad37ed41b977205f5bd152573db8f8e7d1f7f9 | []
| no_license | mcgeorgiev/tango_with_django | 7033598ae5fe79c5b603bdaf88aef0262a06c075 | 6fa447349150a760fbc18f9af232363ea3b59952 | refs/heads/master | 2021-01-12T00:26:13.219718 | 2017-02-24T19:49:43 | 2017-02-24T19:49:43 | 78,726,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-26 13:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rango', '0002_auto_20170120_1724'),
]
operations = [
migrations.AddField(
model_name='category',
name='slug',
field=models.SlugField(default=''),
preserve_default=False,
),
]
| [
"[email protected]"
]
| |
098fb4e8dbae6bbc9a3221ab06d0021fd72218f8 | f426daf0544bf3edc39c16c62d344f3d7ef96306 | /migrations/versions/6dd034695410_initialmigration.py | ae25d3023f5717add5ed9050bfeae0f4d0f70537 | []
| no_license | jesskarl/BOLG-python-webapp | 92d1fced5019772e2790eea4a10698c2127e2d63 | ebac4fdd3e543c9f4725f4d7962c56df7919fac8 | refs/heads/master | 2020-03-18T21:43:23.054520 | 2018-05-29T15:29:57 | 2018-05-29T15:29:57 | 135,298,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | """initialmigration
Revision ID: 6dd034695410
Revises: f0a8edad350a
Create Date: 2017-05-08 21:35:32.065000
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6dd034695410'
down_revision = 'f0a8edad350a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('body_html', sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('posts', 'body_html')
# ### end Alembic commands ###
| [
"[email protected]"
]
| |
39ee15326027b428b34b878729f3109081d99b0e | cbc409dfc0e065c970d3451af1b726b7b0a9f8da | /lab3/lab3_4.py | aeaa6eaa7f88eb3709087a1a1eda6f681780bbcd | []
| no_license | Tsirtsey/Python | 91d843542cfedd719e5891999a125cf46221d3a1 | 05214bd5da7866c8a37902885c5ad2a3f60016b8 | refs/heads/master | 2021-04-17T11:29:19.465996 | 2020-05-16T16:40:44 | 2020-05-16T16:40:44 | 249,441,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,810 | py | """
Write a simple StringFormatter class for formatting strings with the following functionality:
– removing from the string all words shorter than n letters;
– replacing every digit in the string with the '*' character;
– inserting one space between all characters in the string;
– sorting the words by length;
– sorting the words in lexicographic order.
"""
import re
class StringFormatter(object):
separators = [' ']
@staticmethod
def _get_words(line):
return re.split('|'.join(StringFormatter.separators), line)
@staticmethod
def clean(line, n):
words = StringFormatter._get_words(line)
new_string_parts = [word for word in words if len(word) > n]
return ' '.join(new_string_parts)
@staticmethod
def hide_figures(line):
for num in '0123456789':
line = line.replace(num, '*')
return line
@staticmethod
def add_spases(line):
return line.replace('', ' ')
@staticmethod
def sort_by_size(line):
words = StringFormatter._get_words(line)
return ' '.join(sorted(words, key=lambda word: len(word)))
@staticmethod
def sort_by_lec(line):
words = StringFormatter._get_words(line)
return ' '.join(sorted(words))
if __name__ == '__main__':
s = 'gdsdf 44312 ggdfg 54ggg gasdfasdasweer 553jg 4566oqr6'
print(StringFormatter.clean(s, 4))
print(StringFormatter.hide_figures(s))
print(StringFormatter.add_spases(s))
print(StringFormatter.sort_by_size(s))
print(StringFormatter.sort_by_lec(s))
| [
"[email protected]"
]
| |
6dae55ebac1694f37ebb2d93a70f91307a4c212f | ebfad103f30d6eb7b8d53af4bbe59717a0aefb77 | /yield.py | cb1edbac00f56088e9ca9a0977b4e57a8d138344 | []
| no_license | barucAlmaguer/py_learning | 190832d139f6ee6f03027c10af8ab2400b9b8690 | da51c713703fea787fa40dce78a85ce4469226a7 | refs/heads/master | 2021-05-15T16:38:03.315295 | 2017-10-19T14:38:49 | 2017-10-19T14:38:49 | 107,469,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | def yielding(txt) :
for char in txt :
yield char.upper()
def squaring(num) :
for i in range(1, num) :
yield i * i
| [
"[email protected]"
]
| |
4cf3156f5bda4a6d3722d100f183d2c04c3177ba | 951690846b6560ebb551f47151cedf4660dd907e | /Project/settings.py | 003eb4863bf69c2f2c4749a3d1fd7f03d1507a08 | []
| no_license | Django-Nawaz/Project | 7cf597d96407701434b110dd545c4f63583d9eef | 2225de4f32399fb4952c6bcd8f08707a09c866aa | refs/heads/master | 2020-06-11T08:10:03.055939 | 2016-12-06T07:53:21 | 2016-12-06T07:53:21 | 75,726,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,568 | py | """
Django settings for Project project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sut$pt!#cr6_&7!^mx@*-pc$jlb+lteft=g=0&%dzkw9ib^$(e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'App',
'smart_selects',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
BOOTSTRAP_ADMIN_SIDEBAR_MENU = False
WSGI_APPLICATION = 'Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql_psycopg2',
# 'NAME': 'questup',
# 'USER': 'postgres',
# 'PASSWORD': 'nawaz1996',
# 'HOST': 'localhost',
# 'PORT': '8005',
# }
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'questup',
'USER': 'nawaz',
'PASSWORD': 'nawaz1996',
'HOST': 'quest-up-rds.cgjqg9blzvea.us-west-2.rds.amazonaws.com',
'HOST': 'localhost',
'PORT': '8005',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
24b69f5344e10e8e91a8f19fbc7f9af8668d6232 | 7d78dbc0b34c92aa6040b291016a1c2a2f0811b5 | /sms_notification_drc/models/sms_template.py | e165af1ce5cd0ac46416f2611583289bbcff257a | []
| no_license | odoo258/apps | c623b0e4b3dc54b1545eafedb1a10a32fc0bc81a | 4b2c97677ebb0c6c838d2c5ed2e2c05bb57ed842 | refs/heads/master | 2020-04-08T01:37:40.269249 | 2018-02-21T04:23:39 | 2018-02-21T04:23:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # -*- coding: utf-8 -*-
from odoo import fields, models
class TemplateSms(models.Model):
_name = "template.sms"
_rec_name = 'template_name'
template_name = fields.Char(string="Name", required=True)
template_global = fields.Boolean(string="Global")
template_condition = fields.Selection([('order_place', 'Order Place'), ('order_confirm', 'Order Confirm'), (
'order_delivered', 'Order Delivered')])
template_model_id = fields.Many2one(
'ir.model', string="Applies to", domain=[('model', '=', 'sale.order')])
template_auto_delete = fields.Boolean(string="Auto Delete")
content = fields.Text()
| [
"[email protected]"
]
| |
04aef65bbb2620a26219bf0e742b8b4cfbc302c8 | cf8e0179cca7be3b40a3362eb5cbd4d17c2f0e28 | /targ_pos_socket/scripts/send_target_socket.py | 73f3c5d961a2104d10229f9f7cb3845546cea3bc | []
| no_license | robertoalbanese/GP_App | 35c187b2a9417d97ab575e033ae176f1def6ffd7 | cf3f4d8c50029ed332bdf34ee9251e64b73b90cc | refs/heads/main | 2023-03-30T13:25:13.046050 | 2021-04-01T13:11:35 | 2021-04-01T13:11:35 | 351,187,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | #!/usr/bin/env python
# -*- coding: iso-8859-15 -*-
import os
import sys
import socket
import json
import time
# ROS
import rospy
from geometry_msgs.msg import Point
class send_target_socket():
FORMAT = "utf-8"
DISCONNECT_MESSAGE = "!DISCONNECT"
def __init__(self, ip_server, port):
rospy.init_node('send_target_socket')
self.PORT = port
        self.SERVER = ip_server  # server IP address
        self.ADDR = (self.SERVER, self.PORT)
        # TODO: wrap the file read below in a try/except
path = os.path.dirname(os.path.abspath(__file__)) + '/target.JSON'
print(path)
""" with open('./target.JSON','wb') as j:
self.data = json.load(j) """
self.data = json.load(open(path))
print("just read")
self.sub = rospy.Subscriber("/D_Control_point", Point, self.send_target_to_app, queue_size=1)
def send_msg(self, msg, client):
        message = msg.encode(self.FORMAT)  # encode as bytes for transmission
        client.send(message)  # send the message
        # print(client.recv(2048).decode(FORMAT))  # decode the reply and print it
client.close()
def send_target_to_app(self, target):
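        """ROS subscriber callback: copy the received Point into the JSON payload and push it to the TCP server."""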
self.data["target_x"] = target.x
self.data["target_y"] = target.y
msg = json.dumps(self.data)
print("before sendig")
client = socket.socket(
socket.AF_INET, socket.SOCK_STREAM) # creo il client
client.connect(self.ADDR) # indirizzo del server a cui devo connettermi
print("Sanding the jason msg...")
self.send_msg(msg, client)
time.sleep(1)
if __name__ == "__main__":
ip_server = "130.251.13.144"
port = 8080
client = send_target_socket(ip_server, port)
try:
rospy.spin()
except KeyboardInterrupt:
print ("Shutting down ROS Image feature detector module")
| [
"[email protected]"
]
| |
a4a1af6dd0b77de844752031489160a61c9b5bf1 | a3a98c9b882835fdf1f89008911879406e1785a0 | /sort.py | 3aecf4a9f159075d3a728fd9d9ec23ada85008b1 | [
"Apache-2.0"
]
| permissive | currysesame/sort_method | 69df4647f88093e937a99b49accbce85c9476511 | e603ee4048232c3f4d03f58577d0a20922007fc6 | refs/heads/master | 2022-11-25T11:59:37.704260 | 2020-08-08T07:36:30 | 2020-08-08T07:36:30 | 280,795,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py |
# insertion sort (adjacent-swap variant)
import numpy as np
arr1 = np.array([233,646,242,6,24,7,234,-323,66,-24,23,2,18,122,233,34,11])
print(arr1)
def switch(array_, x, y):
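    """Swap the elements at indices x and y (when both are in range) and return the array."""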
if(x < (len(array_)) and y < (len(array_))):
buffer1 = array_[x]
array_[x] = array_[y]
array_[y] = buffer1
return array_
for i in range(len(arr1) -1):
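    # insert arr1[i+1] into the already-sorted prefix arr1[0..i] by swapping it leftwards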
for j in range(i+1):
if(arr1[i-j+1] < arr1[i-j]):
arr1 = switch(arr1, i-j+1, i-j)
else:
break
print(arr1)
| [
"[email protected]"
]
| |
196e71e3cfeddd2e4c3953c160c4ad91cf327a90 | 1e45ec7a39b66478a766b753445af8fea4b0045d | /reddwarf/guestagent/manager/mysql.py | 2e733072e8dfd315e553fcf45af8aa4c7d13d6ea | [
"Apache-2.0"
]
| permissive | mmcdaris/reddwarf | 54d703e3cf81654d4800d68943898347a53bdd51 | c6b2d78e82f8de781c6750e0132e84f9438a0ced | refs/heads/master | 2021-01-15T22:41:54.663997 | 2013-05-10T19:39:19 | 2013-05-10T19:39:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,466 | py | import os
import re
import time
import uuid
from datetime import date
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy import interfaces
from sqlalchemy.sql.expression import text
from reddwarf import db
from reddwarf.common.exception import ProcessExecutionError
from reddwarf.common import cfg
from reddwarf.common import utils
from reddwarf.guestagent import dbaas
from reddwarf.guestagent import query
from reddwarf.guestagent.db import models
from reddwarf.guestagent import pkg
from reddwarf.guestagent import volume
from reddwarf.instance import models as rd_models
from reddwarf.openstack.common import log as logging
from reddwarf.openstack.common.gettextutils import _
from reddwarf.openstack.common import periodic_task
LOG = logging.getLogger(__name__)
MYSQL_BASE_DIR = "/var/lib/mysql"
class Manager(periodic_task.PeriodicTasks):
@periodic_task.periodic_task(ticks_between_runs=10)
def update_status(self, context):
"""Update the status of the MySQL service"""
MySqlAppStatus.get().update()
def change_passwords(self, context, users):
return MySqlAdmin().change_passwords(users)
def create_database(self, context, databases):
return MySqlAdmin().create_database(databases)
def create_user(self, context, users):
MySqlAdmin().create_user(users)
def delete_database(self, context, database):
return MySqlAdmin().delete_database(database)
def delete_user(self, context, user):
MySqlAdmin().delete_user(user)
def get_user(self, context, username, hostname):
return MySqlAdmin().get_user(username, hostname)
def grant_access(self, context, username, hostname, databases):
return MySqlAdmin().grant_access(username, hostname, databases)
def revoke_access(self, context, username, hostname, database):
return MySqlAdmin().revoke_access(username, hostname, database)
def list_access(self, context, username, hostname):
return MySqlAdmin().list_access(username, hostname)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
return MySqlAdmin().list_databases(limit, marker,
include_marker)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
return MySqlAdmin().list_users(limit, marker,
include_marker)
def enable_root(self, context):
return MySqlAdmin().enable_root()
def is_root_enabled(self, context):
return MySqlAdmin().is_root_enabled()
def prepare(self, context, databases, memory_mb, users, device_path=None,
mount_point=None):
"""Makes ready DBAAS on a Guest container."""
MySqlAppStatus.get().begin_mysql_install()
# status end_mysql_install set with secure()
app = MySqlApp(MySqlAppStatus.get())
restart_mysql = False
if device_path:
device = volume.VolumeDevice(device_path)
device.format()
#if a /var/lib/mysql folder exists, back it up.
if os.path.exists(MYSQL_BASE_DIR):
#stop and do not update database
app.stop_db()
restart_mysql = True
                #rsync existing data
device.migrate_data(MYSQL_BASE_DIR)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounted the volume."))
#check mysql was installed and stopped
if restart_mysql:
app.start_mysql()
app.install_if_needed()
LOG.info("Securing mysql now.")
app.secure(memory_mb)
self.create_database(context, databases)
self.create_user(context, users)
LOG.info('"prepare" call has finished.')
def restart(self, context):
app = MySqlApp(MySqlAppStatus.get())
app.restart()
def start_db_with_conf_changes(self, context, updated_memory_size):
app = MySqlApp(MySqlAppStatus.get())
app.start_db_with_conf_changes(updated_memory_size)
def stop_db(self, context, do_not_start_on_reboot=False):
app = MySqlApp(MySqlAppStatus.get())
app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def get_filesystem_stats(self, context, fs_path):
""" Gets the filesystem stats for the path given """
return dbaas.Interrogator().get_filesystem_volume_stats(fs_path)
ADMIN_USER_NAME = "os_admin"
FLUSH = text(query.FLUSH)
ENGINE = None
MYSQLD_ARGS = None
PREPARING = False
UUID = False
ORIG_MYCNF = "/etc/mysql/my.cnf"
FINAL_MYCNF = "/var/lib/mysql/my.cnf"
TMP_MYCNF = "/tmp/my.cnf.tmp"
DBAAS_MYCNF = "/etc/dbaas/my.cnf/my.cnf.%dM"
MYSQL_BASE_DIR = "/var/lib/mysql"
CONF = cfg.CONF
INCLUDE_MARKER_OPERATORS = {
True: ">=",
False: ">"
}
def generate_random_password():
return str(uuid.uuid4())
def get_auth_password():
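    """Read the password entry out of /etc/mysql/my.cnf (via sudo awk)."""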
pwd, err = utils.execute_with_timeout(
"sudo",
"awk",
"/password\\t=/{print $3; exit}",
"/etc/mysql/my.cnf")
if err:
LOG.error(err)
raise RuntimeError("Problem reading my.cnf! : %s" % err)
return pwd.strip()
def get_engine():
"""Create the default engine with the updated admin user"""
#TODO(rnirmal):Based on permissions issues being resolved we may revert
#url = URL(drivername='mysql', host='localhost',
# query={'read_default_file': '/etc/mysql/my.cnf'})
global ENGINE
if ENGINE:
return ENGINE
#ENGINE = create_engine(name_or_url=url)
pwd = get_auth_password()
ENGINE = create_engine("mysql://%s:%s@localhost:3306" %
(ADMIN_USER_NAME, pwd.strip()),
pool_recycle=7200, echo=True,
listeners=[KeepAliveConnection()])
return ENGINE
def load_mysqld_options():
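    """Run mysqld --print-defaults and parse the reported options into a dict."""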
try:
out, err = utils.execute("/usr/sbin/mysqld", "--print-defaults",
run_as_root=True)
arglist = re.split("\n", out)[1].split()
args = {}
for item in arglist:
if "=" in item:
key, value = item.split("=")
args[key.lstrip("--")] = value
else:
args[item.lstrip("--")] = None
return args
except ProcessExecutionError as e:
return None
class MySqlAppStatus(object):
"""
Answers the question "what is the status of the MySQL application on
this box?" The answer can be that the application is not installed, or
the state of the application is determined by calling a series of
commands.
    This class also handles saving and loading the status of the MySQL application
in the database.
The status is updated whenever the update() method is called, except
if the state is changed to building or restart mode using the
"begin_mysql_install" and "begin_mysql_restart" methods.
The building mode persists in the database while restarting mode does
not (so if there is a Python Pete crash update() will set the status to
show a failure).
    These modes are exited, and update() resumes its normal behavior, when
end_install_or_restart() is called, at which point the status again
reflects the actual status of the MySQL app.
"""
_instance = None
def __init__(self):
if self._instance is not None:
raise RuntimeError("Cannot instantiate twice.")
self.status = self._load_status()
self.restart_mode = False
def begin_mysql_install(self):
"""Called right before MySQL is prepared."""
self.set_status(rd_models.ServiceStatuses.BUILDING)
def begin_mysql_restart(self):
"""Called before restarting MySQL."""
self.restart_mode = True
def end_install_or_restart(self):
"""Called after MySQL is installed or restarted.
Updates the database with the actual MySQL status.
"""
LOG.info("Ending install_if_needed or restart.")
self.restart_mode = False
real_status = self._get_actual_db_status()
LOG.info("Updating status to %s" % real_status)
self.set_status(real_status)
@classmethod
def get(cls):
if not cls._instance:
cls._instance = MySqlAppStatus()
return cls._instance
def _get_actual_db_status(self):
global MYSQLD_ARGS
try:
out, err = utils.execute_with_timeout(
"/usr/bin/mysqladmin",
"ping", run_as_root=True)
LOG.info("Service Status is RUNNING.")
return rd_models.ServiceStatuses.RUNNING
except ProcessExecutionError as e:
LOG.error("Process execution ")
try:
out, err = utils.execute_with_timeout("/bin/ps", "-C",
"mysqld", "h")
pid = out.split()[0]
# TODO(rnirmal): Need to create new statuses for instances
# where the mysql service is up, but unresponsive
LOG.info("Service Status is BLOCKED.")
return rd_models.ServiceStatuses.BLOCKED
except ProcessExecutionError as e:
if not MYSQLD_ARGS:
MYSQLD_ARGS = load_mysqld_options()
pid_file = MYSQLD_ARGS.get('pid_file',
'/var/run/mysqld/mysqld.pid')
if os.path.exists(pid_file):
LOG.info("Service Status is CRASHED.")
return rd_models.ServiceStatuses.CRASHED
else:
LOG.info("Service Status is SHUTDOWN.")
return rd_models.ServiceStatuses.SHUTDOWN
@property
def is_mysql_installed(self):
"""
True if MySQL app should be installed and attempts to ascertain
its status won't result in nonsense.
"""
return (self.status is not None and
self.status != rd_models.ServiceStatuses.BUILDING and
self.status != rd_models.ServiceStatuses.FAILED)
@property
def _is_mysql_restarting(self):
return self.restart_mode
@property
def is_mysql_running(self):
"""True if MySQL is running."""
return (self.status is not None and
self.status == rd_models.ServiceStatuses.RUNNING)
@staticmethod
def _load_status():
"""Loads the status from the database."""
id = CONF.guest_id
return rd_models.InstanceServiceStatus.find_by(instance_id=id)
def set_status(self, status):
"""Changes the status of the MySQL app in the database."""
db_status = self._load_status()
db_status.set_status(status)
db_status.save()
self.status = status
def update(self):
"""Find and report status of MySQL on this machine.
        The guest's database record is updated with the refreshed status.
"""
if self.is_mysql_installed and not self._is_mysql_restarting:
LOG.info("Determining status of MySQL app...")
status = self._get_actual_db_status()
self.set_status(status)
else:
LOG.info("MySQL is not installed or is in restart mode, so for "
"now we'll skip determining the status of MySQL on this "
"box.")
def wait_for_real_status_to_change_to(self, status, max_time,
update_db=False):
"""
Waits the given time for the real status to change to the one
specified. Does not update the publicly viewable status Unless
"update_db" is True.
"""
WAIT_TIME = 3
waited_time = 0
while(waited_time < max_time):
time.sleep(WAIT_TIME)
waited_time += WAIT_TIME
LOG.info("Waiting for MySQL status to change to %s..." % status)
actual_status = self._get_actual_db_status()
LOG.info("MySQL status was %s after %d seconds."
% (actual_status, waited_time))
if actual_status == status:
if update_db:
self.set_status(actual_status)
return True
LOG.error("Time out while waiting for MySQL app status to change!")
return False
class LocalSqlClient(object):
"""A sqlalchemy wrapper to manage transactions"""
def __init__(self, engine, use_flush=True):
self.engine = engine
self.use_flush = use_flush
def __enter__(self):
self.conn = self.engine.connect()
self.trans = self.conn.begin()
return self.conn
def __exit__(self, type, value, traceback):
if self.trans:
if type is not None: # An error occurred
self.trans.rollback()
else:
if self.use_flush:
self.conn.execute(FLUSH)
self.trans.commit()
self.conn.close()
def execute(self, t, **kwargs):
try:
return self.conn.execute(t, kwargs)
except:
self.trans.rollback()
self.trans = None
raise
class MySqlAdmin(object):
"""Handles administrative tasks on the MySQL database."""
def _associate_dbs(self, user):
"""Internal. Given a MySQLUser, populate its databases attribute."""
LOG.debug("Associating dbs to user %s at %s" % (user.name, user.host))
with LocalSqlClient(get_engine()) as client:
q = query.Query()
q.columns = ["grantee", "table_schema"]
q.tables = ["information_schema.SCHEMA_PRIVILEGES"]
q.group = ["grantee", "table_schema"]
q.where = ["privilege_type != 'USAGE'"]
t = text(str(q))
db_result = client.execute(t)
for db in db_result:
LOG.debug("\t db: %s" % db)
if db['grantee'] == "'%s'@'%s'" % (user.name, user.host):
mysql_db = models.MySQLDatabase()
mysql_db.name = db['table_schema']
user.databases.append(mysql_db.serialize())
def change_passwords(self, users):
"""Change the passwords of one or more existing users."""
LOG.debug("Changing the password of some users.""")
LOG.debug("Users is %s" % users)
with LocalSqlClient(get_engine()) as client:
for item in users:
LOG.debug("\tUser: %s" % item)
user_dict = {'_name': item['name'],
'_host': item['host'],
'_password': item['password'],
}
user = models.MySQLUser()
user.deserialize(user_dict)
LOG.debug("\tDeserialized: %s" % user.__dict__)
uu = query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
def create_database(self, databases):
"""Create the list of specified databases"""
with LocalSqlClient(get_engine()) as client:
for item in databases:
mydb = models.MySQLDatabase()
mydb.deserialize(item)
cd = query.CreateDatabase(mydb.name,
mydb.character_set,
mydb.collate)
t = text(str(cd))
client.execute(t)
def create_user(self, users):
"""Create users and grant them privileges for the
specified databases"""
with LocalSqlClient(get_engine()) as client:
for item in users:
user = models.MySQLUser()
user.deserialize(item)
# TODO(cp16net):Should users be allowed to create users
# 'os_admin' or 'debian-sys-maint'
g = query.Grant(user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
for database in user.databases:
mydb = models.MySQLDatabase()
mydb.deserialize(database)
g = query.Grant(permissions='ALL', database=mydb.name,
user=user.name, host=user.host,
clear=user.password)
t = text(str(g))
client.execute(t)
def delete_database(self, database):
"""Delete the specified database"""
with LocalSqlClient(get_engine()) as client:
mydb = models.MySQLDatabase()
mydb.deserialize(database)
dd = query.DropDatabase(mydb.name)
t = text(str(dd))
client.execute(t)
def delete_user(self, user):
"""Delete the specified users"""
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
mysql_user.deserialize(user)
du = query.DropUser(mysql_user.name, host=mysql_user.host)
t = text(str(du))
client.execute(t)
def enable_root(self):
"""Enable the root user global access and/or reset the root password"""
user = models.MySQLUser()
user.name = "root"
user.host = "%"
user.password = generate_random_password()
with LocalSqlClient(get_engine()) as client:
try:
cu = query.CreateUser(user.name, host=user.host)
t = text(str(cu))
client.execute(t, **cu.keyArgs)
except exc.OperationalError as err:
# Ignore, user is already created, just reset the password
# TODO(rnirmal): More fine grained error checking later on
LOG.debug(err)
with LocalSqlClient(get_engine()) as client:
uu = query.UpdateUser(user.name, host=user.host,
clear=user.password)
t = text(str(uu))
client.execute(t)
LOG.debug("CONF.root_grant: %s CONF.root_grant_option: %s" %
(CONF.root_grant, CONF.root_grant_option))
g = query.Grant(permissions=CONF.root_grant,
user=user.name,
host=user.host,
grant_option=CONF.root_grant_option,
clear=user.password)
t = text(str(g))
client.execute(t)
return user.serialize()
def get_user(self, username, hostname):
user = self._get_user(username, hostname)
if not user:
return None
return user.serialize()
def _get_user(self, username, hostname):
"""Return a single user matching the criteria"""
user = models.MySQLUser()
try:
user.name = username # Could possibly throw a BadRequest here.
        except ValueError as ve:
raise exception.BadRequest("Username %s is not valid: %s"
% (username, ve.message))
with LocalSqlClient(get_engine()) as client:
q = query.Query()
q.columns = ['User', 'Host', 'Password']
q.tables = ['mysql.user']
q.where = ["Host != 'localhost'",
"User = '%s'" % username,
"Host = '%s'" % hostname,
]
q.order = ['User', 'Host']
t = text(str(q))
result = client.execute(t).fetchall()
LOG.debug("Result: %s" % result)
if len(result) != 1:
return None
found_user = result[0]
user.password = found_user['Password']
user.host = found_user['Host']
self._associate_dbs(user)
return user
def grant_access(self, username, hostname, databases):
"""Give a user permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
for database in databases:
g = query.Grant(permissions='ALL', database=database,
user=user.name, host=user.host,
hashed=user.password)
t = text(str(g))
client.execute(t)
def is_root_enabled(self):
"""Return True if root access is enabled; False otherwise."""
with LocalSqlClient(get_engine()) as client:
t = text(query.ROOT_ENABLED)
result = client.execute(t)
LOG.debug("result = " + str(result))
return result.rowcount != 0
def list_databases(self, limit=None, marker=None, include_marker=False):
"""List databases the user created on this mysql instance"""
LOG.debug(_("---Listing Databases---"))
databases = []
with LocalSqlClient(get_engine()) as client:
# If you have an external volume mounted at /var/lib/mysql
# the lost+found directory will show up in mysql as a database
# which will create errors if you try to do any database ops
# on it. So we remove it here if it exists.
q = query.Query()
q.columns = [
'schema_name as name',
'default_character_set_name as charset',
'default_collation_name as collation',
]
q.tables = ['information_schema.schemata']
q.where = ["schema_name NOT IN ("
"'mysql', 'information_schema', "
"'lost+found', '#mysql50#lost+found'"
")"]
q.order = ['schema_name ASC']
if limit:
q.limit = limit + 1
if marker:
q.where.append("schema_name %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
t = text(str(q))
database_names = client.execute(t)
next_marker = None
LOG.debug(_("database_names = %r") % database_names)
for count, database in enumerate(database_names):
if count >= limit:
break
LOG.debug(_("database = %s ") % str(database))
mysql_db = models.MySQLDatabase()
mysql_db.name = database[0]
next_marker = mysql_db.name
mysql_db.character_set = database[1]
mysql_db.collate = database[2]
databases.append(mysql_db.serialize())
LOG.debug(_("databases = ") + str(databases))
if database_names.rowcount <= limit:
next_marker = None
return databases, next_marker
def list_users(self, limit=None, marker=None, include_marker=False):
"""List users that have access to the database"""
'''
SELECT
User,
Host,
Marker
FROM
(SELECT
User,
Host,
CONCAT(User, '@', Host) as Marker
FROM mysql.user
ORDER BY 1, 2) as innerquery
WHERE
Marker > :marker
ORDER BY
Marker
LIMIT :limit;
'''
LOG.debug(_("---Listing Users---"))
users = []
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
iq = query.Query() # Inner query.
iq.columns = ['User', 'Host', "CONCAT(User, '@', Host) as Marker"]
iq.tables = ['mysql.user']
iq.order = ['User', 'Host']
innerquery = str(iq).rstrip(';')
oq = query.Query() # Outer query.
oq.columns = ['User', 'Host', 'Marker']
oq.tables = ['(%s) as innerquery' % innerquery]
oq.where = ["Host != 'localhost'"]
oq.order = ['Marker']
if marker:
oq.where.append("Marker %s '%s'" %
(INCLUDE_MARKER_OPERATORS[include_marker],
marker))
if limit:
oq.limit = limit + 1
t = text(str(oq))
result = client.execute(t)
next_marker = None
LOG.debug("result = " + str(result))
for count, row in enumerate(result):
if count >= limit:
break
LOG.debug("user = " + str(row))
mysql_user = models.MySQLUser()
mysql_user.name = row['User']
mysql_user.host = row['Host']
self._associate_dbs(mysql_user)
next_marker = row['Marker']
users.append(mysql_user.serialize())
if result.rowcount <= limit:
next_marker = None
LOG.debug("users = " + str(users))
return users, next_marker
def revoke_access(self, username, hostname, database):
"""Give a user permission to use a given database."""
user = self._get_user(username, hostname)
with LocalSqlClient(get_engine()) as client:
r = query.Revoke(database=database, user=user.name, host=user.host,
hashed=user.password)
t = text(str(r))
client.execute(t)
def list_access(self, username, hostname):
"""Show all the databases to which the user has more than
USAGE granted."""
user = self._get_user(username, hostname)
return user.databases
class KeepAliveConnection(interfaces.PoolListener):
"""
A connection pool listener that ensures live connections are returned
    from the connection pool at checkout. This alleviates the problem of
    MySQL connections timing out.
"""
def checkout(self, dbapi_con, con_record, con_proxy):
"""Event triggered when a connection is checked out from the pool"""
try:
try:
dbapi_con.ping(False)
except TypeError:
dbapi_con.ping()
except dbapi_con.OperationalError, ex:
if ex.args[0] in (2006, 2013, 2014, 2045, 2055):
raise exc.DisconnectionError()
else:
raise
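# Illustrative sketch only (not part of the original Trove module): shows how
# a pool listener such as KeepAliveConnection is typically attached when an
# engine is built. The URL is a placeholder, and create_engine is the
# SQLAlchemy factory this module already uses above.
def _example_engine_with_keepalive(url="mysql://os_admin:secret@localhost:3306"):
    """Return an engine whose pooled connections are pinged at checkout."""
    return create_engine(url, echo=True, listeners=[KeepAliveConnection()])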
class MySqlApp(object):
"""Prepares DBaaS on a Guest container."""
TIME_OUT = 1000
MYSQL_PACKAGE_VERSION = CONF.mysql_pkg
def __init__(self, status):
""" By default login with root no password for initial setup. """
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status
def _create_admin_user(self, client, password):
"""
Create a os_admin user with a random password
with all privileges similar to the root user
"""
localhost = "localhost"
cu = query.CreateUser(ADMIN_USER_NAME, host=localhost)
t = text(str(cu))
client.execute(t, **cu.keyArgs)
uu = query.UpdateUser(ADMIN_USER_NAME, host=localhost, clear=password)
t = text(str(uu))
client.execute(t)
g = query.Grant(permissions='ALL', user=ADMIN_USER_NAME,
host=localhost, grant_option=True, clear=password)
t = text(str(g))
client.execute(t)
@staticmethod
def _generate_root_password(client):
""" Generate and set a random root password and forget about it. """
localhost = "localhost"
uu = query.UpdateUser("root", host=localhost,
clear=generate_random_password())
t = text(str(uu))
client.execute(t)
def install_if_needed(self):
"""Prepare the guest machine with a secure mysql server installation"""
LOG.info(_("Preparing Guest as MySQL Server"))
if not self.is_installed():
self._install_mysql()
LOG.info(_("Dbaas install_if_needed complete"))
def secure(self, memory_mb):
LOG.info(_("Generating root password..."))
admin_password = generate_random_password()
engine = create_engine("mysql://root:@localhost:3306", echo=True)
with LocalSqlClient(engine) as client:
self._generate_root_password(client)
self._remove_anonymous_user(client)
self._remove_remote_root_access(client)
self._create_admin_user(client, admin_password)
self.stop_db()
self._write_mycnf(memory_mb, admin_password)
self.start_mysql()
self.status.end_install_or_restart()
LOG.info(_("Dbaas secure complete."))
def _install_mysql(self):
"""Install mysql server. The current version is 5.5"""
LOG.debug(_("Installing mysql server"))
pkg.pkg_install(self.MYSQL_PACKAGE_VERSION, self.TIME_OUT)
LOG.debug(_("Finished installing mysql server"))
#TODO(rnirmal): Add checks to make sure the package got installed
def _enable_mysql_on_boot(self):
'''
        There is a difference between the init.d mechanism and the upstart one.
        The stock mysql uses the upstart mechanism, therefore there is a
        mysql.conf file responsible for the job. To toggle enable/disable
        on boot one needs to modify this file. Percona uses the init.d
        mechanism and there is no mysql.conf file. Instead, the update-rc.d
        command needs to be used to modify the /etc/rc#.d/[S/K]##mysql links
'''
LOG.info("Enabling mysql on boot.")
conf = "/etc/init/mysql.conf"
if os.path.isfile(conf):
command = "sudo sed -i '/^manual$/d' %(conf)s"
command = command % locals()
else:
command = "sudo update-rc.d mysql enable"
utils.execute_with_timeout(command, with_shell=True)
def _disable_mysql_on_boot(self):
'''
        There is a difference between the init.d mechanism and the upstart one.
        The stock mysql uses the upstart mechanism, therefore there is a
        mysql.conf file responsible for the job. To toggle enable/disable
        on boot one needs to modify this file. Percona uses the init.d
        mechanism and there is no mysql.conf file. Instead, the update-rc.d
        command needs to be used to modify the /etc/rc#.d/[S/K]##mysql links
'''
LOG.info("Disabling mysql on boot.")
conf = "/etc/init/mysql.conf"
if os.path.isfile(conf):
command = '''sudo sh -c "echo manual >> %(conf)s"'''
command = command % locals()
else:
command = "sudo update-rc.d mysql disable"
utils.execute_with_timeout(command, with_shell=True)
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
LOG.info(_("Stopping mysql..."))
if do_not_start_on_reboot:
self._disable_mysql_on_boot()
utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "stop")
if not self.status.wait_for_real_status_to_change_to(
rd_models.ServiceStatuses.SHUTDOWN,
self.state_change_wait_time, update_db):
LOG.error(_("Could not stop MySQL!"))
self.status.end_install_or_restart()
raise RuntimeError("Could not stop MySQL!")
def _remove_anonymous_user(self, client):
t = text(query.REMOVE_ANON)
client.execute(t)
def _remove_remote_root_access(self, client):
t = text(query.REMOVE_ROOT)
client.execute(t)
def restart(self):
try:
self.status.begin_mysql_restart()
self.stop_db()
self.start_mysql()
finally:
self.status.end_install_or_restart()
def _replace_mycnf_with_template(self, template_path, original_path):
LOG.debug("replacing the mycnf with template")
LOG.debug("template_path(%s) original_path(%s)"
% (template_path, original_path))
if os.path.isfile(template_path):
if os.path.isfile(original_path):
utils.execute_with_timeout(
"sudo", "mv", original_path,
"%(name)s.%(date)s" %
{'name': original_path, 'date':
date.today().isoformat()})
utils.execute_with_timeout("sudo", "cp", template_path,
original_path)
def _write_temp_mycnf_with_admin_account(self, original_file_path,
temp_file_path, password):
utils.execute_with_timeout("sudo", "chmod", "0711", MYSQL_BASE_DIR)
mycnf_file = open(original_file_path, 'r')
tmp_file = open(temp_file_path, 'w')
for line in mycnf_file:
tmp_file.write(line)
if "[client]" in line:
tmp_file.write("user\t\t= %s\n" % ADMIN_USER_NAME)
tmp_file.write("password\t= %s\n" % password)
mycnf_file.close()
tmp_file.close()
def wipe_ib_logfiles(self):
"""Destroys the iblogfiles.
If for some reason the selected log size in the conf changes from the
current size of the files MySQL will fail to start, so we delete the
files to be safe.
"""
LOG.info(_("Wiping ib_logfiles..."))
for index in range(2):
try:
utils.execute_with_timeout("sudo", "rm", "%s/ib_logfile%d"
% (MYSQL_BASE_DIR, index))
except ProcessExecutionError as pe:
# On restarts, sometimes these are wiped. So it can be a race
# to have MySQL start up before it's restarted and these have
                # to be deleted. That's why it's ok if they aren't found.
LOG.error("Could not delete logfile!")
LOG.error(pe)
if "No such file or directory" not in str(pe):
raise
def _write_mycnf(self, update_memory_mb, admin_password):
"""
Install the set of mysql my.cnf templates from dbaas-mycnf package.
The package generates a template suited for the current
container flavor. Update the os_admin user and password
to the my.cnf file for direct login from localhost
"""
LOG.info(_("Writing my.cnf templates."))
if admin_password is None:
admin_password = get_auth_password()
# As of right here, the admin_password contains the password to be
# applied to the my.cnf file, whether it was there before (and we
# passed it in) or we generated a new one just now (because we didn't
# find it).
LOG.debug(_("Installing my.cnf templates"))
pkg.pkg_install("dbaas-mycnf", self.TIME_OUT)
LOG.info(_("Replacing my.cnf with template."))
template_path = DBAAS_MYCNF % update_memory_mb
# replace my.cnf with template.
self._replace_mycnf_with_template(template_path, ORIG_MYCNF)
LOG.info(_("Writing new temp my.cnf."))
self._write_temp_mycnf_with_admin_account(ORIG_MYCNF, TMP_MYCNF,
admin_password)
# permissions work-around
LOG.info(_("Moving tmp into final."))
utils.execute_with_timeout("sudo", "mv", TMP_MYCNF, FINAL_MYCNF)
LOG.info(_("Removing original my.cnf."))
utils.execute_with_timeout("sudo", "rm", ORIG_MYCNF)
LOG.info(_("Symlinking final my.cnf."))
utils.execute_with_timeout("sudo", "ln", "-s", FINAL_MYCNF, ORIG_MYCNF)
self.wipe_ib_logfiles()
def start_mysql(self, update_db=False):
LOG.info(_("Starting mysql..."))
# This is the site of all the trouble in the restart tests.
        # Essentially what happens is that mysql start fails, but does not
# die. It is then impossible to kill the original, so
self._enable_mysql_on_boot()
try:
utils.execute_with_timeout("sudo", "/etc/init.d/mysql", "start")
except ProcessExecutionError:
# it seems mysql (percona, at least) might come back with [Fail]
# but actually come up ok. we're looking into the timing issue on
# parallel, but for now, we'd like to give it one more chance to
            # come up. So regardless of the execute_with_timeout() response,
            # we'll assume mysql comes up and check its status for a while.
pass
if not self.status.wait_for_real_status_to_change_to(
rd_models.ServiceStatuses.RUNNING,
self.state_change_wait_time, update_db):
LOG.error(_("Start up of MySQL failed!"))
# If it won't start, but won't die either, kill it by hand so we
            # don't let a rogue process wander around.
try:
utils.execute_with_timeout("sudo", "pkill", "-9", "mysql")
except ProcessExecutionError, p:
LOG.error("Error killing stalled mysql start command.")
LOG.error(p)
# There's nothing more we can do...
self.status.end_install_or_restart()
raise RuntimeError("Could not start MySQL!")
def start_db_with_conf_changes(self, updated_memory_mb):
LOG.info(_("Starting mysql with conf changes to memory(%s)...")
% updated_memory_mb)
LOG.info(_("inside the guest - self.status.is_mysql_running(%s)...")
% self.status.is_mysql_running)
if self.status.is_mysql_running:
LOG.error(_("Cannot execute start_db_with_conf_changes because "
"MySQL state == %s!") % self.status)
raise RuntimeError("MySQL not stopped.")
LOG.info(_("Initiating config."))
self._write_mycnf(updated_memory_mb, None)
self.start_mysql(True)
def is_installed(self):
#(cp16net) could raise an exception, does it need to be handled here?
version = pkg.pkg_version(self.MYSQL_PACKAGE_VERSION)
        return version is not None
| [
"[email protected]"
]
| |
1dcf7100804c87590039c11f80193c2082f9ad92 | 8d64f91ad22ddc10d313220fdd6d8748fcc620cb | /sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py | a53c04497daab42b17eda68dde1b98b87f67c17b | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | openapi-env-ppe/azure-sdk-for-python | da8202c39aa757b5eac956fc473ce28b8d075236 | 93c10270978f1d8c9d3728609866a8408c437630 | refs/heads/master | 2020-09-12T09:58:37.172980 | 2020-06-06T03:40:24 | 2020-06-06T03:40:24 | 222,384,881 | 0 | 0 | MIT | 2019-11-18T07:09:18 | 2019-11-18T07:09:17 | null | UTF-8 | Python | false | false | 4,060 | py | # coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_recognize_receipts.py
DESCRIPTION:
This sample demonstrates how to recognize US sales receipts from a file.
USAGE:
python sample_recognize_receipts.py
Set the environment variables with your own values before running the sample:
1) AZURE_FORM_RECOGNIZER_ENDPOINT - the endpoint to your Cognitive Services resource.
2) AZURE_FORM_RECOGNIZER_KEY - your Form Recognizer API key
"""
import os
class RecognizeReceiptsSample(object):
def recognize_receipts(self):
# [START recognize_receipts]
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient
endpoint = os.environ["AZURE_FORM_RECOGNIZER_ENDPOINT"]
key = os.environ["AZURE_FORM_RECOGNIZER_KEY"]
form_recognizer_client = FormRecognizerClient(
endpoint=endpoint, credential=AzureKeyCredential(key)
)
with open("sample_forms/receipt/contoso-allinone.jpg", "rb") as f:
poller = form_recognizer_client.begin_recognize_receipts(receipt=f)
receipts = poller.result()
for idx, receipt in enumerate(receipts):
print("--------Recognizing receipt #{}--------".format(idx))
receipt_type = receipt.fields.get("ReceiptType")
if receipt_type:
print("Receipt Type: {} has confidence: {}".format(receipt_type.value, receipt_type.confidence))
merchant_name = receipt.fields.get("MerchantName")
if merchant_name:
print("Merchant Name: {} has confidence: {}".format(merchant_name.value, merchant_name.confidence))
transaction_date = receipt.fields.get("TransactionDate")
if transaction_date:
print("Transaction Date: {} has confidence: {}".format(transaction_date.value, transaction_date.confidence))
print("Receipt items:")
for idx, item in enumerate(receipt.fields.get("Items").value):
print("...Item #{}".format(idx))
item_name = item.value.get("Name")
if item_name:
print("......Item Name: {} has confidence: {}".format(item_name.value, item_name.confidence))
item_quantity = item.value.get("Quantity")
if item_quantity:
print("......Item Quantity: {} has confidence: {}".format(item_quantity.value, item_quantity.confidence))
item_price = item.value.get("Price")
if item_price:
print("......Individual Item Price: {} has confidence: {}".format(item_price.value, item_price.confidence))
item_total_price = item.value.get("TotalPrice")
if item_total_price:
print("......Total Item Price: {} has confidence: {}".format(item_total_price.value, item_total_price.confidence))
subtotal = receipt.fields.get("Subtotal")
if subtotal:
print("Subtotal: {} has confidence: {}".format(subtotal.value, subtotal.confidence))
tax = receipt.fields.get("Tax")
if tax:
print("Tax: {} has confidence: {}".format(tax.value, tax.confidence))
tip = receipt.fields.get("Tip")
if tip:
print("Tip: {} has confidence: {}".format(tip.value, tip.confidence))
total = receipt.fields.get("Total")
if total:
print("Total: {} has confidence: {}".format(total.value, total.confidence))
print("--------------------------------------")
# [END recognize_receipts]
if __name__ == '__main__':
sample = RecognizeReceiptsSample()
sample.recognize_receipts()
| [
"[email protected]"
]
| |
9e8c11f95b2b779b5172eebc98046e5a2585afc2 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/pandas/_testing/_io.py | 4c7e669f947341ed8362b802a9e8836484fe7e05 | [
"MIT"
]
| permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 12,024 | py | from __future__ import annotations
import bz2
from functools import wraps
import gzip
import socket
from typing import (
TYPE_CHECKING,
Any,
Callable,
)
import zipfile
from pandas._typing import (
FilePath,
ReadPickleBuffer,
)
from pandas.compat import get_lzma_file
from pandas.compat._optional import import_optional_dependency
import pandas as pd
from pandas._testing._random import rands
from pandas._testing.contexts import ensure_clean
from pandas.io.common import urlopen
if TYPE_CHECKING:
from pandas import (
DataFrame,
Series,
)
# skip tests on exceptions with these messages
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client & urllib.error
# because it imports many things from the stdlib
import http.client
import urllib.error
return (
OSError,
http.client.HTTPException,
TimeoutError,
urllib.error.URLError,
socket.timeout,
)
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = ()
return dec(f)
else:
return dec
return wrapper
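# Illustrative sketch only (not part of pandas): demonstrates the two call
# styles optional_args enables for a decorator, bare (@_retry) and
# parametrised (@_retry(times=3)). `_retry` is a made-up decorator that exists
# only to illustrate the pattern.
@optional_args
def _retry(t, times=2):
    @wraps(t)
    def wrapper(*args, **kwargs):
        last_exc = None
        for _ in range(times):
            try:
                return t(*args, **kwargs)
            except Exception as exc:  # deliberately broad; this is only a sketch
                last_exc = exc
        raise last_exc
    return wrapper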
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=False,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to OSError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.erno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas import _testing as tm
>>> @tm.network
... def test_network():
... with pd.io.common.urlopen("rabbit://bonanza.com"):
... pass
>>> test_network() # doctest: +SKIP
Traceback
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @tm.network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise OSError("Failure Message")
>>> test_something_with_yahoo() # doctest: +SKIP
Traceback (most recent call last):
...
OSError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @tm.network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something() # doctest: +SKIP
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
import pytest
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
pytest.skip(
f"May not have network connectivity because cannot connect to {url}"
)
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
            if not errno and hasattr(err, "reason"):
# error: "Exception" has no attribute "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
pytest.skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
pytest.skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes) or raise_on_error:
raise
else:
pytest.skip(
f"Skipping test due to lack of connectivity and error {err}"
)
return wrapper
with_connectivity_check = network
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if OSError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no OSError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url, timeout=20) as response:
# Timeout just in case rate-limiting is applied
if response.status != 200:
return False
except error_classes:
return False
else:
return True
# ------------------------------------------------------------------
# File-IO
def round_trip_pickle(
obj: Any, path: FilePath | ReadPickleBuffer | None = None
) -> DataFrame | Series:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: str | None = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: str | None = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "zstd":
compress_method = import_optional_dependency("zstandard").open
elif compression == "xz":
compress_method = get_lzma_file()
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
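# Illustrative sketch only (not part of pandas): round-trips a tiny CSV
# payload through write_to_compressed() and read_csv; the file name and
# payload are made up for the example.
def _example_write_to_compressed_roundtrip():
    with ensure_clean("__example__.csv.gz") as path:
        write_to_compressed("gzip", path, b"a,b\n1,2\n")
        return pd.read_csv(path, compression="gzip")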
# ------------------------------------------------------------------
# Plotting
def close(fignum=None):
from matplotlib.pyplot import (
close as _close,
get_fignums,
)
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
| [
"[email protected]"
]
| |
1d1507e47706ec4d6148821d1c194eafc2b495f8 | 353b1065e5dfce1878d367bf9758387cf34ce52f | /gallery/migrations/0003_auto_20200512_1841.py | 864a55eddd5bddc5a6e66705c875d147284dc73b | []
| no_license | Vaibhav-21-git/Gym-Website-Project | 62d19c2708f602b165fb4fecd0659a56b38d3008 | 9ff78ae40a8e14ffdbce4f87e2b9915a8fe5bc0d | refs/heads/master | 2022-11-28T04:16:44.877412 | 2020-08-06T09:48:04 | 2020-08-06T09:48:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # Generated by Django 3.0.6 on 2020-05-12 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gallery', '0002_auto_20200512_1826'),
]
operations = [
migrations.AlterField(
model_name='gallery',
name='image',
field=models.ImageField(upload_to='gallery/%Y/%m/%d'),
),
]
| [
"[email protected]"
]
| |
12fe1e9ca7b7d66b0262bd3120a66ba3ee706e76 | 8bcd0d995d5441f4519b8b7b8d23391bfb7a9e28 | /Python Homework/20-09-18-week02/上机/daydayup-three.py | b863d82a49aeddd26c0ab4dc0c620b8ba9522f0a | []
| no_license | EdwinVan/Python | 6cd193b52929521a4bbe35537754cdbb933e8f7d | 5c853f754ada423dff9af68ed278336deda21ca0 | refs/heads/master | 2023-05-08T03:11:01.575372 | 2021-06-05T06:11:32 | 2021-06-05T06:11:32 | 299,629,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Daydayup365-“三天打鱼两天晒网”式学习
# 2020/09/18
# fyj
dayup,dayfactor = 1.0, 0.01 #初始设为1.0,每天变化为前一天的%1
for i in range(365):
if i % 5 in [0, 1, 2]:
dayup = dayup * (1 + dayfactor) # 提升
else:
dayup = dayup * (1 - dayfactor) # 下降
print("三天打鱼两天晒网式学习后一年:{:.2f}." .format(dayup))
| [
"[email protected]"
]
| |
497e63b9b10e6909610456e26aa112b51033dba3 | 8481e401985cfbe5969103fb0a8e6cbdc7cb4c30 | /manage.py | 287c640bd224fd267edb3ae714125c0ec08ab905 | []
| no_license | crowdbotics-apps/test-18441 | 00523c529eac54cc1b75dd0dab3c6abde2ce6869 | ce112e391c6ec1750f661112f818cded2927e9f7 | refs/heads/master | 2022-11-13T13:41:59.355446 | 2020-06-26T22:34:43 | 2020-06-26T22:34:43 | 275,259,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_18441.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
79b9c9c08b7324f14dbbf8e8b1796cdb4db1abdc | ffa970f1ca42e70685a89a62df9c8f2f3caa784c | /nifpga_dll_calls_5775.py | fb2641013dd2eb535db4bf0b04f67700f9ab3771 | []
| no_license | kalyanramu/FlexRIO_Python | 57db5988a55358100fdbd89277438659010933d0 | f82ee6dff413fc4ce5e880b8200bd556a3a57362 | refs/heads/master | 2020-12-29T11:48:20.440358 | 2020-04-13T03:30:14 | 2020-04-13T03:30:14 | 238,596,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,016 | py | #C:\Program Files (x86)\National Instruments\Shared\ExternalCompilerSupport\C\include
from ctypes import *
class nifga_dll():
dll_path = r"C:\Windows\System32\niflexrioapi.dll"
def __init__(self, session):
try:
self.nifpga_dll = cdll.LoadLibrary(self.dll_path)
self.nifpga_session = session._session
except Exception as error:
print('Error Initializing NI FPGA DLL :', error)
raise
def wait_for_io_done(self, timeout =5):
#int32_t niFlexRIO_WaitForIoReady(uint32_t session in, int32_t timeoutInMs, uint32_t *ioReady, int32_t *ioError);
time_out = c_uint32(timeout)
io_ready = c_uint32(0)
io_error = c_int32(0)
        io_ready_ptr = byref(io_ready)   # pass by reference so the DLL can write the flag back
        io_error_ptr = byref(io_error)
try:
status = self.nifpga_dll.niFlexRIO_WaitForIoReady(self.nifpga_session,time_out,io_ready_ptr, io_error_ptr)
if status < 0:
raise Exception
return status
except Exception as error:
print('Error during Wait IO Done function call ', error)
raise
def configure_stream_finite(self, stream_instance, num_samples):
#int32_t niFlexRIO_ConfigureStreamFinite(uint32_t session in, uint32_t instance, const uint64_t num samples);
c_stream_instance = c_uint32(stream_instance)
c_num_samples = c_uint64(num_samples)
try:
status = self.nifpga_dll.niFlexRIO_ConfigureStreamFinite(self.nifpga_session,c_stream_instance,c_num_samples)
if status < 0:
raise Exception
return status
except Exception as error:
print('Error during Stream Finite call ', error)
raise
def configure_stream_EnabledChannels(self, stream_instance, channel_list, channel_count):
#int32_t niFlexRIO_ConfigureStreamEnabledChannels(uint32_t session in, uint32_t instance, const char* channelsEnabled, int32_t* channelCount);
c_stream_instance = c_uint32(stream_instance)
c_channel_list = c_char_p(channel_list)
c_channel_count = c_int32(channel_count)
try:
status = self.nifpga_dll.niFlexRIO_ConfigureStreamEnabledChannels(self.nifpga_session,c_stream_instance,c_channel_list, pointer(c_channel_count))
if status < 0:
raise Exception
return status
except Exception as error:
print('Error during Stream Enabled Channels ', error)
raise
def commit(self):
#int32_t niFlexRIO_Commit(uint32_t session in);
try:
status = self.nifpga_dll.niFlexRIO_Commit(self.nifpga_session)
if status < 0:
raise Exception
return status
except Exception as error:
print('Error during Commit ', error)
raise
def clear_stream(self, stream_number):
#int32_t niFlexRIO_ClearStream(uint32_t session, uint32_t stream number);
c_stream_number = c_uint32(stream_number)
try:
status = self.nifpga_dll.niFlexRIO_ClearStream(self.nifpga_session, c_stream_number)
if status < 0:
raise Exception
return status
except Exception as error:
print('Error during Clear Stream ', error)
raise
def start_stream(self, stream_number):
#int32_t niFlexRIO_StartStream(uint32_t session, uint32_t stream number);
c_stream_number = c_uint32(stream_number)
try:
status = self.nifpga_dll.niFlexRIO_StartStream(self.nifpga_session, c_stream_number)
if status < 0:
raise Exception
return status
except Exception as error:
            print('Error during Start Stream ', error)
raise
def read_data(self, stream_instance, time_out, samples_per_wfm, num_Wfms = 1):
#int32_t niFlexRIO_ReadStream2DF64(NiFpga_Session session, int32_t streamInstance, int32_t timeoutInMs,
# size_t numberOfWfms, size_t numberOfElements, double* elementArray, FlexRIO_WfmInfo* wfmInfoArray);
c_stream_instance = c_int32(stream_instance)
c_timeout = c_int32(time_out)
c_numWfms = c_size_t(num_Wfms)
c_numElements = c_size_t(samples_per_wfm)
        c_elementArray = (c_double * (num_Wfms * samples_per_wfm))()  # instantiate the output array
try:
            status = self.nifpga_dll.niFlexRIO_ReadStream2DF64(self.nifpga_session, c_stream_instance, c_timeout, c_numWfms, c_numElements, c_elementArray, None)
if status < 0:
raise Exception
return c_elementArray
except Exception as error:
            print('Error during Read Stream ', error)
raise
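# Illustrative sketch only (not part of the original file): typical call order
# for a finite acquisition with the wrapper above. The session object, stream
# instance 0, channel string and sample count are placeholders that depend on
# the actual FPGA design and bitfile.
def _example_finite_acquisition(session, samples=1000):
    fpga = nifga_dll(session)
    fpga.configure_stream_EnabledChannels(0, b"0", 1)
    fpga.configure_stream_finite(0, samples)
    fpga.commit()
    fpga.start_stream(0)
    fpga.wait_for_io_done(timeout=5000)
    return fpga.read_data(0, time_out=5000, samples_per_wfm=samples)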
| [
"[email protected]"
]
| |
ba180eff9a73705043105ef9940ba13d10c5214a | 59d5a801dd8361fe2b68f0cdfc1a0c06bbe9d275 | /Competition/恶意样本检测/features/backup/v2/strings.py | 2a2bcc9a224d91e07b94570ea11e642a4d6cbb9c | []
| no_license | HanKin2015/Machine_to_DeepingLearning | 2ff377aa68655ca246eb19bea20fec232cec5d77 | 58fa8d06ef8a8eb0762e7cbd32a09552882c5412 | refs/heads/master | 2023-01-25T01:16:41.440064 | 2023-01-18T08:23:49 | 2023-01-18T08:23:49 | 134,238,811 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,246 | py | import subprocess
import os
import numpy
from sklearn.ensemble import RandomForestClassifier
from sklearn import tree
from sklearn.feature_extraction import FeatureHasher
from sklearn.model_selection import train_test_split
import time
from log import logger
import pandas as pd
SAMPLE_PATH = './test/' #AIFirst_data
TRAIN_WHITE_PATH = SAMPLE_PATH+'train/white/' # 训练集白样本路径
TRAIN_BLACK_PATH = SAMPLE_PATH+'train/black/' # 训练集黑样本路径
TEST_PATH = SAMPLE_PATH+'test/' # 测试集样本路径
DATA_PATH = './data/' # 数据路径
TRAIN_WHITE_STRING_FEATURES_PATH = DATA_PATH+'train_white_string_features.csv' # 训练集白样本数据集路径
TRAIN_BLACK_STRING_FEATURES_PATH = DATA_PATH+'train_black_string_features.csv' # 训练集黑样本数据集路径
TEST_STRING_FEATURES_PATH = DATA_PATH+'test_string_features.csv' # 测试集样本数据集路径
TRAIN_WHITE_CUSTOM_STRINGS_PATH = DATA_PATH+'train_white_strings.csv' # 训练集白样本数据集路径
TRAIN_BLACK_CUSTOM_STRINGS_PATH = DATA_PATH+'train_black_strings.csv' # 训练集黑样本数据集路径
TEST_CUSTOM_STRINGS_PATH = DATA_PATH+'test_strings.csv' # 测试集样本数据集路径
# 线程数量
THREAD_NUM = 64
# 创建数据文件夹
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
def extract_strings(filepath):
    '''This method extracts the strings from a file using the strings command on a Unix OS.'''
strings = subprocess.Popen(['strings', filepath], stdout=subprocess.PIPE).communicate()[0].decode('utf-8').split('\n')
return strings
def get_string_features(all_strings):
data_features = []
hasher = FeatureHasher(20000) # We initialize the featurehasher using 20,000 features
for all_string in all_strings:
# store string features in dictionary form
string_features = {}
for string in all_string:
string_features[string] = 1
# hash the features using the hashing trick
hashed_features = hasher.transform([string_features])
# do some data munging to get the feature array
hashed_features = hashed_features.todense()
hashed_features = numpy.asarray(hashed_features)
hashed_features = hashed_features[0]
data_features.extend([hashed_features])
return data_features
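# Illustrative sketch only (not part of the original script): the hashing
# trick used above, applied to a toy input. Every distinct string becomes a
# feature with value 1 and is hashed into a fixed-width (here 8-dim) vector.
def _example_hashing_trick():
    hasher = FeatureHasher(8)
    toy_strings = {'GetProcAddress': 1, 'LoadLibraryA': 1}
    return numpy.asarray(hasher.transform([toy_strings]).todense())[0]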
def training(train_black_path, train_white_path):
"""
"""
train_black_dataset = pd.read_csv(train_black_path, header=None)
train_white_dataset = pd.read_csv(train_white_path, header=None)
train_dataset = pd.concat([train_black_dataset, train_white_dataset], ignore_index=True)
logger.info('train_dataset shape: ({}, {}).'.format(train_dataset.shape[0], train_dataset.shape[1]))
X = train_dataset.values
y = [0 for _ in range(train_black_dataset.shape[0])] + [1 for _ in range(train_white_dataset.shape[0])]
logger.info('X shape: ({}, {}).'.format(len(X), len(X[0])))
logger.info('y len: {}.'.format(len(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2022)
clf = RandomForestClassifier(n_estimators=100, class_weight="balanced", random_state=2022)
clf.fit(X_train, y_train)
    logger.info('Random Forest Classifier on hold-out (80% Train, 20% Test): {}.'.format(clf.score(X_test, y_test)))
logger.info([x for x in clf.feature_importances_ if x > 0.1])
def training_stratify(train_black_path, train_white_path):
train_black_dataset = pd.read_csv(train_black_path, header=None)
train_white_dataset = pd.read_csv(train_white_path, header=None)
train_dataset = pd.concat([train_black_dataset, train_white_dataset], ignore_index=True)
logger.info('train_dataset shape: ({}, {}).'.format(train_dataset.shape[0], train_dataset.shape[1]))
train_dataset = train_dataset.astype(int)
X = train_dataset.values
y = [0 for _ in range(train_black_dataset.shape[0])] + [1 for _ in range(train_white_dataset.shape[0])]
logger.info('X shape: ({}, {}).'.format(len(X), len(X[0])))
logger.info('y len: {}.'.format(len(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2022, stratify=y)
clf = RandomForestClassifier(n_estimators=100, class_weight="balanced", random_state=2022)
clf.fit(X_train, y_train)
    logger.info('Random Forest Classifier on hold-out (80% Train, 20% Test): {}.'.format(clf.score(X_test, y_test)))
logger.info([x for x in clf.feature_importances_ if x > 0.1])
def training_(train_black_path, train_white_path):
train_black_dataset = pd.read_csv(train_black_path, header=None)
train_white_dataset = pd.read_csv(train_white_path, header=None)
train_dataset = pd.concat([train_black_dataset, train_white_dataset], ignore_index=True)
logger.info('train_dataset shape: ({}, {}).'.format(train_dataset.shape[0], train_dataset.shape[1]))
X = train_dataset.loc[:, 1::].values
y = [0 for _ in range(train_black_dataset.shape[0])] + [1 for _ in range(train_white_dataset.shape[0])]
logger.info('X shape: ({}, {}).'.format(len(X), len(X[0])))
logger.info('y len: {}.'.format(len(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2022)
clf = RandomForestClassifier(n_estimators=100, class_weight="balanced", random_state=2022)
clf.fit(X_train, y_train)
    logger.info('Random Forest Classifier on hold-out (80% Train, 20% Test): {}.'.format(clf.score(X_test, y_test)))
logger.info([x for x in clf.feature_importances_ if x > 0.1])
def training_x(train_black_features, train_white_features):
"""很有意思去掉这个也提高了class_weight="balanced"
"""
X = train_black_features + train_white_features
y = [0 for _ in range(len(train_black_features))] + [1 for _ in range(len(train_white_features))]
logger.info('X shape: ({}, {}).'.format(len(X), len(X[0])))
logger.info('y len: {}.'.format(len(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2022)
clf = RandomForestClassifier(n_estimators=100, random_state=2022)
clf.fit(X_train, y_train)
    logger.info('Random Forest Classifier on hold-out (80% Train, 20% Test): {}.'.format(clf.score(X_test, y_test)))
logger.info([x for x in clf.feature_importances_ if x > 0.1])
def training_y(train_black_features, train_white_features):
"""
"""
X = train_white_features + train_black_features
y = [1 for _ in range(len(train_white_features))] + [0 for _ in range(len(train_black_features))]
logger.info('X shape: ({}, {}).'.format(len(X), len(X[0])))
logger.info('y len: {}.'.format(len(y)))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=2022)
clf = RandomForestClassifier(n_estimators=100, class_weight="balanced", random_state=2022)
clf.fit(X_train, y_train)
    logger.info('Random Forest Classifier on hold-out (80% Train, 20% Test): {}.'.format(clf.score(X_test, y_test)))
logger.info([x for x in clf.feature_importances_ if x > 0.1])
def save2csv(data, csv_path):
"""数据保存到本地csv文件中
"""
df = pd.DataFrame(data)
df.to_csv(csv_path, sep=',', encoding='utf-8', index=False, header=False)
def string_features_processing(sample_path, save_path):
"""字符串特征处理(获取)
"""
file_names = os.listdir(sample_path)
logger.info('{} file count: {}.'.format(sample_path, len(file_names)))
all_strings = [extract_strings(sample_path + file_names[i]) for i in range(len(file_names))]
logger.info('all_strings count: {}.'.format(len(all_strings)))
string_features = get_string_features(all_strings)
logger.info('string_features count: {}.'.format(len(string_features)))
save2csv(string_features, save_path)
return string_features
def main():
train_white_features = string_features_processing(TRAIN_WHITE_PATH, TRAIN_WHITE_STRING_FEATURES_PATH)
train_black_features = string_features_processing(TRAIN_BLACK_PATH, TRAIN_BLACK_STRING_FEATURES_PATH)
test_features = string_features_processing(TEST_PATH, TEST_STRING_FEATURES_PATH)
training_x(train_black_features, train_white_features)
training_x(train_white_features, train_black_features)
training_y(train_black_features, train_white_features)
training(TRAIN_BLACK_STRING_FEATURES_PATH, TRAIN_WHITE_STRING_FEATURES_PATH)
training(TRAIN_WHITE_STRING_FEATURES_PATH, TRAIN_BLACK_STRING_FEATURES_PATH)
training_stratify(TRAIN_BLACK_STRING_FEATURES_PATH, TRAIN_WHITE_STRING_FEATURES_PATH)
training_stratify(TRAIN_WHITE_STRING_FEATURES_PATH, TRAIN_BLACK_STRING_FEATURES_PATH)
training_(TRAIN_BLACK_CUSTOM_STRINGS_PATH, TRAIN_WHITE_CUSTOM_STRINGS_PATH)
if __name__ == '__main__':
start_time = time.time()
main()
end_time = time.time()
logger.info('process spend {} s.'.format(round(end_time - start_time, 3)))
| [
"[email protected]"
]
| |
786a5dded9fde0f9385c8a94df1e5896755d4613 | 2c74bb301f1ed83b79254944183ac5a18a639fdf | /homeassistant/components/devolo_home_network/binary_sensor.py | 2e87bd180b1c5fcc960d698d5298ae2db6a29232 | [
"Apache-2.0"
]
| permissive | Adminiuga/home-assistant | 5bec93007ddac1a268cc359bf7e48530c5f73b38 | dcf68d768e4f628d038f1fdd6e40bad713fbc222 | refs/heads/dev | 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 | Apache-2.0 | 2023-02-22T06:14:31 | 2018-03-05T14:11:09 | Python | UTF-8 | Python | false | false | 3,169 | py | """Platform for binary sensor integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from devolo_plc_api.device import Device
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONNECTED_PLC_DEVICES, CONNECTED_TO_ROUTER, DOMAIN
from .entity import DevoloEntity
def _is_connected_to_router(entity: DevoloBinarySensorEntity) -> bool:
"""Check, if device is attached to the router."""
return all(
device["attached_to_router"]
for device in entity.coordinator.data["network"]["devices"]
if device["mac_address"] == entity.device.mac
)
@dataclass
class DevoloBinarySensorRequiredKeysMixin:
"""Mixin for required keys."""
value_func: Callable[[DevoloBinarySensorEntity], bool]
@dataclass
class DevoloBinarySensorEntityDescription(
BinarySensorEntityDescription, DevoloBinarySensorRequiredKeysMixin
):
"""Describes devolo sensor entity."""
SENSOR_TYPES: dict[str, DevoloBinarySensorEntityDescription] = {
CONNECTED_TO_ROUTER: DevoloBinarySensorEntityDescription(
key=CONNECTED_TO_ROUTER,
device_class=BinarySensorDeviceClass.PLUG,
entity_category=EntityCategory.DIAGNOSTIC,
entity_registry_enabled_default=False,
icon="mdi:router-network",
name="Connected to router",
value_func=_is_connected_to_router,
),
}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Get all devices and sensors and setup them via config entry."""
device: Device = hass.data[DOMAIN][entry.entry_id]["device"]
coordinators: dict[str, DataUpdateCoordinator] = hass.data[DOMAIN][entry.entry_id][
"coordinators"
]
entities: list[BinarySensorEntity] = []
if device.plcnet:
entities.append(
DevoloBinarySensorEntity(
coordinators[CONNECTED_PLC_DEVICES],
SENSOR_TYPES[CONNECTED_TO_ROUTER],
device,
entry.title,
)
)
async_add_entities(entities)
class DevoloBinarySensorEntity(DevoloEntity, BinarySensorEntity):
"""Representation of a devolo binary sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: DevoloBinarySensorEntityDescription,
device: Device,
device_name: str,
) -> None:
"""Initialize entity."""
self.entity_description: DevoloBinarySensorEntityDescription = description
super().__init__(coordinator, device, device_name)
@property
def is_on(self) -> bool:
"""State of the binary sensor."""
return self.entity_description.value_func(self)
| [
"[email protected]"
]
| |
e0a9b0e049c5cdf5f4693d5b8e8739d79ea91337 | b55bfb7e3587772a0949dcaaf3f839330d9609bc | /www/user/amand_tihon/createDIP.py | 08e6aa227025e143b9ea22511914dee4ddf2e885 | []
| no_license | baptistelabat/gedasymbols | 4cdd403c93551b3f553470b83abd02fdecfdeeaa | b9a118308ee26e29458b3ea365958d76f3d22ca0 | refs/heads/master | 2021-01-17T23:10:12.043442 | 2015-03-04T14:01:21 | 2015-03-04T14:01:21 | 31,658,196 | 1 | 0 | null | 2015-03-04T13:24:40 | 2015-03-04T13:24:40 | null | UTF-8 | Python | false | false | 5,478 | py | #! /usr/bin/python3.1
# -*- coding: utf-8 -*-
# kate: space-indent on; indent-width 2; mixedindent off; indent-mode python;
# Copyright (C) 2009 Amand Tihon <[email protected]>
#
# You can redistribute this file and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation,
# either version 3 of the License or (at your option) any later version.
import sys, getopt
class DIP:
# Some config
PinSize = 8000 # 80 mil
OvalPinSize = 6000 # 60 mil
DrillSize = 1500 # 15 mil
PadLength = 5000 # 50 mil
BothPads = True # On component side as well as on solder side
FirstSquare = True # pin no 1 is square
LineWidth = 1500 # Silk line width
def __init__(self, pins, width, oval):
self.pins = pins
self.rows = pins//2
self.width = width
self.oval = oval
self.name = "U000"
self.value = ""
# Description
d1 = "%i-pins Dual in-line package" % self.pins
if width==300:
d2 = ", narrow (300 mil)"
elif width==400:
d2 = ", medium wide (400 mil)"
elif width==600:
d2 = ", wide (600 mil)"
else:
d2 = ", %i mil"
if oval:
d3 = ", oval pads"
else:
d3 = ""
self.description = "%s%s%s" % (d1, d2, d3)
# Some values nearly out of nowhere for centerpoint
self.mx = 20000
self.my = 150000
# Position of the first pin
self.startx = -width*100//2 # *100 because pcb uses 100th of mils
if self.rows % 2:
self.starty = -(self.rows//2) * 10000
else:
self.starty = -(self.rows//2) * 10000 + 5000
# Label position, orientation, scale
self.tx = self.startx + 10000
self.ty = self.starty - 15000
self.tdir = 0
self.tscale = 100
self.flags = ""
def out(self):
print('Element[ "" "%s" "%s" "%s" %i %i %i %i %i %i ""]' %
(self.description, self.name, self.value, self.mx, self.my,
self.tx, self.ty, self.tdir, self.tscale))
print('(')
self.makepins()
self.makesilk()
print(')')
def makepins(self):
"""Draw pins"""
if self.oval:
self.PinSize = self.OvalPinSize
n = 1
sq = ""
if self.FirstSquare:
sq = "square"
r = 1
x = self.startx
y = self.starty
# Top down for first column
while r <= self.rows:
# draw pin
print('\tPin[%i %i %i 2000 %i %i "%i" "%i" "%s"]' %
(x, y, self.PinSize, int(self.PinSize*1.1), self.DrillSize,
n, n, sq))
if self.oval:
# draw pad
px = x - (self.PadLength//2)
print('\tPad[%i %i %i %i %i 2000 %i "%i" "%i" "%s"]' %
(px, y, px+self.PadLength, y, self.PinSize, int(self.PinSize*1.1),
n, n, "onsolder,square" if sq else "onsolder"))
if self.BothPads:
print('\tPad[%i %i %i %i %i 2000 %i "%i" "%i" "%s"]' %
(px, y, px+self.PadLength, y, self.PinSize, int(self.PinSize*1.1),
n, n, sq))
sq = ""
n += 1
r += 1
y += 10000
# Bottom up for the second column
x += self.width * 100
y -= 10000
while r > 1:
# draw pin
print('\tPin[%i %i %i 2000 %i %i "%i" "%i" "%s"]' %
(x, y, self.PinSize, int(self.PinSize*1.1), self.DrillSize,
n, n, sq))
if self.oval:
# draw pad
px = x - (self.PadLength//2)
print('\tPad[%i %i %i %i %i 2000 %i "%i" "%i" "%s"]' %
(px, y, px+self.PadLength, y, self.PinSize, int(self.PinSize*1.1),
n, n, "onsolder,square" if sq else "onsolder"))
if self.BothPads:
print('\tPad[%i %i %i %i %i 2000 %i "%i" "%i" "%s"]' %
(px, y, px+self.PadLength, y, self.PinSize, int(self.PinSize*1.1),
n, n, sq))
n += 1
r -= 1
y -= 10000
def makesilk(self):
"""Draw silkscreen"""
r = 1
x1 = self.startx + 2500
y1 = self.starty - 7500
x2 = self.startx + (self.width * 100) - 2500
y2 = self.starty + (self.rows * 10000) - 2500
# Lines
print('\tElementLine[%i %i %i %i %i]' % (x1, y1, -5000, y1, self.LineWidth))
print('\tElementLine[%i %i %i %i %i]' % (5000, y1, x2, y1, self.LineWidth))
print('\tElementLine[%i %i %i %i %i]' % (x1, y1, x1, y2, self.LineWidth))
print('\tElementLine[%i %i %i %i %i]' % (x1, y2, x2, y2, self.LineWidth))
print('\tElementLine[%i %i %i %i %i]' % (x2, y1, x2, y2, self.LineWidth))
# Index
print('\tElementArc[%i %i 5000 5000 0 180 %i]' %
((x1+x2)//2, y1, self.LineWidth))
def usage():
print("Usage: %s [-w spacing] [-o] npins" % sys.argv[0], file=sys.stderr)
print("The 'npins' argument must be even.", file=sys.stderr)
print(" -w spacing pin spacing (default is 300 mil)", file=sys.stderr)
print(" -o Make oval pads", file=sys.stderr)
def main():
width = 300
oval = False
try:
opts, args = getopt.getopt(sys.argv[1:], "ow:")
except getopt.GetoptError as err:
usage()
print("Error:", err, file=sys.stderr)
exit(1)
try:
npins = int(args[0])
except ValueError as err:
usage()
exit(1)
if npins % 2:
usage()
exit(1)
for arg in opts:
if arg[0] == '-o':
oval = True
elif arg[0] == '-w':
try:
width = int(arg[1])
except ValueError as err:
usage()
exit(1)
elt = DIP(npins, width, oval)
elt.out()
if __name__ == "__main__":
main()
exit(0) | [
""
]
| |
43924e80cdbe766dc459183fc0228ac12c6fdaf8 | 59cd3ca83aadc9a4c830e6dda57922e682e7a317 | /django_project/geocontext/utilities/xml.py | dfe023e8b8f88d56907fb2d9629099c106a223d1 | []
| no_license | kartoza/geocontext | a8df6f4f17086301a6e7384056e49784fa6e2580 | ae35b96356e3e21c18412b3e5fc529d8568866b4 | refs/heads/develop | 2022-09-11T06:15:15.108151 | 2022-09-01T13:37:01 | 2022-09-01T13:37:01 | 126,920,992 | 6 | 11 | null | 2022-09-01T13:37:02 | 2018-03-27T03:05:13 | Python | UTF-8 | Python | false | false | 1,094 | py | from xml.etree import ElementTree
import logging
LOGGER = logging.getLogger(__name__)
def get_bounding_box_srs(service, content):
root = ElementTree.fromstring(content)
tag = root.tag.split('}')
layer_tag = 'Layer'
bound_tag = 'BoundingBox'
name_tag = 'Name'
if len(tag) > 1:
layer_tag = tag[0] + '}' + layer_tag
bound_tag = tag[0] + '}' + bound_tag
name_tag = tag[0] + '}' + name_tag
layers = root.iter(layer_tag)
out = {}
layer_name = service.split(':')[1] if len(service.split(':')) > 1 else service
for layer in layers:
if layer.find(name_tag) is None:
continue
if layer.find(name_tag).text == layer_name or layer.find(name_tag).text == service:
out['srs'] = layer.find(bound_tag).get('CRS')
out['bbox'] = '{},{},{},{}'.format(
layer.find(bound_tag).get('minx'),
layer.find(bound_tag).get('miny'),
layer.find(bound_tag).get('maxx'),
layer.find(bound_tag).get('maxy'),
)
return out
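# Illustrative sketch only (not part of the original module): feeds a minimal
# WMS 1.3.0 GetCapabilities fragment through get_bounding_box_srs(); the layer
# name and extent below are made up.
def _example_bounding_box():
    content = (
        '<WMS_Capabilities xmlns="http://www.opengis.net/wms"><Capability>'
        '<Layer><Name>rivers</Name>'
        '<BoundingBox CRS="EPSG:4326" minx="16" miny="-35" maxx="33" maxy="-22"/>'
        '</Layer></Capability></WMS_Capabilities>'
    )
    return get_bounding_box_srs('workspace:rivers', content)  # -> {'srs': 'EPSG:4326', 'bbox': '16,-35,33,-22'}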
| [
"[email protected]"
]
| |
a88f5c1c605745175557dbfed78336036dd4ee49 | a7b07e14f58008e4c9567a9ae67429cedf00e1dc | /docs/jnpr_healthbot_swagger/swagger_client/models/__init__.py | c8d66650da9abc89ba8d93e7de9bbe4d54dc38d7 | [
"Apache-2.0"
]
| permissive | dmontagner/healthbot-py-client | 3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9 | 0952e0a9e7ed63c9fe84879f40407c3327735252 | refs/heads/master | 2020-08-03T12:16:38.428848 | 2019-09-30T01:57:24 | 2019-09-30T01:57:24 | 211,750,200 | 0 | 0 | Apache-2.0 | 2019-09-30T01:17:48 | 2019-09-30T01:17:47 | null | UTF-8 | Python | false | false | 14,798 | py | # coding: utf-8
# flake8: noqa
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into model package
from swagger_client.models.affected_groups import AffectedGroups
from swagger_client.models.apply_macro_schema import ApplyMacroSchema
from swagger_client.models.applymacro_schema_data import ApplymacroSchemaData
from swagger_client.models.ca_profile_schema import CaProfileSchema
from swagger_client.models.command_rpc import CommandRpc
from swagger_client.models.commit_job import CommitJob
from swagger_client.models.datastore_schema import DatastoreSchema
from swagger_client.models.destination_schema import DestinationSchema
from swagger_client.models.destination_schema_disk import DestinationSchemaDisk
from swagger_client.models.destination_schema_email import DestinationSchemaEmail
from swagger_client.models.destinations_schema import DestinationsSchema
from swagger_client.models.device_group_health_tree import DeviceGroupHealthTree
from swagger_client.models.device_group_schema import DeviceGroupSchema
from swagger_client.models.device_groups_schema import DeviceGroupsSchema
from swagger_client.models.device_health_schema import DeviceHealthSchema
from swagger_client.models.device_health_tree import DeviceHealthTree
from swagger_client.models.device_schema import DeviceSchema
from swagger_client.models.device_schema_i_agent import DeviceSchemaIAgent
from swagger_client.models.device_schema_openconfig import DeviceSchemaOpenconfig
from swagger_client.models.device_schema_snmp import DeviceSchemaSnmp
from swagger_client.models.device_schema_snmp_v2 import DeviceSchemaSnmpV2
from swagger_client.models.device_schema_variable import DeviceSchemaVariable
from swagger_client.models.device_schema_vendor import DeviceSchemaVendor
from swagger_client.models.device_schema_vendor_cisco import DeviceSchemaVendorCisco
from swagger_client.models.device_schema_vendor_juniper import DeviceSchemaVendorJuniper
from swagger_client.models.devicegroup_schema_authentication import DevicegroupSchemaAuthentication
from swagger_client.models.devicegroup_schema_authentication_password import DevicegroupSchemaAuthenticationPassword
from swagger_client.models.devicegroup_schema_authentication_ssh import DevicegroupSchemaAuthenticationSsh
from swagger_client.models.devicegroup_schema_authentication_ssl import DevicegroupSchemaAuthenticationSsl
from swagger_client.models.devicegroup_schema_logging import DevicegroupSchemaLogging
from swagger_client.models.devicegroup_schema_logging_i_agent import DevicegroupSchemaLoggingIAgent
from swagger_client.models.devicegroup_schema_logging_nativegpb import DevicegroupSchemaLoggingNativegpb
from swagger_client.models.devicegroup_schema_logging_nonsensorrules import DevicegroupSchemaLoggingNonsensorrules
from swagger_client.models.devicegroup_schema_logging_openconfig import DevicegroupSchemaLoggingOpenconfig
from swagger_client.models.devicegroup_schema_logging_reportsgeneration import DevicegroupSchemaLoggingReportsgeneration
from swagger_client.models.devicegroup_schema_logging_snmp import DevicegroupSchemaLoggingSnmp
from swagger_client.models.devicegroup_schema_logging_triggerevaluation import DevicegroupSchemaLoggingTriggerevaluation
from swagger_client.models.devicegroup_schema_nativegpb import DevicegroupSchemaNativegpb
from swagger_client.models.devicegroup_schema_notification import DevicegroupSchemaNotification
from swagger_client.models.devicegroup_schema_rawdata import DevicegroupSchemaRawdata
from swagger_client.models.devicegroup_schema_rawdata_summarize import DevicegroupSchemaRawdataSummarize
from swagger_client.models.devicegroup_schema_scheduler import DevicegroupSchemaScheduler
from swagger_client.models.devicegroup_schema_variable import DevicegroupSchemaVariable
from swagger_client.models.devicegroup_schema_variablevalue import DevicegroupSchemaVariablevalue
from swagger_client.models.devices_schema import DevicesSchema
from swagger_client.models.error import Error
from swagger_client.models.event import Event
from swagger_client.models.group_health_schema import GroupHealthSchema
from swagger_client.models.health_schema import HealthSchema
from swagger_client.models.inline_response_200 import InlineResponse200
from swagger_client.models.inline_response_200_1 import InlineResponse2001
from swagger_client.models.instance_schedule_state_schema import InstanceScheduleStateSchema
from swagger_client.models.instances_schedule_state_schema import InstancesScheduleStateSchema
from swagger_client.models.license_feature_schema import LicenseFeatureSchema
from swagger_client.models.license_features_schema import LicenseFeaturesSchema
from swagger_client.models.license_key_schema import LicenseKeySchema
from swagger_client.models.license_keys_schema import LicenseKeysSchema
from swagger_client.models.license_raw_key_schema import LicenseRawKeySchema
from swagger_client.models.license_raw_keys_schema import LicenseRawKeysSchema
from swagger_client.models.licensekey_schema_features import LicensekeySchemaFeatures
from swagger_client.models.local_certificate_schema import LocalCertificateSchema
from swagger_client.models.network_group_schema import NetworkGroupSchema
from swagger_client.models.network_groups_schema import NetworkGroupsSchema
from swagger_client.models.network_health_tree import NetworkHealthTree
from swagger_client.models.networkgroup_schema_logging import NetworkgroupSchemaLogging
from swagger_client.models.notification_schema import NotificationSchema
from swagger_client.models.notification_schema_httppost import NotificationSchemaHttppost
from swagger_client.models.notification_schema_httppost_basic import NotificationSchemaHttppostBasic
from swagger_client.models.notification_schema_kafkapublish import NotificationSchemaKafkapublish
from swagger_client.models.notification_schema_kafkapublish_sasl import NotificationSchemaKafkapublishSasl
from swagger_client.models.notification_schema_slack import NotificationSchemaSlack
from swagger_client.models.notifications_schema import NotificationsSchema
from swagger_client.models.playbook_schema import PlaybookSchema
from swagger_client.models.playbooks_schema import PlaybooksSchema
from swagger_client.models.profile_schema import ProfileSchema
from swagger_client.models.profile_schema_datasummarization import ProfileSchemaDatasummarization
from swagger_client.models.profile_schema_datasummarization_raw import ProfileSchemaDatasummarizationRaw
from swagger_client.models.profile_schema_security import ProfileSchemaSecurity
from swagger_client.models.profiles_schema import ProfilesSchema
from swagger_client.models.profiles_schema_profile import ProfilesSchemaProfile
from swagger_client.models.raw_data_summarizations_schema import RawDataSummarizationsSchema
from swagger_client.models.raw_schema import RawSchema
from swagger_client.models.raw_schema_datatype import RawSchemaDatatype
from swagger_client.models.raw_schema_path import RawSchemaPath
from swagger_client.models.report_generation_schema import ReportGenerationSchema
from swagger_client.models.report_schema import ReportSchema
from swagger_client.models.report_schema_canvaspanel import ReportSchemaCanvaspanel
from swagger_client.models.report_schema_graphcanvas import ReportSchemaGraphcanvas
from swagger_client.models.reports_schema import ReportsSchema
from swagger_client.models.retention_policies_schema import RetentionPoliciesSchema
from swagger_client.models.retention_policy_schema import RetentionPolicySchema
from swagger_client.models.rule_schema import RuleSchema
from swagger_client.models.rule_schema_argument import RuleSchemaArgument
from swagger_client.models.rule_schema_constant import RuleSchemaConstant
from swagger_client.models.rule_schema_dataifmissing import RuleSchemaDataifmissing
from swagger_client.models.rule_schema_field import RuleSchemaField
from swagger_client.models.rule_schema_formula import RuleSchemaFormula
from swagger_client.models.rule_schema_formula_1 import RuleSchemaFormula1
from swagger_client.models.rule_schema_formula_1_and import RuleSchemaFormula1And
from swagger_client.models.rule_schema_formula_1_or import RuleSchemaFormula1Or
from swagger_client.models.rule_schema_formula_1_unique import RuleSchemaFormula1Unique
from swagger_client.models.rule_schema_formula_1_unless import RuleSchemaFormula1Unless
from swagger_client.models.rule_schema_formula_count import RuleSchemaFormulaCount
from swagger_client.models.rule_schema_formula_dynamicthreshold import RuleSchemaFormulaDynamicthreshold
from swagger_client.models.rule_schema_formula_max import RuleSchemaFormulaMax
from swagger_client.models.rule_schema_formula_mean import RuleSchemaFormulaMean
from swagger_client.models.rule_schema_formula_microburst import RuleSchemaFormulaMicroburst
from swagger_client.models.rule_schema_formula_min import RuleSchemaFormulaMin
from swagger_client.models.rule_schema_formula_outlierdetection import RuleSchemaFormulaOutlierdetection
from swagger_client.models.rule_schema_formula_outlierdetection_algorithm import RuleSchemaFormulaOutlierdetectionAlgorithm
from swagger_client.models.rule_schema_formula_outlierdetection_algorithm_dbscan import RuleSchemaFormulaOutlierdetectionAlgorithmDbscan
from swagger_client.models.rule_schema_formula_outlierdetection_algorithm_dbscan_sensitivity import RuleSchemaFormulaOutlierdetectionAlgorithmDbscanSensitivity
from swagger_client.models.rule_schema_formula_outlierdetection_algorithm_kfold3sigma import RuleSchemaFormulaOutlierdetectionAlgorithmKfold3sigma
from swagger_client.models.rule_schema_formula_predict import RuleSchemaFormulaPredict
from swagger_client.models.rule_schema_formula_stddev import RuleSchemaFormulaStddev
from swagger_client.models.rule_schema_formula_userdefinedfunction import RuleSchemaFormulaUserdefinedfunction
from swagger_client.models.rule_schema_formula_userdefinedfunction_argument import RuleSchemaFormulaUserdefinedfunctionArgument
from swagger_client.models.rule_schema_function import RuleSchemaFunction
from swagger_client.models.rule_schema_i_agent import RuleSchemaIAgent
from swagger_client.models.rule_schema_i_agent_args import RuleSchemaIAgentArgs
from swagger_client.models.rule_schema_nativegpb import RuleSchemaNativegpb
from swagger_client.models.rule_schema_openconfig import RuleSchemaOpenconfig
from swagger_client.models.rule_schema_reference import RuleSchemaReference
from swagger_client.models.rule_schema_reference_dataifmissing import RuleSchemaReferenceDataifmissing
from swagger_client.models.rule_schema_ruleproperties import RuleSchemaRuleproperties
from swagger_client.models.rule_schema_ruleproperties_catalogue import RuleSchemaRulepropertiesCatalogue
from swagger_client.models.rule_schema_ruleproperties_helperfiles import RuleSchemaRulepropertiesHelperfiles
from swagger_client.models.rule_schema_ruleproperties_supporteddevices import RuleSchemaRulepropertiesSupporteddevices
from swagger_client.models.rule_schema_ruleproperties_supporteddevices_juniper import RuleSchemaRulepropertiesSupporteddevicesJuniper
from swagger_client.models.rule_schema_ruleproperties_supporteddevices_juniper_operatingsystem import RuleSchemaRulepropertiesSupporteddevicesJuniperOperatingsystem
from swagger_client.models.rule_schema_ruleproperties_supporteddevices_juniper_products import RuleSchemaRulepropertiesSupporteddevicesJuniperProducts
from swagger_client.models.rule_schema_ruleproperties_supporteddevices_juniper_releases import RuleSchemaRulepropertiesSupporteddevicesJuniperReleases
from swagger_client.models.rule_schema_ruleproperties_supporteddevices_othervendor import RuleSchemaRulepropertiesSupporteddevicesOthervendor
from swagger_client.models.rule_schema_sensor import RuleSchemaSensor
from swagger_client.models.rule_schema_sensor_1 import RuleSchemaSensor1
from swagger_client.models.rule_schema_snmp import RuleSchemaSnmp
from swagger_client.models.rule_schema_term import RuleSchemaTerm
from swagger_client.models.rule_schema_then import RuleSchemaThen
from swagger_client.models.rule_schema_then_argument import RuleSchemaThenArgument
from swagger_client.models.rule_schema_then_status import RuleSchemaThenStatus
from swagger_client.models.rule_schema_then_userdefinedaction import RuleSchemaThenUserdefinedaction
from swagger_client.models.rule_schema_trigger import RuleSchemaTrigger
from swagger_client.models.rule_schema_variable import RuleSchemaVariable
from swagger_client.models.rule_schema_vector import RuleSchemaVector
from swagger_client.models.rule_schema_when import RuleSchemaWhen
from swagger_client.models.rule_schema_when_doesnotmatchwith import RuleSchemaWhenDoesnotmatchwith
from swagger_client.models.rule_schema_when_equalto import RuleSchemaWhenEqualto
from swagger_client.models.rule_schema_when_exists import RuleSchemaWhenExists
from swagger_client.models.rule_schema_when_increasingatleastbyrate import RuleSchemaWhenIncreasingatleastbyrate
from swagger_client.models.rule_schema_when_increasingatleastbyvalue import RuleSchemaWhenIncreasingatleastbyvalue
from swagger_client.models.rule_schema_when_maxrateofincrease import RuleSchemaWhenMaxrateofincrease
from swagger_client.models.rule_schema_when_range import RuleSchemaWhenRange
from swagger_client.models.rule_schema_when_userdefinedfunction import RuleSchemaWhenUserdefinedfunction
from swagger_client.models.rule_schema_where import RuleSchemaWhere
from swagger_client.models.scheduler_schema import SchedulerSchema
from swagger_client.models.scheduler_schema_repeat import SchedulerSchemaRepeat
from swagger_client.models.scheduler_schema_repeat_interval import SchedulerSchemaRepeatInterval
from swagger_client.models.scheduler_schema_runfor import SchedulerSchemaRunfor
from swagger_client.models.schedulers_schema import SchedulersSchema
from swagger_client.models.service_status import ServiceStatus
from swagger_client.models.ssh_key_profile_schema import SshKeyProfileSchema
from swagger_client.models.system_settings_schema import SystemSettingsSchema
from swagger_client.models.systemsettings_schema_systemsettings import SystemsettingsSchemaSystemsettings
from swagger_client.models.systemsettings_schema_systemsettings_reportgeneration import SystemsettingsSchemaSystemsettingsReportgeneration
from swagger_client.models.table_schema import TableSchema
from swagger_client.models.time_range_mandatory import TimeRangeMandatory
from swagger_client.models.topic_schema import TopicSchema
from swagger_client.models.topics_schema import TopicsSchema
from swagger_client.models.when_lhs_rhs_group import WhenLhsRhsGroup
# --- /abca/manage.py (repo: maufarinelli/abca) ---
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "abca.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
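# Typical invocations of this entry point from the project root (standard Django
# management commands, noted here only as a usage sketch):
#   python manage.py migrate
#   python manage.py runserver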
# --- /io_scene_fbx/__init__.py (repo: 1-MillionParanoidTterabytes/blender-addons-master) ---
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "FBX format",
"author": "Campbell Barton, Bastien Montagne, Jens Restemeier",
"version": (3, 9, 1),
"blender": (2, 79, 1),
"location": "File > Import-Export",
"description": "FBX IO meshes, UV's, vertex colors, materials, textures, cameras, lamps and actions",
"warning": "",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/Import-Export/Autodesk_FBX",
"support": 'OFFICIAL',
"category": "Import-Export",
}
if "bpy" in locals():
import importlib
if "import_fbx" in locals():
importlib.reload(import_fbx)
if "export_fbx_bin" in locals():
importlib.reload(export_fbx_bin)
if "export_fbx" in locals():
importlib.reload(export_fbx)
import bpy
from bpy.props import (
StringProperty,
BoolProperty,
FloatProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
orientation_helper_factory,
path_reference_mode,
axis_conversion,
)
IOFBXOrientationHelper = orientation_helper_factory("IOFBXOrientationHelper", axis_forward='-Z', axis_up='Y')
class ImportFBX(bpy.types.Operator, ImportHelper, IOFBXOrientationHelper):
"""Load a FBX file"""
bl_idname = "import_scene.fbx"
bl_label = "Import FBX"
bl_options = {'UNDO', 'PRESET'}
directory = StringProperty()
filename_ext = ".fbx"
filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
ui_tab = EnumProperty(
items=(('MAIN', "Main", "Main basic settings"),
('ARMATURE', "Armatures", "Armature-related settings"),
),
name="ui_tab",
description="Import options categories",
)
use_manual_orientation = BoolProperty(
name="Manual Orientation",
description="Specify orientation and scale, instead of using embedded data in FBX file",
default=False,
)
global_scale = FloatProperty(
name="Scale",
min=0.001, max=1000.0,
default=1.0,
)
bake_space_transform = BoolProperty(
name="!EXPERIMENTAL! Apply Transform",
description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
"target space is not aligned with Blender's space "
"(WARNING! experimental option, use at own risks, known broken with armatures/animations)",
default=False,
)
use_custom_normals = BoolProperty(
name="Import Normals",
description="Import custom normals, if available (otherwise Blender will recompute them)",
default=True,
)
use_image_search = BoolProperty(
name="Image Search",
description="Search subdirs for any associated images (WARNING: may be slow)",
default=True,
)
use_alpha_decals = BoolProperty(
name="Alpha Decals",
description="Treat materials with alpha as decals (no shadow casting)",
default=False,
)
decal_offset = FloatProperty(
name="Decal Offset",
description="Displace geometry of alpha meshes",
min=0.0, max=1.0,
default=0.0,
)
use_anim = BoolProperty(
name="Import Animation",
description="Import FBX animation",
default=True,
)
anim_offset = FloatProperty(
name="Animation Offset",
description="Offset to apply to animation during import, in frames",
default=1.0,
)
use_custom_props = BoolProperty(
name="Import User Properties",
description="Import user properties as custom properties",
default=True,
)
use_custom_props_enum_as_string = BoolProperty(
name="Import Enums As Strings",
description="Store enumeration values as strings",
default=True,
)
ignore_leaf_bones = BoolProperty(
name="Ignore Leaf Bones",
description="Ignore the last bone at the end of each chain (used to mark the length of the previous bone)",
default=False,
)
force_connect_children = BoolProperty(
name="Force Connect Children",
description="Force connection of children bones to their parent, even if their computed head/tail "
"positions do not match (can be useful with pure-joints-type armatures)",
default=False,
)
automatic_bone_orientation = BoolProperty(
name="Automatic Bone Orientation",
description="Try to align the major bone axis with the bone children",
default=False,
)
primary_bone_axis = EnumProperty(
name="Primary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='Y',
)
secondary_bone_axis = EnumProperty(
name="Secondary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='X',
)
use_prepost_rot = BoolProperty(
name="Use Pre/Post Rotation",
description="Use pre/post rotation from FBX transform (you may have to disable that in some cases)",
default=True,
)
def draw(self, context):
layout = self.layout
layout.prop(self, "ui_tab", expand=True)
if self.ui_tab == 'MAIN':
layout.prop(self, "use_manual_orientation"),
sub = layout.column()
sub.enabled = self.use_manual_orientation
sub.prop(self, "axis_forward")
sub.prop(self, "axis_up")
layout.prop(self, "global_scale")
layout.prop(self, "bake_space_transform")
layout.prop(self, "use_custom_normals")
layout.prop(self, "use_anim")
layout.prop(self, "anim_offset")
layout.prop(self, "use_custom_props")
sub = layout.row()
sub.enabled = self.use_custom_props
sub.prop(self, "use_custom_props_enum_as_string")
layout.prop(self, "use_image_search")
# layout.prop(self, "use_alpha_decals")
layout.prop(self, "decal_offset")
layout.prop(self, "use_prepost_rot")
elif self.ui_tab == 'ARMATURE':
layout.prop(self, "ignore_leaf_bones")
layout.prop(self, "force_connect_children"),
layout.prop(self, "automatic_bone_orientation"),
sub = layout.column()
sub.enabled = not self.automatic_bone_orientation
sub.prop(self, "primary_bone_axis")
sub.prop(self, "secondary_bone_axis")
def execute(self, context):
keywords = self.as_keywords(ignore=("filter_glob", "directory", "ui_tab"))
keywords["use_cycles"] = (context.scene.render.engine == 'CYCLES')
from . import import_fbx
return import_fbx.load(self, context, **keywords)
class ExportFBX(bpy.types.Operator, ExportHelper, IOFBXOrientationHelper):
"""Write a FBX file"""
bl_idname = "export_scene.fbx"
bl_label = "Export FBX"
bl_options = {'UNDO', 'PRESET'}
filename_ext = ".fbx"
filter_glob = StringProperty(default="*.fbx", options={'HIDDEN'})
# List of operator properties, the attributes will be assigned
# to the class instance from the operator settings before calling.
version = EnumProperty(
items=(('BIN7400', "FBX 7.4 binary", "Modern 7.4 binary version"),
('ASCII6100', "FBX 6.1 ASCII",
"Legacy 6.1 ascii version - WARNING: Deprecated and no more maintained"),
),
name="Version",
description="Choose which version of the exporter to use",
)
# 7.4 only
ui_tab = EnumProperty(
items=(('MAIN', "Main", "Main basic settings"),
('GEOMETRY', "Geometries", "Geometry-related settings"),
('ARMATURE', "Armatures", "Armature-related settings"),
('ANIMATION', "Animation", "Animation-related settings"),
),
name="ui_tab",
description="Export options categories",
)
use_selection = BoolProperty(
name="Selected Objects",
description="Export selected objects on visible layers",
default=False,
)
global_scale = FloatProperty(
name="Scale",
description="Scale all data (Some importers do not support scaled armatures!)",
min=0.001, max=1000.0,
soft_min=0.01, soft_max=1000.0,
default=1.0,
)
# 7.4 only
apply_unit_scale = BoolProperty(
name="Apply Unit",
description="Take into account current Blender units settings (if unset, raw Blender Units values are used as-is)",
default=True,
)
# 7.4 only
apply_scale_options = EnumProperty(
items=(('FBX_SCALE_NONE', "All Local",
"Apply custom scaling and units scaling to each object transformation, FBX scale remains at 1.0"),
('FBX_SCALE_UNITS', "FBX Units Scale",
"Apply custom scaling to each object transformation, and units scaling to FBX scale"),
('FBX_SCALE_CUSTOM', "FBX Custom Scale",
"Apply custom scaling to FBX scale, and units scaling to each object transformation"),
('FBX_SCALE_ALL', "FBX All",
"Apply custom scaling and units scaling to FBX scale"),
),
name="Apply Scalings",
description="How to apply custom and units scalings in generated FBX file "
"(Blender uses FBX scale to detect units on import, "
"but many other applications do not handle the same way)",
)
# 7.4 only
bake_space_transform = BoolProperty(
name="!EXPERIMENTAL! Apply Transform",
description="Bake space transform into object data, avoids getting unwanted rotations to objects when "
"target space is not aligned with Blender's space "
"(WARNING! experimental option, use at own risks, known broken with armatures/animations)",
default=False,
)
object_types = EnumProperty(
name="Object Types",
options={'ENUM_FLAG'},
items=(('EMPTY', "Empty", ""),
('CAMERA', "Camera", ""),
('LAMP', "Lamp", ""),
('ARMATURE', "Armature", "WARNING: not supported in dupli/group instances"),
('MESH', "Mesh", ""),
('OTHER', "Other", "Other geometry types, like curve, metaball, etc. (converted to meshes)"),
),
description="Which kind of object to export",
default={'EMPTY', 'CAMERA', 'LAMP', 'ARMATURE', 'MESH', 'OTHER'},
)
use_mesh_modifiers = BoolProperty(
name="Apply Modifiers",
description="Apply modifiers to mesh objects (except Armature ones) - "
"WARNING: prevents exporting shape keys",
default=True,
)
use_mesh_modifiers_render = BoolProperty(
name="Use Modifiers Render Setting",
description="Use render settings when applying modifiers to mesh objects",
default=True,
)
mesh_smooth_type = EnumProperty(
name="Smoothing",
items=(('OFF', "Normals Only", "Export only normals instead of writing edge or face smoothing data"),
('FACE', "Face", "Write face smoothing"),
('EDGE', "Edge", "Write edge smoothing"),
),
description="Export smoothing information "
"(prefer 'Normals Only' option if your target importer understand split normals)",
default='OFF',
)
use_mesh_edges = BoolProperty(
name="Loose Edges",
description="Export loose edges (as two-vertices polygons)",
default=False,
)
# 7.4 only
use_tspace = BoolProperty(
name="Tangent Space",
description="Add binormal and tangent vectors, together with normal they form the tangent space "
"(will only work correctly with tris/quads only meshes!)",
default=False,
)
# 7.4 only
use_custom_props = BoolProperty(
name="Custom Properties",
description="Export custom properties",
default=False,
)
add_leaf_bones = BoolProperty(
name="Add Leaf Bones",
description="Append a final bone to the end of each chain to specify last bone length "
"(use this when you intend to edit the armature from exported data)",
default=True # False for commit!
)
primary_bone_axis = EnumProperty(
name="Primary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='Y',
)
secondary_bone_axis = EnumProperty(
name="Secondary Bone Axis",
items=(('X', "X Axis", ""),
('Y', "Y Axis", ""),
('Z', "Z Axis", ""),
('-X', "-X Axis", ""),
('-Y', "-Y Axis", ""),
('-Z', "-Z Axis", ""),
),
default='X',
)
use_armature_deform_only = BoolProperty(
name="Only Deform Bones",
description="Only write deforming bones (and non-deforming ones when they have deforming children)",
default=False,
)
armature_nodetype = EnumProperty(
name="Armature FBXNode Type",
items=(('NULL', "Null", "'Null' FBX node, similar to Blender's Empty (default)"),
('ROOT', "Root", "'Root' FBX node, supposed to be the root of chains of bones..."),
('LIMBNODE', "LimbNode", "'LimbNode' FBX node, a regular joint between two bones..."),
),
description="FBX type of node (object) used to represent Blender's armatures "
"(use Null one unless you experience issues with other app, other choices may no import back "
"perfectly in Blender...)",
default='NULL',
)
# Anim - 7.4
bake_anim = BoolProperty(
name="Baked Animation",
description="Export baked keyframe animation",
default=True,
)
bake_anim_use_all_bones = BoolProperty(
name="Key All Bones",
description="Force exporting at least one key of animation for all bones "
"(needed with some target applications, like UE4)",
default=True,
)
bake_anim_use_nla_strips = BoolProperty(
name="NLA Strips",
description="Export each non-muted NLA strip as a separated FBX's AnimStack, if any, "
"instead of global scene animation",
default=True,
)
bake_anim_use_all_actions = BoolProperty(
name="All Actions",
description="Export each action as a separated FBX's AnimStack, instead of global scene animation "
"(note that animated objects will get all actions compatible with them, "
"others will get no animation at all)",
default=True,
)
bake_anim_force_startend_keying = BoolProperty(
name="Force Start/End Keying",
description="Always add a keyframe at start and end of actions for animated channels",
default=True,
)
bake_anim_step = FloatProperty(
name="Sampling Rate",
description="How often to evaluate animated values (in frames)",
min=0.01, max=100.0,
soft_min=0.1, soft_max=10.0,
default=1.0,
)
bake_anim_simplify_factor = FloatProperty(
name="Simplify",
description="How much to simplify baked values (0.0 to disable, the higher the more simplified)",
min=0.0, max=100.0, # No simplification to up to 10% of current magnitude tolerance.
soft_min=0.0, soft_max=10.0,
default=1.0, # default: min slope: 0.005, max frame step: 10.
)
# Anim - 6.1
use_anim = BoolProperty(
name="Animation",
description="Export keyframe animation",
default=True,
)
use_anim_action_all = BoolProperty(
name="All Actions",
description=("Export all actions for armatures or just the currently selected action"),
default=True,
)
use_default_take = BoolProperty(
name="Default Take",
description="Export currently assigned object and armature animations into a default take from the scene "
"start/end frames",
default=True
)
use_anim_optimize = BoolProperty(
name="Optimize Keyframes",
description="Remove double keyframes",
default=True,
)
anim_optimize_precision = FloatProperty(
name="Precision",
description="Tolerance for comparing double keyframes (higher for greater accuracy)",
min=0.0, max=20.0, # from 10^2 to 10^-18 frames precision.
soft_min=1.0, soft_max=16.0,
default=6.0, # default: 10^-4 frames.
)
# End anim
path_mode = path_reference_mode
# 7.4 only
embed_textures = BoolProperty(
name="Embed Textures",
description="Embed textures in FBX binary file (only for \"Copy\" path mode!)",
default=False,
)
batch_mode = EnumProperty(
name="Batch Mode",
items=(('OFF', "Off", "Active scene to file"),
('SCENE', "Scene", "Each scene as a file"),
('GROUP', "Group", "Each group as a file"),
),
)
use_batch_own_dir = BoolProperty(
name="Batch Own Dir",
description="Create a dir for each exported file",
default=True,
)
use_metadata = BoolProperty(
name="Use Metadata",
default=True,
options={'HIDDEN'},
)
def draw(self, context):
layout = self.layout
layout.prop(self, "version")
if self.version == 'BIN7400':
layout.prop(self, "ui_tab", expand=True)
if self.ui_tab == 'MAIN':
layout.prop(self, "use_selection")
col = layout.column(align=True)
row = col.row(align=True)
row.prop(self, "global_scale")
sub = row.row(align=True)
sub.prop(self, "apply_unit_scale", text="", icon='NDOF_TRANS')
col.prop(self, "apply_scale_options")
layout.prop(self, "axis_forward")
layout.prop(self, "axis_up")
layout.separator()
layout.prop(self, "object_types")
layout.prop(self, "bake_space_transform")
layout.prop(self, "use_custom_props")
layout.separator()
row = layout.row(align=True)
row.prop(self, "path_mode")
sub = row.row(align=True)
sub.enabled = (self.path_mode == 'COPY')
sub.prop(self, "embed_textures", text="", icon='PACKAGE' if self.embed_textures else 'UGLYPACKAGE')
row = layout.row(align=True)
row.prop(self, "batch_mode")
sub = row.row(align=True)
sub.prop(self, "use_batch_own_dir", text="", icon='NEWFOLDER')
elif self.ui_tab == 'GEOMETRY':
layout.prop(self, "use_mesh_modifiers")
sub = layout.row()
sub.enabled = self.use_mesh_modifiers
sub.prop(self, "use_mesh_modifiers_render")
layout.prop(self, "mesh_smooth_type")
layout.prop(self, "use_mesh_edges")
sub = layout.row()
#~ sub.enabled = self.mesh_smooth_type in {'OFF'}
sub.prop(self, "use_tspace")
elif self.ui_tab == 'ARMATURE':
layout.prop(self, "use_armature_deform_only")
layout.prop(self, "add_leaf_bones")
layout.prop(self, "primary_bone_axis")
layout.prop(self, "secondary_bone_axis")
layout.prop(self, "armature_nodetype")
elif self.ui_tab == 'ANIMATION':
layout.prop(self, "bake_anim")
col = layout.column()
col.enabled = self.bake_anim
col.prop(self, "bake_anim_use_all_bones")
col.prop(self, "bake_anim_use_nla_strips")
col.prop(self, "bake_anim_use_all_actions")
col.prop(self, "bake_anim_force_startend_keying")
col.prop(self, "bake_anim_step")
col.prop(self, "bake_anim_simplify_factor")
else:
layout.prop(self, "use_selection")
layout.prop(self, "global_scale")
layout.prop(self, "axis_forward")
layout.prop(self, "axis_up")
layout.separator()
layout.prop(self, "object_types")
layout.prop(self, "use_mesh_modifiers")
layout.prop(self, "mesh_smooth_type")
layout.prop(self, "use_mesh_edges")
sub = layout.row()
#~ sub.enabled = self.mesh_smooth_type in {'OFF'}
sub.prop(self, "use_tspace")
layout.prop(self, "use_armature_deform_only")
layout.prop(self, "use_anim")
col = layout.column()
col.enabled = self.use_anim
col.prop(self, "use_anim_action_all")
col.prop(self, "use_default_take")
col.prop(self, "use_anim_optimize")
col.prop(self, "anim_optimize_precision")
layout.separator()
layout.prop(self, "path_mode")
layout.prop(self, "batch_mode")
layout.prop(self, "use_batch_own_dir")
@property
def check_extension(self):
return self.batch_mode == 'OFF'
def execute(self, context):
from mathutils import Matrix
if not self.filepath:
raise Exception("filepath not set")
global_matrix = (axis_conversion(to_forward=self.axis_forward,
to_up=self.axis_up,
).to_4x4())
keywords = self.as_keywords(ignore=("check_existing",
"filter_glob",
"ui_tab",
))
keywords["global_matrix"] = global_matrix
if self.version == 'BIN7400':
from . import export_fbx_bin
return export_fbx_bin.save(self, context, **keywords)
else:
from . import export_fbx
return export_fbx.save(self, context, **keywords)
def menu_func_import(self, context):
self.layout.operator(ImportFBX.bl_idname, text="FBX (.fbx)")
def menu_func_export(self, context):
self.layout.operator(ExportFBX.bl_idname, text="FBX (.fbx)")
classes = (
ImportFBX,
ExportFBX,
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.INFO_MT_file_import.append(menu_func_import)
bpy.types.INFO_MT_file_export.append(menu_func_export)
def unregister():
bpy.types.INFO_MT_file_import.remove(menu_func_import)
bpy.types.INFO_MT_file_export.remove(menu_func_export)
for cls in classes:
bpy.utils.unregister_class(cls)
if __name__ == "__main__":
register()
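# Usage sketch (assumption: run from Blender's Python console or another script once the
# add-on is registered). The operators defined above are exposed through bpy.ops via their
# bl_idname, e.g.:
#   bpy.ops.import_scene.fbx(filepath="/path/to/model.fbx")
#   bpy.ops.export_scene.fbx(filepath="/path/to/out.fbx", use_selection=True)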
| [
"[email protected]"
]
| |
# --- /mask-RCNN/maskRCNN/Mask_RCNN/samples/maskrcnn1.py (repo: fmigone/vehicle_recognition_suite) ---
import os
import sys
#import skimage.io
#import matplotlib.pyplot as plt
import cv2
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize_cv
# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/")) # To find local version
import coco
import numpy as np
import time
# classes =====================================================================
class InferenceConfig(coco.CocoConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
#==============================================================================
# global constants ============================================================
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
VID_DIRECTORY = "C:\\Users\\victor\\Desktop\\vm-master\\Videos\\"
#VID_DIRECTORY = "D:\\Users\\f202897\\Desktop\\vm-master\\Videos\\"
# points = x,y (l-r, t-b)
ROI_CORNERS = np.array([[(1000,180),(120,180), (50,720), (1100,720)]], dtype=np.int32)
#==============================================================================
config = InferenceConfig()
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
# Load a random image from the images folder
#file_names = next(os.walk(IMAGE_DIR))[2]
#filename = os.path.join(IMAGE_DIR, 'van.png')
#image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
#image = skimage.io.imread(filename)
cap = cv2.VideoCapture(VID_DIRECTORY+'tarde.mp4')
#out = cv2.VideoWriter('outpy.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 30, (1280//2,720//2))
#benchmarking
timeslist=[]
framecount=0
while(1):
millis1 = time.time()
ret, frame = cap.read()
width, heigth, channels = frame.shape
if frame is None:
print("none")
break
framecount+=1
framecopy=frame.copy()
#getting ROI ==============================================================
# mask defaulting to black for 3-channel and transparent for 4-channel
mask = np.zeros(frame.shape, dtype=np.uint8)
# fill the ROI so it doesn't get wiped out when the mask is applied
channel_count = frame.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,)*channel_count
cv2.fillConvexPoly(mask, ROI_CORNERS, ignore_mask_color)
# apply the mask
roi = cv2.bitwise_and(frame, mask)
rectangle=cv2.boundingRect(ROI_CORNERS)
#cv2.rectangle(roi,(rectangle[0],rectangle[1]),(rectangle[0]+rectangle[2],rectangle[1]+rectangle[3]),(0,0,255),3)
x1=rectangle[0]
x2=rectangle[0]+rectangle[2]
y1=rectangle[1]
y2=rectangle[1]+rectangle[3]
roi=roi[y1:y2,x1:x2]
    roi = cv2.resize(roi, (width // 2, height // 2))  # cv2.resize expects dsize as (width, height)
#==========================================================================
# Run detection
results = model.detect([roi], verbose=1)
# Visualize results
r = results[0]
frameresult=visualize_cv.display_instances(roi, r['rois'], r['masks'], r['class_ids'],
class_names, r['scores'])
cv2.imshow('result', frameresult)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
millis2 = time.time()
millis=millis2 - millis1
timeslist.append(millis*1000)
#saving video
#out.write(frameresult)
cap.release()
#out.release()
cv2.destroyAllWindows()
mean = sum(timeslist)/len(timeslist)
| [
"[email protected]"
]
| |
# --- /singel.py (repo: abushaik/Coding-fun) ---
import matplotlib.pyplot as plt
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import random
print("SIMULATION PROJECT\n")
len=int(input("Enter number of customers: "))  # customer count (shadows built-in len(), which this script never calls)
arv = [random.randint(10,80) for _ in range(len-1)]
serv = [random.randint(10,80) for _ in range(len)]
arrRandom=[0]+arv
arrivalTime =[0]+ [random.randint(1,10) for _ in range(len-1)]
print("Random number of Arrival time")
for i in range(len):
print(arrRandom[i],end=",")
print("\n\nRandom number of Service time")
for i in range(len):
print(serv[i],end=",")
print("\n\nTime Between Arrival Determination:")
print("----------------------------------------------------------------------")
print("CustomerNumber |Random Digit | Time Between Arrival")
print("----------------------------------------------------------------------")
for line in range(0,len):
print("\t|",line+1,"\t\t|",arrRandom[line],"\t\t|",arrivalTime[line])
print("----------------------------------------------------------------------")
print("\n\nService Time Genarated:")
print("----------------------------------------------------------------------")
print("CustomerNumber |Random Digit | Service Time")
print("----------------------------------------------------------------------")
serviceTime = [random.randint(1,10) for _ in range(len)]
for line in range(0,len):
print("\t|",line+1,"\t\t|",serv[line],"\t\t|",serviceTime[line])
print("----------------------------------------------------------------------")
print("\n\nSimulation Table:")
print("----------------------------------------------------------------------")
print(" IAT=Inter Arrival Time\n At=Arrival Time\n ST=Service Time\n STB= Service Time Begin\n WS=Waiting Service\n STE= Service Time End\n TSS=Time Spend in Service\n IT=Idle Time")
print("Customer|IAT\t|AT\t|ST\t|STB\t|WS\t|STE\t|TSS\t|IT")
print("----------------------------------------------------------------------")
# Build the simulation table: each customer begins service at arrival or when the
# previous customer's service ends, whichever comes later.
L=0
LatArraival=[]
serviceTimeEnd=[]
customerspend=[]
idleTime=[]
TimeServiceBegin=[]
waitQueue=[]
for line in range(0,len):
    L+=arrivalTime[line]
    LatArraival.append(L)
for line in range(0,len):
    if line==0 or LatArraival[line]>=serviceTimeEnd[line-1]:
        # server is free: service starts on arrival; server was idle since the last departure
        TimeServiceBegin.append(LatArraival[line])
        idleTime.append(0 if line==0 else LatArraival[line]-serviceTimeEnd[line-1])
    else:
        # server is busy: the customer waits until the previous service ends
        TimeServiceBegin.append(serviceTimeEnd[line-1])
        idleTime.append(0)
    waitQueue.append(TimeServiceBegin[line]-LatArraival[line])
    serviceTimeEnd.append(TimeServiceBegin[line]+serviceTime[line])
    customerspend.append(serviceTimeEnd[line]-LatArraival[line])
    print(line+1,"\t|",arrivalTime[line],"\t|",LatArraival[line],"\t|",serviceTime[line],"\t|",TimeServiceBegin[line],"\t|",waitQueue[line],"\t|",serviceTimeEnd[line],"\t|",customerspend[line],"\t|",idleTime[line])
print("----------------------------------------------------------------------")
sum_wait=0
for i in range(0,len):
sum_wait+=waitQueue[i];
average_wait= sum_wait/len
print("Average waiting Time:",average_wait)
sum_idle=0
for i in range(0,len):
sum_idle+=idleTime[i];
pro_idle=sum_idle/serviceTimeEnd[len-1]
print("Probability of Idle time Server:",pro_idle)
sum_service=0
for i in range(0,len):
sum_service+=serviceTime[i];
average_service= sum_service/len
print("Average Service Time:",average_service)
sum_arrival=0
for i in range(0,len):
sum_arrival+=arrivalTime[i];
average_arrival= sum_arrival/len
print("Average Inter Arrival Time:",average_arrival)
sum_spend=0
for i in range(0,len):
sum_spend+=customerspend[i];
average_spend= sum_spend/len
print("Average Customer Spend Time:",average_spend)
canvas = canvas.Canvas("Report.pdf", pagesize=letter)
canvas.setLineWidth(.3)
canvas.setFont('Helvetica', 12)
canvas.drawString(200,750,'Daffodil International University.')
canvas.drawString(215,715,'Simulation and Modeling.')
canvas.drawString(30,650,'Single-Channel Queue')
canvas.drawString(500,650,"31/07/2018")
canvas.drawString(30,625,'Result:')
canvas.drawString(30,605,'Average Service Time:')
canvas.drawString(30,585,str(average_service))
canvas.drawString(30,565,'Average waiting Time:')
canvas.drawString(30,545,str(average_wait))
canvas.drawString(30,525,'Probability of Idle time Server:')
canvas.drawString(30,505,str(pro_idle))
canvas.drawString(30,485,'Simulation Table:')
canvas.drawString(30,465,'Customer')
canvas.drawString(150,465,'Time Spend In System')
canvas.drawString(400,465,'Ideal Time')
for i in range(0,len):
canvas.drawString(30,445-(i*20),str(i+1))
for i in range(0,len):
canvas.drawString(150,445-(i*20),str(customerspend[i]))
for i in range(0,len):
canvas.drawString(400,445-(i*20),str(idleTime[i]))
canvas.save()
new_arrival=[]
customer=[]
for i in range(0,len):
new_arrival.append(LatArraival[i])
customer.append(i)
plt.bar(new_arrival,customer)
plt.title("Number of Customers in the System")
plt.xlabel("Arrival Time")
plt.ylabel("customer")
plt.show()
| [
"[email protected]"
]
| |
# --- /note/migrations/0001_initial.py (repo: pedroeagle/django-notes-app) ---
# Generated by Django 3.2.6 on 2021-09-01 01:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Note',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('content', models.CharField(max_length=10000)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.RESTRICT, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
# --- /backend/hashing.py (repo: Penta301/Institute_JHAM) ---
from passlib.context import CryptContext
pwd_cxt = CryptContext(schemes=['bcrypt'], deprecated='auto')
class Hash():
    @staticmethod
    def encrypt(password):
        return pwd_cxt.hash(password)

    @staticmethod
    def verify(plain_password, hashed_password):
        return pwd_cxt.verify(plain_password, hashed_password)
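# Minimal usage sketch (assumption: called from the application's auth flow when creating
# a user and when checking credentials at login):
if __name__ == "__main__":
    hashed = Hash.encrypt("s3cret-password")       # bcrypt hash, safe to persist
    print(Hash.verify("s3cret-password", hashed))  # True
    print(Hash.verify("wrong-password", hashed))   # False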
"[email protected]"
]
| |
# --- /autoware.ai/1.12.0_cuda/install/autoware_system_msgs/lib/python2.7/dist-packages/autoware_system_msgs/msg/_SystemStatus.py (repo: muyangren907/autoware) ---
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from autoware_system_msgs/SystemStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import autoware_system_msgs.msg
import rosgraph_msgs.msg
import genpy
import std_msgs.msg
class SystemStatus(genpy.Message):
_md5sum = "4410e98d931508de40c30e12f5aa1ed0"
_type = "autoware_system_msgs/SystemStatus"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
string[] available_nodes
bool detect_too_match_warning
autoware_system_msgs/NodeStatus[] node_status
autoware_system_msgs/HardwareStatus[] hardware_status
rosgraph_msgs/TopicStatistics[] topic_statistics
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: autoware_system_msgs/NodeStatus
Header header
string node_name
bool node_activated
autoware_system_msgs/DiagnosticStatusArray[] status
================================================================================
MSG: autoware_system_msgs/DiagnosticStatusArray
autoware_system_msgs/DiagnosticStatus[] status
================================================================================
MSG: autoware_system_msgs/DiagnosticStatus
Header header
string key
string value
string description
uint8 UNDEFINED = 0
uint8 type
uint8 OUT_OF_RANGE = 1
uint8 RATE_IS_SLOW = 2
uint8 HARDWARE = 255
uint8 level
uint8 OK = 1
uint8 WARN = 2
uint8 ERROR = 3
uint8 FATAL = 4
================================================================================
MSG: autoware_system_msgs/HardwareStatus
std_msgs/Header header
string hardware_name
autoware_system_msgs/DiagnosticStatusArray[] status
================================================================================
MSG: rosgraph_msgs/TopicStatistics
# name of the topic
string topic
# node id of the publisher
string node_pub
# node id of the subscriber
string node_sub
# the statistics apply to this time window
time window_start
time window_stop
# number of messages delivered during the window
int32 delivered_msgs
# numbers of messages dropped during the window
int32 dropped_msgs
# traffic during the window, in bytes
int32 traffic
# mean/stddev/max period between two messages
duration period_mean
duration period_stddev
duration period_max
# mean/stddev/max age of the message based on the
# timestamp in the message header. In case the
# message does not have a header, it will be 0.
duration stamp_age_mean
duration stamp_age_stddev
duration stamp_age_max
"""
__slots__ = ['header','available_nodes','detect_too_match_warning','node_status','hardware_status','topic_statistics']
_slot_types = ['std_msgs/Header','string[]','bool','autoware_system_msgs/NodeStatus[]','autoware_system_msgs/HardwareStatus[]','rosgraph_msgs/TopicStatistics[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,available_nodes,detect_too_match_warning,node_status,hardware_status,topic_statistics
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(SystemStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.available_nodes is None:
self.available_nodes = []
if self.detect_too_match_warning is None:
self.detect_too_match_warning = False
if self.node_status is None:
self.node_status = []
if self.hardware_status is None:
self.hardware_status = []
if self.topic_statistics is None:
self.topic_statistics = []
else:
self.header = std_msgs.msg.Header()
self.available_nodes = []
self.detect_too_match_warning = False
self.node_status = []
self.hardware_status = []
self.topic_statistics = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.available_nodes)
buff.write(_struct_I.pack(length))
for val1 in self.available_nodes:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
buff.write(_get_struct_B().pack(self.detect_too_match_warning))
length = len(self.node_status)
buff.write(_struct_I.pack(length))
for val1 in self.node_status:
_v1 = val1.header
buff.write(_get_struct_I().pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(val1.node_activated))
length = len(val1.status)
buff.write(_struct_I.pack(length))
for val2 in val1.status:
length = len(val2.status)
buff.write(_struct_I.pack(length))
for val3 in val2.status:
_v3 = val3.header
buff.write(_get_struct_I().pack(_v3.seq))
_v4 = _v3.stamp
_x = _v4
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v3.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3
buff.write(_get_struct_2B().pack(_x.type, _x.level))
length = len(self.hardware_status)
buff.write(_struct_I.pack(length))
for val1 in self.hardware_status:
_v5 = val1.header
buff.write(_get_struct_I().pack(_v5.seq))
_v6 = _v5.stamp
_x = _v6
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v5.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.hardware_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(val1.status)
buff.write(_struct_I.pack(length))
for val2 in val1.status:
length = len(val2.status)
buff.write(_struct_I.pack(length))
for val3 in val2.status:
_v7 = val3.header
buff.write(_get_struct_I().pack(_v7.seq))
_v8 = _v7.stamp
_x = _v8
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v7.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3
buff.write(_get_struct_2B().pack(_x.type, _x.level))
length = len(self.topic_statistics)
buff.write(_struct_I.pack(length))
for val1 in self.topic_statistics:
_x = val1.topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_pub
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_sub
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v9 = val1.window_start
_x = _v9
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_v10 = val1.window_stop
_x = _v10
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = val1
buff.write(_get_struct_3i().pack(_x.delivered_msgs, _x.dropped_msgs, _x.traffic))
_v11 = val1.period_mean
_x = _v11
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v12 = val1.period_stddev
_x = _v12
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v13 = val1.period_max
_x = _v13
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v14 = val1.stamp_age_mean
_x = _v14
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v15 = val1.stamp_age_stddev
_x = _v15
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v16 = val1.stamp_age_max
_x = _v16
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.node_status is None:
self.node_status = None
if self.hardware_status is None:
self.hardware_status = None
if self.topic_statistics is None:
self.topic_statistics = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.available_nodes = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.available_nodes.append(val1)
start = end
end += 1
(self.detect_too_match_warning,) = _get_struct_B().unpack(str[start:end])
self.detect_too_match_warning = bool(self.detect_too_match_warning)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.node_status = []
for i in range(0, length):
val1 = autoware_system_msgs.msg.NodeStatus()
_v17 = val1.header
start = end
end += 4
(_v17.seq,) = _get_struct_I().unpack(str[start:end])
_v18 = _v17.stamp
_x = _v18
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v17.frame_id = str[start:end].decode('utf-8')
else:
_v17.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_name = str[start:end].decode('utf-8')
else:
val1.node_name = str[start:end]
start = end
end += 1
(val1.node_activated,) = _get_struct_B().unpack(str[start:end])
val1.node_activated = bool(val1.node_activated)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.status = []
for i in range(0, length):
val2 = autoware_system_msgs.msg.DiagnosticStatusArray()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.status = []
for i in range(0, length):
val3 = autoware_system_msgs.msg.DiagnosticStatus()
_v19 = val3.header
start = end
end += 4
(_v19.seq,) = _get_struct_I().unpack(str[start:end])
_v20 = _v19.stamp
_x = _v20
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v19.frame_id = str[start:end].decode('utf-8')
else:
_v19.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.key = str[start:end].decode('utf-8')
else:
val3.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.value = str[start:end].decode('utf-8')
else:
val3.value = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.description = str[start:end].decode('utf-8')
else:
val3.description = str[start:end]
_x = val3
start = end
end += 2
(_x.type, _x.level,) = _get_struct_2B().unpack(str[start:end])
val2.status.append(val3)
val1.status.append(val2)
self.node_status.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.hardware_status = []
for i in range(0, length):
val1 = autoware_system_msgs.msg.HardwareStatus()
_v21 = val1.header
start = end
end += 4
(_v21.seq,) = _get_struct_I().unpack(str[start:end])
_v22 = _v21.stamp
_x = _v22
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v21.frame_id = str[start:end].decode('utf-8')
else:
_v21.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.hardware_name = str[start:end].decode('utf-8')
else:
val1.hardware_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.status = []
for i in range(0, length):
val2 = autoware_system_msgs.msg.DiagnosticStatusArray()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.status = []
for i in range(0, length):
val3 = autoware_system_msgs.msg.DiagnosticStatus()
_v23 = val3.header
start = end
end += 4
(_v23.seq,) = _get_struct_I().unpack(str[start:end])
_v24 = _v23.stamp
_x = _v24
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v23.frame_id = str[start:end].decode('utf-8')
else:
_v23.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.key = str[start:end].decode('utf-8')
else:
val3.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.value = str[start:end].decode('utf-8')
else:
val3.value = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.description = str[start:end].decode('utf-8')
else:
val3.description = str[start:end]
_x = val3
start = end
end += 2
(_x.type, _x.level,) = _get_struct_2B().unpack(str[start:end])
val2.status.append(val3)
val1.status.append(val2)
self.hardware_status.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.topic_statistics = []
for i in range(0, length):
val1 = rosgraph_msgs.msg.TopicStatistics()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.topic = str[start:end].decode('utf-8')
else:
val1.topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_pub = str[start:end].decode('utf-8')
else:
val1.node_pub = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_sub = str[start:end].decode('utf-8')
else:
val1.node_sub = str[start:end]
_v25 = val1.window_start
_x = _v25
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_v26 = val1.window_stop
_x = _v26
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_x = val1
start = end
end += 12
(_x.delivered_msgs, _x.dropped_msgs, _x.traffic,) = _get_struct_3i().unpack(str[start:end])
_v27 = val1.period_mean
_x = _v27
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v28 = val1.period_stddev
_x = _v28
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v29 = val1.period_max
_x = _v29
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v30 = val1.stamp_age_mean
_x = _v30
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v31 = val1.stamp_age_stddev
_x = _v31
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v32 = val1.stamp_age_max
_x = _v32
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.topic_statistics.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.available_nodes)
buff.write(_struct_I.pack(length))
for val1 in self.available_nodes:
length = len(val1)
if python3 or type(val1) == unicode:
val1 = val1.encode('utf-8')
length = len(val1)
buff.write(struct.pack('<I%ss'%length, length, val1))
buff.write(_get_struct_B().pack(self.detect_too_match_warning))
length = len(self.node_status)
buff.write(_struct_I.pack(length))
for val1 in self.node_status:
_v33 = val1.header
buff.write(_get_struct_I().pack(_v33.seq))
_v34 = _v33.stamp
_x = _v34
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v33.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
buff.write(_get_struct_B().pack(val1.node_activated))
length = len(val1.status)
buff.write(_struct_I.pack(length))
for val2 in val1.status:
length = len(val2.status)
buff.write(_struct_I.pack(length))
for val3 in val2.status:
_v35 = val3.header
buff.write(_get_struct_I().pack(_v35.seq))
_v36 = _v35.stamp
_x = _v36
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v35.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3
buff.write(_get_struct_2B().pack(_x.type, _x.level))
length = len(self.hardware_status)
buff.write(_struct_I.pack(length))
for val1 in self.hardware_status:
_v37 = val1.header
buff.write(_get_struct_I().pack(_v37.seq))
_v38 = _v37.stamp
_x = _v38
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v37.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.hardware_name
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(val1.status)
buff.write(_struct_I.pack(length))
for val2 in val1.status:
length = len(val2.status)
buff.write(_struct_I.pack(length))
for val3 in val2.status:
_v39 = val3.header
buff.write(_get_struct_I().pack(_v39.seq))
_v40 = _v39.stamp
_x = _v40
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = _v39.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.key
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.value
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3.description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val3
buff.write(_get_struct_2B().pack(_x.type, _x.level))
length = len(self.topic_statistics)
buff.write(_struct_I.pack(length))
for val1 in self.topic_statistics:
_x = val1.topic
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_pub
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = val1.node_sub
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_v41 = val1.window_start
_x = _v41
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_v42 = val1.window_stop
_x = _v42
buff.write(_get_struct_2I().pack(_x.secs, _x.nsecs))
_x = val1
buff.write(_get_struct_3i().pack(_x.delivered_msgs, _x.dropped_msgs, _x.traffic))
_v43 = val1.period_mean
_x = _v43
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v44 = val1.period_stddev
_x = _v44
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v45 = val1.period_max
_x = _v45
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v46 = val1.stamp_age_mean
_x = _v46
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v47 = val1.stamp_age_stddev
_x = _v47
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
_v48 = val1.stamp_age_max
_x = _v48
buff.write(_get_struct_2i().pack(_x.secs, _x.nsecs))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.node_status is None:
self.node_status = None
if self.hardware_status is None:
self.hardware_status = None
if self.topic_statistics is None:
self.topic_statistics = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.available_nodes = []
for i in range(0, length):
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1 = str[start:end].decode('utf-8')
else:
val1 = str[start:end]
self.available_nodes.append(val1)
start = end
end += 1
(self.detect_too_match_warning,) = _get_struct_B().unpack(str[start:end])
self.detect_too_match_warning = bool(self.detect_too_match_warning)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.node_status = []
for i in range(0, length):
val1 = autoware_system_msgs.msg.NodeStatus()
_v49 = val1.header
start = end
end += 4
(_v49.seq,) = _get_struct_I().unpack(str[start:end])
_v50 = _v49.stamp
_x = _v50
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v49.frame_id = str[start:end].decode('utf-8')
else:
_v49.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_name = str[start:end].decode('utf-8')
else:
val1.node_name = str[start:end]
start = end
end += 1
(val1.node_activated,) = _get_struct_B().unpack(str[start:end])
val1.node_activated = bool(val1.node_activated)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.status = []
for i in range(0, length):
val2 = autoware_system_msgs.msg.DiagnosticStatusArray()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.status = []
for i in range(0, length):
val3 = autoware_system_msgs.msg.DiagnosticStatus()
_v51 = val3.header
start = end
end += 4
(_v51.seq,) = _get_struct_I().unpack(str[start:end])
_v52 = _v51.stamp
_x = _v52
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v51.frame_id = str[start:end].decode('utf-8')
else:
_v51.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.key = str[start:end].decode('utf-8')
else:
val3.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.value = str[start:end].decode('utf-8')
else:
val3.value = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.description = str[start:end].decode('utf-8')
else:
val3.description = str[start:end]
_x = val3
start = end
end += 2
(_x.type, _x.level,) = _get_struct_2B().unpack(str[start:end])
val2.status.append(val3)
val1.status.append(val2)
self.node_status.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.hardware_status = []
for i in range(0, length):
val1 = autoware_system_msgs.msg.HardwareStatus()
_v53 = val1.header
start = end
end += 4
(_v53.seq,) = _get_struct_I().unpack(str[start:end])
_v54 = _v53.stamp
_x = _v54
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v53.frame_id = str[start:end].decode('utf-8')
else:
_v53.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.hardware_name = str[start:end].decode('utf-8')
else:
val1.hardware_name = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val1.status = []
for i in range(0, length):
val2 = autoware_system_msgs.msg.DiagnosticStatusArray()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
val2.status = []
for i in range(0, length):
val3 = autoware_system_msgs.msg.DiagnosticStatus()
_v55 = val3.header
start = end
end += 4
(_v55.seq,) = _get_struct_I().unpack(str[start:end])
_v56 = _v55.stamp
_x = _v56
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v55.frame_id = str[start:end].decode('utf-8')
else:
_v55.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.key = str[start:end].decode('utf-8')
else:
val3.key = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.value = str[start:end].decode('utf-8')
else:
val3.value = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val3.description = str[start:end].decode('utf-8')
else:
val3.description = str[start:end]
_x = val3
start = end
end += 2
(_x.type, _x.level,) = _get_struct_2B().unpack(str[start:end])
val2.status.append(val3)
val1.status.append(val2)
self.hardware_status.append(val1)
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.topic_statistics = []
for i in range(0, length):
val1 = rosgraph_msgs.msg.TopicStatistics()
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.topic = str[start:end].decode('utf-8')
else:
val1.topic = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_pub = str[start:end].decode('utf-8')
else:
val1.node_pub = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
val1.node_sub = str[start:end].decode('utf-8')
else:
val1.node_sub = str[start:end]
_v57 = val1.window_start
_x = _v57
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_v58 = val1.window_stop
_x = _v58
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2I().unpack(str[start:end])
_x = val1
start = end
end += 12
(_x.delivered_msgs, _x.dropped_msgs, _x.traffic,) = _get_struct_3i().unpack(str[start:end])
_v59 = val1.period_mean
_x = _v59
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v60 = val1.period_stddev
_x = _v60
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v61 = val1.period_max
_x = _v61
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v62 = val1.stamp_age_mean
_x = _v62
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v63 = val1.stamp_age_stddev
_x = _v63
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
_v64 = val1.stamp_age_max
_x = _v64
start = end
end += 8
(_x.secs, _x.nsecs,) = _get_struct_2i().unpack(str[start:end])
self.topic_statistics.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_B = None
def _get_struct_B():
global _struct_B
if _struct_B is None:
_struct_B = struct.Struct("<B")
return _struct_B
_struct_3i = None
def _get_struct_3i():
global _struct_3i
if _struct_3i is None:
_struct_3i = struct.Struct("<3i")
return _struct_3i
_struct_2i = None
def _get_struct_2i():
global _struct_2i
if _struct_2i is None:
_struct_2i = struct.Struct("<2i")
return _struct_2i
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_2B = None
def _get_struct_2B():
global _struct_2B
if _struct_2B is None:
_struct_2B = struct.Struct("<2B")
return _struct_2B
_struct_2I = None
def _get_struct_2I():
global _struct_2I
if _struct_2I is None:
_struct_2I = struct.Struct("<2I")
return _struct_2I
| [
"[email protected]"
]
| |
8e392d4061af3dde2fd15069c9bf097705824883 | 4eaf53e9259d26d114f52a7c3823e6b27daccfaf | /tensorflow_probability/python/layers/distribution_layer_test.py | 2563eaac5d3c3de888f949732903ca17059d3a86 | [
"Apache-2.0"
]
| permissive | oahziur/probability | f700ea6bd16bd2be0b39237221158299f6646b5c | 11645be43d2845da65a4fbafde4cfa95780280c0 | refs/heads/master | 2020-04-17T08:26:13.888698 | 2019-01-18T15:24:06 | 2019-01-18T15:24:06 | 166,412,051 | 0 | 0 | Apache-2.0 | 2019-01-18T13:49:04 | 2019-01-18T13:49:04 | null | UTF-8 | Python | false | false | 30,600 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
tfe = tf.contrib.eager
tfk = tf.keras
tfkl = tf.keras.layers
tfb = tfp.bijectors
tfd = tfp.distributions
tfpl = tfp.layers
def _logit_avg_expit(t):
"""Computes `logit(mean(expit(t)))` in a numerically stable manner."""
log_avg_prob = (tf.reduce_logsumexp(-tf.nn.softplus(-t), axis=0) -
tf.log(tf.cast(tf.shape(t)[0], t.dtype)))
return log_avg_prob - tf.log1p(-tf.exp(log_avg_prob))
def _vec_pad(x, value=0):
"""Prepends a column of zeros to a matrix."""
paddings = tf.concat(
[tf.zeros([tf.rank(x) - 1, 2], dtype=tf.int32), [[1, 0]]], axis=0)
return tf.pad(x, paddings=paddings, constant_values=value)
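# Illustrative sketch (not part of the original test file) of how the two
# helpers above behave; `t` is assumed to hold per-draw Bernoulli logits with
# the Monte Carlo draw axis first:
#
#   probs = tf.sigmoid(t)                    # expit(t), one prob per draw
#   avg_logit = _logit_avg_expit(t)          # == logit(mean(probs, axis=0))
#
# `_vec_pad(x)` prepends one entry equal to `value` along the last axis, e.g.
# turning an [n, 1] logit array into [n, 2] class logits whose first class has
# its logit pinned to `value`.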
@tfe.run_all_tests_in_graph_and_eager_modes
class EndToEndTest(tf.test.TestCase):
"""Test tfp.layers work in all three Keras APIs.
For end-to-end tests we fit a Variational Autoencoder (VAE) because this
requires chaining two Keras models, an encoder and decoder. Chaining two
  models is important because making the `Distribution` output by one Keras
  model the input of another Keras model--and concurrently fitting both--is the
  primary value-add of using `tfp.layers.DistributionLambda`. Otherwise,
under many circumstances you can directly return a Distribution from a Keras
layer, as long as the Distribution base class has a tensor conversion function
registered via `tf.register_tensor_conversion_function`.
  Fundamentally, there are three ways to build Keras models:
1. `tf.keras.Sequential`
2. Functional API
3. Subclass `tf.keras.Model`.
  It's important to have end-to-end tests for all three, because #1 and #2 call
`__call__` and `call` differently. (#3's call pattern depends on user
implementation details, but in general ends up being either #1 or #2.)
"""
def setUp(self):
self.encoded_size = 2
self.input_shape = [2, 2, 1]
self.train_size = 100
self.test_size = 100
self.x = np.random.rand(
self.train_size, *self.input_shape).astype(np.float32)
self.x_test = np.random.rand(
self.test_size, *self.input_shape).astype(np.float32)
# TODO(b/120307671): Once this bug is resolved, use
# `activity_regularizer=tfpl.KLDivergenceRegularizer` instead of
# `KLDivergenceAddLoss`.
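  # A hedged sketch of that alternative (assuming `KLDivergenceRegularizer`
  # mirrors the `KLDivergenceAddLoss` constructor arguments), using the same
  # prior that is passed to `KLDivergenceAddLoss` below:
  #
  #   tfpl.MultivariateNormalTriL(
  #       self.encoded_size,
  #       activity_regularizer=tfpl.KLDivergenceRegularizer(
  #           prior, weight=self.train_size))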
def test_keras_sequential_api(self):
"""Test `DistributionLambda`s are composable via Keras `Sequential` API."""
encoder_model = tfk.Sequential([
tfkl.InputLayer(input_shape=self.input_shape),
tfkl.Flatten(),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
tfpl.MultivariateNormalTriL(self.encoded_size),
tfpl.KLDivergenceAddLoss(
tfd.Independent(tfd.Normal(loc=[0., 0], scale=1),
reinterpreted_batch_ndims=1),
weight=self.train_size),
])
decoder_model = tfk.Sequential([
tfkl.InputLayer(input_shape=[self.encoded_size]),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.IndependentBernoulli.params_size(self.input_shape)),
tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
])
vae_model = tfk.Model(
inputs=encoder_model.inputs,
outputs=decoder_model(encoder_model.outputs[0]))
vae_model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda x, rv_x: -rv_x.log_prob(x),
metrics=[])
vae_model.fit(self.x, self.x,
batch_size=25,
epochs=1,
verbose=True,
validation_data=(self.x_test, self.x_test),
shuffle=True)
yhat = vae_model(tf.convert_to_tensor(self.x_test))
self.assertIsInstance(yhat, tfd.Independent)
self.assertIsInstance(yhat.distribution, tfd.Bernoulli)
def test_keras_functional_api(self):
"""Test `DistributionLambda`s are composable via Keras functional API."""
encoder_model = [
tfkl.Flatten(),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(
self.encoded_size)),
tfpl.MultivariateNormalTriL(self.encoded_size),
tfpl.KLDivergenceAddLoss(
tfd.Independent(tfd.Normal(loc=[0., 0], scale=1),
reinterpreted_batch_ndims=1),
weight=self.train_size),
]
decoder_model = [
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.IndependentBernoulli.params_size(self.input_shape)),
tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
]
images = tfkl.Input(shape=self.input_shape)
encoded = functools.reduce(lambda x, f: f(x), encoder_model, images)
decoded = functools.reduce(lambda x, f: f(x), decoder_model, encoded)
vae_model = tfk.Model(inputs=images, outputs=decoded)
vae_model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda x, rv_x: -rv_x.log_prob(x),
metrics=[])
vae_model.fit(self.x, self.x,
batch_size=25,
epochs=1,
verbose=True,
validation_data=(self.x_test, self.x_test),
shuffle=True)
yhat = vae_model(tf.convert_to_tensor(self.x_test))
self.assertIsInstance(yhat, tfd.Independent)
self.assertIsInstance(yhat.distribution, tfd.Bernoulli)
def test_keras_model_api(self):
"""Test `DistributionLambda`s are composable via Keras `Model` API."""
class Encoder(tfk.Model):
"""Encoder."""
def __init__(self, input_shape, encoded_size, train_size):
super(Encoder, self).__init__()
self._layers = [
tfkl.Flatten(),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(encoded_size)),
tfpl.MultivariateNormalTriL(encoded_size),
tfpl.KLDivergenceAddLoss(
tfd.Independent(tfd.Normal(loc=tf.zeros(encoded_size), scale=1),
reinterpreted_batch_ndims=1),
weight=train_size),
]
def call(self, inputs):
return functools.reduce(lambda x, f: f(x), self._layers, inputs)
class Decoder(tfk.Model):
"""Decoder."""
def __init__(self, output_shape):
super(Decoder, self).__init__()
self._layers = [
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.IndependentBernoulli.params_size(output_shape)),
tfpl.IndependentBernoulli(output_shape, tfd.Bernoulli.logits),
]
def call(self, inputs):
return functools.reduce(lambda x, f: f(x), self._layers, inputs)
encoder = Encoder(self.input_shape, self.encoded_size, self.train_size)
decoder = Decoder(self.input_shape)
images = tfkl.Input(shape=self.input_shape)
encoded = encoder(images)
decoded = decoder(encoded)
vae_model = tfk.Model(inputs=images, outputs=decoded)
vae_model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda x, rv_x: -rv_x.log_prob(x),
metrics=[])
vae_model.fit(self.x, self.x,
batch_size=25,
epochs=1,
validation_data=(self.x_test, self.x_test))
yhat = vae_model(tf.convert_to_tensor(self.x_test))
self.assertIsInstance(yhat, tfd.Independent)
self.assertIsInstance(yhat.distribution, tfd.Bernoulli)
def test_keras_sequential_api_multiple_draws(self):
num_draws = 2
encoder_model = tfk.Sequential([
tfkl.InputLayer(input_shape=self.input_shape),
tfkl.Flatten(),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.MultivariateNormalTriL.params_size(self.encoded_size)),
tfpl.MultivariateNormalTriL(self.encoded_size,
lambda s: s.sample(num_draws, seed=42)),
tfpl.KLDivergenceAddLoss(
# TODO(b/119756336): Due to eager/graph Jacobian graph caching bug
# we add here the capability for deferred construction of the prior.
lambda: tfd.MultivariateNormalDiag(loc=tf.zeros(self.encoded_size)),
weight=self.train_size),
])
decoder_model = tfk.Sequential([
tfkl.InputLayer(input_shape=[self.encoded_size]),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.IndependentBernoulli.params_size(
self.input_shape)),
tfkl.Lambda(_logit_avg_expit), # Same as averaging the Bernoullis.
tfpl.IndependentBernoulli(self.input_shape, tfd.Bernoulli.logits),
])
vae_model = tfk.Model(
inputs=encoder_model.inputs,
outputs=decoder_model(encoder_model.outputs[0]))
vae_model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda x, rv_x: -rv_x.log_prob(x),
metrics=[])
vae_model.fit(self.x, self.x,
batch_size=25,
epochs=1,
steps_per_epoch=1, # Usually `n // batch_size`.
validation_data=(self.x_test, self.x_test))
yhat = vae_model(tf.convert_to_tensor(self.x_test))
self.assertIsInstance(yhat, tfd.Independent)
self.assertIsInstance(yhat.distribution, tfd.Bernoulli)
@tfe.run_all_tests_in_graph_and_eager_modes
class KLDivergenceAddLoss(tf.test.TestCase):
def test_approx_kl(self):
# TODO(b/120320323): Enable this test in eager.
if tf.executing_eagerly(): return
event_size = 2
prior = tfd.MultivariateNormalDiag(loc=tf.zeros(event_size))
model = tfk.Sequential([
tfpl.MultivariateNormalTriL(event_size,
lambda s: s.sample(int(1e3), seed=42)),
tfpl.KLDivergenceAddLoss(prior, test_points_reduce_axis=0),
])
loc = [-1., 1.]
scale_tril = [[1.1, 0.],
[0.2, 1.3]]
actual_kl = tfd.kl_divergence(
tfd.MultivariateNormalTriL(loc, scale_tril), prior)
x = tf.concat(
[loc, tfb.ScaleTriL().inverse(scale_tril)], axis=0)[tf.newaxis]
y = model(x)
self.assertEqual(1, len(model.losses))
y = model(x)
self.assertEqual(2, len(model.losses))
[loc_, scale_tril_, actual_kl_, approx_kl_] = self.evaluate([
y.loc, y.scale.to_dense(), actual_kl, model.losses[0]])
self.assertAllClose([loc], loc_, atol=0., rtol=1e-5)
self.assertAllClose([scale_tril], scale_tril_, atol=0., rtol=1e-5)
self.assertNear(actual_kl_, approx_kl_, err=0.15)
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda x, dist: -dist.log_prob(x[0, :event_size]),
metrics=[])
model.fit(x, x,
batch_size=25,
epochs=1,
steps_per_epoch=1) # Usually `n // batch_size`.
@tfe.run_all_tests_in_graph_and_eager_modes
class MultivariateNormalTriLTest(tf.test.TestCase):
def _check_distribution(self, t, x):
self.assertIsInstance(x, tfd.MultivariateNormalTriL)
t_back = tf.concat([
x.loc, tfb.ScaleTriL().inverse(x.scale.to_dense())], axis=-1)
self.assertAllClose(*self.evaluate([t, t_back]), atol=1e-6, rtol=1e-5)
def test_new(self):
d = 4
p = tfpl.MultivariateNormalTriL.params_size(d)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = tfpl.MultivariateNormalTriL.new(t, d, validate_args=True)
self._check_distribution(t, x)
def test_layer(self):
d = 4
p = tfpl.MultivariateNormalTriL.params_size(d)
layer = tfpl.MultivariateNormalTriL(d, tfd.Distribution.mean)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = layer(t)
self._check_distribution(t, x)
def test_doc_string(self):
# Load data.
n = int(1e3)
scale_tril = np.array([[1.6180, 0.],
[-2.7183, 3.1416]]).astype(np.float32)
scale_noise = 0.01
x = tfd.Normal(loc=0, scale=1).sample([n, 2])
eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
y = tf.matmul(x, scale_tril) + eps
# To save testing time, let's encode the answer (i.e., _cheat_). Note: in
# writing this test we verified the correct answer is achieved with random
# initialization.
true_kernel = np.pad(scale_tril, [[0, 0], [0, 3]], 'constant')
true_bias = np.array([0, 0, np.log(scale_noise), 0, np.log(scale_noise)])
# Create model.
d = tf.dimension_value(y.shape[-1])
model = tf.keras.Sequential([
tf.keras.layers.Dense(
tfpl.MultivariateNormalTriL.params_size(d),
kernel_initializer=lambda s, **_: true_kernel,
bias_initializer=lambda s, **_: true_bias),
tfpl.MultivariateNormalTriL(d),
])
# Fit.
model.compile(optimizer=tf.train.AdamOptimizer(),
loss=lambda y, model: -model.log_prob(y),
metrics=[])
batch_size = 100
model.fit(x, y,
batch_size=batch_size,
epochs=1, # One ping only.
steps_per_epoch=n // batch_size)
self.assertAllClose(true_kernel, model.get_weights()[0],
atol=1e-2, rtol=1e-3)
self.assertAllClose(true_bias, model.get_weights()[1],
atol=1e-2, rtol=1e-3)
@tfe.run_all_tests_in_graph_and_eager_modes
class OneHotCategoricalTest(tf.test.TestCase):
def _check_distribution(self, t, x):
self.assertIsInstance(x, tfd.OneHotCategorical)
[t_, x_logits_, x_probs_, mean_] = self.evaluate([
t, x.logits, x.probs, x.mean()])
self.assertAllClose(t_, x_logits_, atol=1e-6, rtol=1e-5)
self.assertAllClose(x_probs_, mean_, atol=1e-6, rtol=1e-5)
def test_new(self):
d = 4
p = tfpl.OneHotCategorical.params_size(d)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = tfpl.OneHotCategorical.new(t, d, validate_args=True)
self._check_distribution(t, x)
def test_layer(self):
d = 4
p = tfpl.OneHotCategorical.params_size(d)
layer = tfpl.OneHotCategorical(d, validate_args=True)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = layer(t)
self._check_distribution(t, x)
def test_doc_string(self):
# Load data.
n = int(1e4)
scale_noise = 0.01
x = tfd.Normal(loc=0, scale=1).sample([n, 2])
eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
y = tfd.OneHotCategorical(
logits=_vec_pad(
0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
dtype=tf.float32).sample()
# Create model.
d = tf.dimension_value(y.shape[-1])
model = tf.keras.Sequential([
tf.keras.layers.Dense(tfpl.OneHotCategorical.params_size(d) - 1),
tf.keras.layers.Lambda(_vec_pad),
tfpl.OneHotCategorical(d),
])
# Fit.
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),
loss=lambda y, model: -model.log_prob(y),
metrics=[])
batch_size = 100
model.fit(x, y,
batch_size=batch_size,
epochs=1,
steps_per_epoch=n // batch_size,
shuffle=True)
self.assertAllClose([[1.6180], [-2.7183]], model.get_weights()[0],
atol=0, rtol=0.1)
@tfe.run_all_tests_in_graph_and_eager_modes
class CategoricalMixtureOfOneHotCategoricalTest(tf.test.TestCase):
def _check_distribution(self, t, x):
self.assertIsInstance(x, tfd.MixtureSameFamily)
self.assertIsInstance(x.mixture_distribution, tfd.Categorical)
self.assertIsInstance(x.components_distribution, tfd.OneHotCategorical)
t_back = tf.concat([
x.mixture_distribution.logits,
tf.reshape(x.components_distribution.logits, shape=[2, 3, -1]),
], axis=-1)
[
t_, t_back_,
x_mean_, x_log_mean_,
sample_mean_,
] = self.evaluate([
t, t_back,
x.mean(), x.log_mean(),
tf.reduce_mean(x.sample(int(10e3), seed=42), axis=0),
])
self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
self.assertAllClose(x_mean_, np.exp(x_log_mean_), atol=1e-6, rtol=1e-5)
self.assertAllClose(sample_mean_, x_mean_, atol=1e-6, rtol=0.1)
def test_new(self):
k = 2 # num components
d = 4 # event size
p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = tfpl.CategoricalMixtureOfOneHotCategorical.new(
t, d, k, validate_args=True)
self._check_distribution(t, x)
def test_layer(self):
k = 2 # num components
d = 4 # event size
p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
layer = tfpl.CategoricalMixtureOfOneHotCategorical(
d, k, validate_args=True)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = layer(t)
self._check_distribution(t, x)
def test_doc_string(self):
# Load data.
n = int(1e3)
scale_noise = 0.01
x = tfd.Normal(loc=0, scale=1).sample([n, 2])
eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 1])
y = tfd.OneHotCategorical(
logits=_vec_pad(
0.3142 + 1.6180 * x[..., :1] - 2.7183 * x[..., 1:] + eps),
dtype=tf.float32).sample()
# Create model.
d = tf.dimension_value(y.shape[-1])
k = 2
p = tfpl.CategoricalMixtureOfOneHotCategorical.params_size(d, k)
model = tf.keras.Sequential([
tf.keras.layers.Dense(p),
tfpl.CategoricalMixtureOfOneHotCategorical(d, k),
])
# Fit.
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),
loss=lambda y, model: -model.log_prob(y),
metrics=[])
batch_size = 100
model.fit(x, y,
batch_size=batch_size,
epochs=1,
steps_per_epoch=n // batch_size,
shuffle=True)
yhat = model(x)
self.assertIsInstance(yhat, tfd.MixtureSameFamily)
self.assertIsInstance(yhat.mixture_distribution, tfd.Categorical)
self.assertIsInstance(yhat.components_distribution, tfd.OneHotCategorical)
# TODO(b/120221303): For now we just check that the code executes and we get
# back a distribution instance. Better would be to change the data
# generation so the model becomes well-specified (and we can check correctly
# fitted params). However, not doing this test is not critical since all
# components are unit-tested. (Ie, what we really want here--but don't
# strictly need--is another end-to-end test.)
@tfe.run_all_tests_in_graph_and_eager_modes
class IndependentBernoulliTest(tf.test.TestCase):
def _check_distribution(self, t, x):
self.assertIsInstance(x, tfd.Independent)
self.assertIsInstance(x.distribution, tfd.Bernoulli)
t_back = tf.reshape(x.distribution.logits, shape=[2, 3, -1])
[
t_, t_back_,
x_logits_, x_dist_logits_,
x_probs_, x_dist_probs_,
] = self.evaluate([
t, t_back,
x._logits, x.distribution.logits,
x._probs, x.distribution.probs,
])
self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
self.assertAllClose(x_logits_, x_dist_logits_, atol=1e-6, rtol=1e-5)
self.assertAllClose(x_probs_, x_dist_probs_, atol=1e-6, rtol=1e-5)
def test_new(self):
event_shape = [2, 3, 1]
p = tfpl.IndependentBernoulli.params_size(event_shape)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = tfpl.IndependentBernoulli.new(
t, event_shape, validate_args=True)
self._check_distribution(t, x)
def test_layer(self):
event_shape = [2, 3, 1]
p = tfpl.IndependentBernoulli.params_size(event_shape)
layer = tfpl.IndependentBernoulli(event_shape, validate_args=True)
t = tfd.Normal(0, 1).sample([2, 3, p], seed=42)
x = layer(t)
self._check_distribution(t, x)
def test_doc_string(self):
# Load data.
n = int(1e4)
scale_tril = np.array([[1.6180, 0.],
[-2.7183, 3.1416]]).astype(np.float32)
scale_noise = 0.01
x = tfd.Normal(loc=0, scale=1).sample([n, 2])
eps = tfd.Normal(loc=0, scale=scale_noise).sample([n, 2])
y = tfd.Bernoulli(logits=tf.reshape(
tf.matmul(x, scale_tril) + eps,
shape=[n, 1, 2, 1])).sample()
# Create model.
event_shape = y.shape[1:].as_list()
model = tf.keras.Sequential([
tf.keras.layers.Dense(
tfpl.IndependentBernoulli.params_size(event_shape)),
tfpl.IndependentBernoulli(event_shape),
])
# Fit.
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.5),
loss=lambda y, model: -model.log_prob(y),
metrics=[])
batch_size = 100
model.fit(x, y,
batch_size=batch_size,
epochs=1,
steps_per_epoch=n // batch_size,
shuffle=True)
self.assertAllClose(scale_tril, model.get_weights()[0],
atol=0.05, rtol=0.05)
self.assertAllClose([0., 0.], model.get_weights()[1],
atol=0.05, rtol=0.05)
@tfe.run_all_tests_in_graph_and_eager_modes
class _IndependentNormalTest(object):
def _build_tensor(self, ndarray, dtype=None):
# Enforce parameterized dtype and static/dynamic testing.
ndarray = np.asarray(ndarray).astype(
dtype if dtype is not None else self.dtype)
return tf.placeholder_with_default(
input=ndarray, shape=ndarray.shape if self.use_static_shape else None)
def _check_distribution(self, t, x, batch_shape):
self.assertIsInstance(x, tfd.Independent)
self.assertIsInstance(x.distribution, tfd.Normal)
t_back = tf.concat([
tf.reshape(x.distribution.loc, tf.concat([batch_shape, [-1]], axis=-1)),
tfd.softplus_inverse(tf.reshape(
x.distribution.scale, tf.concat([batch_shape, [-1]], axis=-1)))
], -1)
[t_, t_back_] = self.evaluate([t, t_back])
self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
def test_new(self):
batch_shape = self._build_tensor([2], dtype=np.int32)
event_shape = self._build_tensor([2, 1, 2], dtype=np.int32)
low = self._build_tensor(-3.)
high = self._build_tensor(3.)
p = tfpl.IndependentNormal.params_size(event_shape)
t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
x = tfpl.IndependentNormal.new(
t, event_shape, validate_args=True)
self._check_distribution(t, x, batch_shape)
def test_layer(self):
batch_shape = self._build_tensor([7, 3], dtype=np.int32)
low = self._build_tensor(-3.)
high = self._build_tensor(3.)
p = tfpl.IndependentNormal.params_size()
layer = tfpl.IndependentNormal(validate_args=True)
t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
x = layer(t)
self._check_distribution(t, x, batch_shape)
def test_keras_sequential_with_unknown_input_size(self):
input_shape = [28, 28, 1]
encoded_shape = self._build_tensor([2], dtype=np.int32)
params_size = tfpl.IndependentNormal.params_size(encoded_shape)
def reshape(x):
return tf.reshape(x, tf.concat([tf.shape(x)[:-1], [-1, params_size]], 0))
# Test a Sequential model where the input to IndependentNormal does not have
# a statically-known shape.
encoder = tfk.Sequential([
tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
tfkl.Flatten(),
tfkl.Dense(12, activation='relu'),
tfkl.Lambda(reshape),
# When encoded_shape/params_size are placeholders, the input to the
# IndependentNormal has shape (?, ?, ?) or (1, ?, ?), depending on
# whether or not encoded_shape's shape is known.
tfpl.IndependentNormal(encoded_shape),
tfkl.Lambda(lambda x: x + 0.) # To force conversion to tensor.
])
x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
self.assertEqual((1, 3, 2), encoder.predict_on_batch(x).shape)
out = encoder(tf.convert_to_tensor(x))
if tf.executing_eagerly():
self.assertEqual((1, 3, 2), out.shape)
elif self.use_static_shape:
self.assertEqual([1, None, None], out.shape.as_list())
self.assertEqual((1, 3, 2), self.evaluate(out).shape)
@tfe.run_all_tests_in_graph_and_eager_modes
class IndependentNormalTestDynamicShape(tf.test.TestCase,
_IndependentNormalTest):
dtype = np.float32
use_static_shape = False
@tfe.run_all_tests_in_graph_and_eager_modes
class IndependentNormalTestStaticShape(tf.test.TestCase,
_IndependentNormalTest):
dtype = np.float64
use_static_shape = True
def test_doc_string(self):
input_shape = [28, 28, 1]
encoded_shape = 2
encoder = tfk.Sequential([
tfkl.InputLayer(input_shape=input_shape, dtype=self.dtype),
tfkl.Flatten(),
tfkl.Dense(10, activation='relu'),
tfkl.Dense(tfpl.IndependentNormal.params_size(encoded_shape)),
tfpl.IndependentNormal(encoded_shape),
tfkl.Lambda(lambda x: x + 0.) # To force conversion to tensor.
])
# Test that we can run the model and get a sample.
x = np.random.randn(*([1] + input_shape)).astype(self.dtype)
self.assertEqual((1, 2), encoder.predict_on_batch(x).shape)
out = encoder(tf.convert_to_tensor(x))
self.assertEqual((1, 2), out.shape)
self.assertEqual((1, 2), self.evaluate(out).shape)
@tfe.run_all_tests_in_graph_and_eager_modes
class _MixtureSameFamilyTest(object):
def _build_tensor(self, ndarray, dtype=None):
# Enforce parameterized dtype and static/dynamic testing.
ndarray = np.asarray(ndarray).astype(
dtype if dtype is not None else self.dtype)
return tf.placeholder_with_default(
input=ndarray, shape=ndarray.shape if self.use_static_shape else None)
def _check_distribution(self, t, x, batch_shape):
self.assertIsInstance(x, tfd.MixtureSameFamily)
self.assertIsInstance(x.mixture_distribution, tfd.Categorical)
self.assertIsInstance(x.components_distribution, tfd.MultivariateNormalTriL)
shape = tf.concat([batch_shape, [-1]], axis=0)
batch_and_n_shape = tf.concat(
[tf.shape(x.mixture_distribution.logits), [-1]], axis=0)
cd = x.components_distribution
t_back = tf.concat([
x.mixture_distribution.logits,
tf.reshape(tf.concat([
tf.reshape(cd.loc, batch_and_n_shape),
tf.reshape(tfb.ScaleTriL().inverse(cd.scale.to_dense()),
batch_and_n_shape),
], axis=-1), shape),
], axis=-1)
[t_, t_back_] = self.evaluate([t, t_back])
self.assertAllClose(t_, t_back_, atol=1e-6, rtol=1e-5)
def test_new(self):
n = self._build_tensor(4, dtype=np.int32)
batch_shape = self._build_tensor([4, 2], dtype=np.int32)
event_size = self._build_tensor(3, dtype=np.int32)
low = self._build_tensor(-3.)
high = self._build_tensor(3.)
cps = tfpl.MultivariateNormalTriL.params_size(event_size)
p = tfpl.MixtureSameFamily.params_size(n, cps)
t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True)
x = tfpl.MixtureSameFamily.new(t, n, normal, validate_args=True)
self._check_distribution(t, x, batch_shape)
def test_layer(self):
n = self._build_tensor(3, dtype=np.int32)
batch_shape = self._build_tensor([7, 3], dtype=np.int32)
event_size = self._build_tensor(4, dtype=np.int32)
low = self._build_tensor(-3.)
high = self._build_tensor(3.)
cps = tfpl.MultivariateNormalTriL.params_size(event_size)
p = tfpl.MixtureSameFamily.params_size(n, cps)
normal = tfpl.MultivariateNormalTriL(event_size, validate_args=True)
layer = tfpl.MixtureSameFamily(n, normal, validate_args=True)
t = tfd.Uniform(low, high).sample(tf.concat([batch_shape, [p]], 0), seed=42)
x = layer(t)
self._check_distribution(t, x, batch_shape)
def test_doc_string(self):
# Load data (graph of a cardioid).
n = 2000
t = tfd.Uniform(low=-np.pi, high=np.pi).sample([n, 1])
r = 2 * (1 - tf.cos(t))
x = r * tf.sin(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])
y = r * tf.cos(t) + tfd.Normal(loc=0., scale=0.1).sample([n, 1])
# Model the distribution of y given x with a Mixture Density Network.
event_shape = self._build_tensor([1], dtype=np.int32)
num_components = self._build_tensor(5, dtype=np.int32)
params_size = tfpl.MixtureSameFamily.params_size(
num_components, tfpl.IndependentNormal.params_size(event_shape))
model = tfk.Sequential([
tfkl.Dense(12, activation='relu'),
        # NOTE: We must hard-code 15 below, instead of using `params_size`,
        # because the first argument to `tfkl.Dense` must be an integer (and
        # not, e.g., a placeholder tensor). Here 15 = 5 mixture logits plus
        # 5 components * 2 parameters (loc, scale) for each IndependentNormal.
tfkl.Dense(15, activation=None),
tfpl.MixtureSameFamily(num_components,
tfpl.IndependentNormal(event_shape)),
])
# Fit.
batch_size = 100
model.compile(optimizer=tf.train.AdamOptimizer(learning_rate=0.02),
loss=lambda y, model: -model.log_prob(y))
model.fit(x, y,
batch_size=batch_size,
epochs=1,
steps_per_epoch=n // batch_size)
self.assertEqual(15, self.evaluate(tf.convert_to_tensor(params_size)))
@tfe.run_all_tests_in_graph_and_eager_modes
class MixtureSameFamilyTestDynamicShape(tf.test.TestCase,
_MixtureSameFamilyTest):
dtype = np.float32
use_static_shape = False
@tfe.run_all_tests_in_graph_and_eager_modes
class MixtureSameFamilyTestStaticShape(tf.test.TestCase,
_MixtureSameFamilyTest):
dtype = np.float32
use_static_shape = True
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
]
| |
4cb9f2aca7bafdf2b4cef1d0d23c4d476b321127 | 64c5d603fa97a22afc3cd010858b00bb26da795f | /breezy/settings.py | 2fbd840e22de3b5780bb7b4261c48a3bdd6f3388 | []
| no_license | welz-atm/MusicPlayerAppDjangoandBulma | 7360be06333ad37b8f94c397e06b698b24db8519 | 4194b12d865e123a8b2bcfd6ec7660d1a59b145e | refs/heads/main | 2023-01-23T06:58:15.811108 | 2020-12-10T17:27:54 | 2020-12-10T17:27:54 | 310,572,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | """
Django settings for breezy project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
from dotenv import load_dotenv
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
env_path = Path(".") / ".env"
load_dotenv(dotenv_path=env_path)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY WARNING: don't run with debug turned on in production!
SECRET_KEY = os.getenv('SECRET_KEY')
# Environment variables are strings, so compare explicitly to get a boolean
# (otherwise any non-empty value -- including "False" -- would enable DEBUG).
DEBUG = os.getenv('DEBUG', 'False') == 'True'
ALLOWED_HOSTS = ['jazbreezy.herokuapp.com']
AUTH_USER_MODEL = 'authentication.CustomUser'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'authentication',
'music',
'celery',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'breezy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'template')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'breezy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('DB_NAME'),
'USER': os.getenv('DB_USER'),
'PASSWORD': os.getenv('DB_PASSWORD'),
'HOST': os.getenv('DB_HOST'),
'PORT': os.getenv('DB_PORT'),
}
}
prod_db = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(prod_db)
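# dj_database_url.config() reads the DATABASE_URL environment variable (e.g. on
# Heroku); when it is set, those values override the credentials above, and
# conn_max_age=500 keeps database connections open for up to 500 seconds.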
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = 'media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage' | [
"[email protected]"
]
| |
66dbd62812d3b9270e01ed5891186f2b6506efa8 | 0f141dd46f9214dec5d3f68e1ea0bea117460300 | /scripts/extractReviews.py | 5545f5d825500e5bafd3f91e058a09eb297b4ca7 | []
| no_license | Libardo1/YelpAcademicDataAnalysis | af28a8aace39a7a802ac1276067775c201f7213d | a4084fba88907f0a6bdd847346a0ed9b20a1888a | refs/heads/master | 2021-01-19T07:21:08.697977 | 2014-02-03T13:47:06 | 2014-02-03T13:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | import pandas as pd
import nltk
def main():
global daysOfWeek
daysOfWeek = {0:'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4:'Thursday', 5:'Friday', 6:'Saturday'}
df = pd.read_csv('../data/processed/yelp_review.tsv',sep = '\t')
df.apply(exportReviewText, axis = 1)
def exportReviewText(row):
row = row.values
id_review = row[0]
weekday = daysOfWeek[row[6]]
rev = row[12]
path = '../data/reviews/' + weekday + '/' + id_review + '.txt'
    with open(path, 'w') as f:
        f.write(str(rev))
if __name__ == "__main__":
main() | [
"[email protected]"
]
| |
c889a7755276b858a3522aeab8b76a0881280e68 | 14c1d9e436e19893491e66c51e25834e40bed294 | /flask_app/legacy/__init__.py | 9528e0f01a19bc5a92f09fbc766451ff1db47534 | [
"MIT"
]
| permissive | crazynayan/za2019 | 90bfba8e05f2e747b4adcc96bcb31c47dd85d169 | 7f8e71b15a81dd1c3162a4fa42a38d0b7ff15f10 | refs/heads/master | 2022-12-13T12:05:20.071945 | 2021-08-14T08:49:42 | 2021-08-14T08:49:42 | 224,970,397 | 0 | 0 | MIT | 2022-12-08T10:54:53 | 2019-11-30T06:32:49 | Python | UTF-8 | Python | false | false | 101 | py | from flask import Blueprint
bp = Blueprint("legacy", __name__)
from flask_app.legacy import routes
| [
"[email protected]"
]
| |
ae975c775649babd03b37a818cb64b731b2f5297 | 151a1ffebbc162c16a14dade1db27d5815caca6a | /todolist/tests.py | a3c1c4134fe588712f95fb837fa63585f39d0b72 | []
| no_license | JordanField/Bizzorg-Server | 1958c01f715ef8811d27906191c7eb1e89e049d1 | 6396256f8e1d6203cb3b353492e3730af5e95474 | refs/heads/master | 2020-04-21T21:22:48.281391 | 2019-02-09T14:53:21 | 2019-02-09T14:53:21 | 169,877,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,098 | py | from django.test import TestCase
from .models import ToDoList, ToDoListItem
from groups.models import EmployeeGroup, GroupMembership
from django.contrib.auth.models import User
from django.core.exceptions import FieldError
class ToDoListTestCase(TestCase):
def test_that_delegated_employee_must_be_member_of_to_do_list_group(self):
testGroup = EmployeeGroup(name='test_group')
testGroup.save()
testUserInGroup = User(username='test_user_in_group')
testUserInGroup.save()
testMembership = GroupMembership(group=testGroup, user=testUserInGroup, admin_privileges=False)
testMembership.save()
testUserNotInGroup = User(username='test_user_not_in_group')
testUserNotInGroup.save()
testToDoList = testGroup.to_do_list
testToDoListItem = ToDoListItem(to_do_list=testToDoList, title='test_item')
testToDoListItem.save()
testToDoListItem.employees.add(testUserInGroup)
with self.assertRaises(FieldError):
testToDoListItem.employees.add(testUserNotInGroup) | [
"[email protected]"
]
| |
062a7d5952bd18457d17b100b6ca360af17c1b47 | 6336c7aaafa5bcb58477fb51231f4348d32f78f6 | /drawing_and_style_transfer/util/visualizer.py | cc4b9badf7749f2baee317f01669149938f5180c | [
"MIT",
"BSD-2-Clause"
]
| permissive | guy-oren/OneShotTranslationExt | c1475ab06c3c685f1f5987c4fe3e7da41f62ddf9 | b4a8c54105f7f41e71f86a6aa285ec702fee09eb | refs/heads/master | 2020-04-02T16:35:28.071314 | 2018-11-05T06:59:32 | 2018-11-05T06:59:32 | 154,619,413 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,788 | py | import numpy as np
import os
import ntpath
import time
from . import util
from . import html
from scipy.misc import imresize
class Visualizer():
def __init__(self, opt):
# self.opt = opt
self.display_id = opt.display_id
self.use_html = opt.isTrain and not opt.no_html
self.win_size = opt.display_winsize
self.name = opt.name
self.opt = opt
self.saved = False
if self.display_id > 0:
import visdom
self.vis = visdom.Visdom(server=opt.display_server, port=opt.display_port)
if self.use_html:
self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
self.img_dir = os.path.join(self.web_dir, 'images')
print('create web directory %s...' % self.web_dir)
util.mkdirs([self.web_dir, self.img_dir])
self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
with open(self.log_name, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
def reset(self):
self.saved = False
# |visuals|: dictionary of images to display or save
def display_current_results(self, visuals, epoch, save_result):
if self.display_id > 0: # show images in the browser
ncols = self.opt.display_single_pane_ncols
if ncols > 0:
h, w = next(iter(visuals.values())).shape[:2]
table_css = """<style>
table {border-collapse: separate; border-spacing:4px; white-space:nowrap; text-align:center}
table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
</style>""" % (w, h)
title = self.name
label_html = ''
label_html_row = ''
nrows = int(np.ceil(len(visuals.items()) / ncols))
images = []
idx = 0
for label, image_numpy in visuals.items():
label_html_row += '<td>%s</td>' % label
images.append(image_numpy.transpose([2, 0, 1]))
idx += 1
if idx % ncols == 0:
label_html += '<tr>%s</tr>' % label_html_row
label_html_row = ''
white_image = np.ones_like(image_numpy.transpose([2, 0, 1])) * 255
while idx % ncols != 0:
images.append(white_image)
label_html_row += '<td></td>'
idx += 1
if label_html_row != '':
label_html += '<tr>%s</tr>' % label_html_row
# pane col = image row
self.vis.images(images, nrow=ncols, win=self.display_id + 1,
padding=2, opts=dict(title=title + ' images'))
label_html = '<table>%s</table>' % label_html
self.vis.text(table_css + label_html, win=self.display_id + 2,
opts=dict(title=title + ' labels'))
else:
idx = 1
for label, image_numpy in visuals.items():
self.vis.image(image_numpy.transpose([2, 0, 1]), opts=dict(title=label),
win=self.display_id + idx)
idx += 1
if self.use_html and (save_result or not self.saved): # save images to a html file
self.saved = True
for label, image_numpy in visuals.items():
img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
util.save_image(image_numpy, img_path)
# update website
webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
for n in range(epoch, 0, -1):
webpage.add_header('epoch [%d]' % n)
ims = []
txts = []
links = []
for label, image_numpy in visuals.items():
img_path = 'epoch%.3d_%s.png' % (n, label)
ims.append(img_path)
txts.append(label)
links.append(img_path)
webpage.add_images(ims, txts, links, width=self.win_size)
webpage.save()
# errors: dictionary of error labels and values
def plot_current_errors(self, epoch, counter_ratio, opt, errors):
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(errors.keys())}
self.plot_data['X'].append(epoch + counter_ratio)
self.plot_data['Y'].append([errors[k] for k in self.plot_data['legend']])
self.vis.line(
X=np.stack([np.array(self.plot_data['X'])] * len(self.plot_data['legend']), 1),
Y=np.array(self.plot_data['Y']),
opts={
'title': self.name + ' loss over time',
'legend': self.plot_data['legend'],
'xlabel': 'epoch',
'ylabel': 'loss'},
win=self.display_id)
# errors: same format as |errors| of plotCurrentErrors
def print_current_errors(self, epoch, i, errors, t, t_data):
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, i, t, t_data)
for k, v in errors.items():
message += '%s: %.3f ' % (k, v)
print(message)
with open(self.log_name, "a") as log_file:
log_file.write('%s\n' % message)
# save image to the disk
def save_images(self, webpage, visuals, image_path, aspect_ratio=1.0, index=None, split=1):
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
if index is not None:
name_splits = name.split("_")
if split == 0:
name = str(index)
else:
name = str(index) + "_" + name_splits[split]
webpage.add_header(name)
ims = []
txts = []
links = []
for label, im in visuals.items():
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
h, w, _ = im.shape
if aspect_ratio > 1.0:
im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
if aspect_ratio < 1.0:
im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
util.save_image(im, save_path)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=self.win_size)
| [
"[email protected]"
]
| |
2614d9c46b453939b0a72f979dfa8b427fa9e68c | 15d29433c552e68215fc8a54e8f178f5c01dea67 | /orders/migrations/0004_foundry.py | 02b709cb14ce058a8487f7f81a64e7b25a79258c | []
| no_license | Jack-Xu-1996/Shapes-May-2020 | 3dbbd7d55c074505707b4b00a3259ad35e0bc3bc | 4870f6ba233c9c2f838f2ef8bc94768c664b3e26 | refs/heads/master | 2022-12-21T21:14:31.993703 | 2020-09-24T16:10:19 | 2020-09-24T16:10:19 | 266,877,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,610 | py | # Generated by Django 2.0 on 2020-04-21 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0003_auto_20200408_1546'),
]
operations = [
migrations.CreateModel(
name='Foundry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(blank=True, null=True)),
('furnace_number', models.IntegerField(blank=True, null=True)),
('heat_number', models.IntegerField(blank=True, null=True)),
('length', models.IntegerField(blank=True, null=True)),
('diameter', models.IntegerField(blank=True, null=True)),
('alloy', models.IntegerField(blank=True, null=True)),
('cast_qty', models.IntegerField(blank=True, null=True)),
('total_weight', models.FloatField(blank=True, default=0, null=True)),
('degass', models.IntegerField(blank=True, null=True)),
('cast_shift', models.CharField(max_length=50, null=True)),
('cast_speed', models.IntegerField(blank=True, null=True)),
('Mg', models.FloatField(blank=True, default=0, null=True)),
('Si', models.FloatField(blank=True, default=0, null=True)),
('Fe', models.FloatField(blank=True, default=0, null=True)),
('Cu', models.FloatField(blank=True, default=0, null=True)),
('Cr', models.FloatField(blank=True, default=0, null=True)),
('Mn', models.FloatField(blank=True, default=0, null=True)),
('Zn', models.FloatField(blank=True, default=0, null=True)),
('Ti', models.FloatField(blank=True, default=0, null=True)),
('Bo', models.FloatField(blank=True, default=0, null=True)),
('Mg_HL', models.CharField(max_length=50, null=True)),
('Si_HL', models.CharField(max_length=50, null=True)),
('Fe_HL', models.CharField(max_length=50, null=True)),
('Cu_HL', models.CharField(max_length=50, null=True)),
('Cr_HL', models.CharField(max_length=50, null=True)),
('Mn_HL', models.CharField(max_length=50, null=True)),
('Zn_HL', models.CharField(max_length=50, null=True)),
('Ti_HL', models.CharField(max_length=50, null=True)),
('Bo_HL', models.CharField(max_length=50, null=True)),
],
),
]
| [
"[email protected]"
]
| |
46d861589dadfbcbd06cff4a5f03b32e6884ad33 | 024a2fc57bb49fcc4defecdb7a0fa61933e14683 | /SecureFileSharing/settings.py | cc17cc1d80e6aac6391bea49047bf4353275e340 | []
| no_license | Sanju-S/SecureFileSharing | 3cd68568dfe7b54ed47a656c4e2f76f6be7290a5 | 592a2be4cdea844d1666fb8d6fb250166a2cc85a | refs/heads/master | 2022-12-10T08:23:19.839709 | 2020-09-13T23:31:01 | 2020-09-13T23:31:01 | 174,567,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | """
Django settings for SecureFileSharing project.
Generated by 'django-admin startproject' using Django 2.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '$1$Dj4oY3Qi$Jw3DyuZFUo2OpKD6FUl/w.'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'SecureFileSharing.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'account.context_processors.categories_processor',
],
},
},
]
WSGI_APPLICATION = 'SecureFileSharing.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
| [
"[email protected]"
]
| |
2cf39451572b8619fac70c570ebd20d92c803b10 | f5502b44cca7580201ae5b8119866a9fcdf2ffd8 | /Backend/myproject/myapp/admin.py | f8f3792a5b9491feb7038b82f281f150f7317aa0 | []
| no_license | aboli-naik/Assignment3 | 0bdf37ea337f43c9b3639b9951ce000b8ad0f009 | a5af11ae3e59bcc24a0001b28e76d11a66f85c73 | refs/heads/master | 2023-03-11T07:08:18.663440 | 2021-03-02T12:23:13 | 2021-03-02T12:23:13 | 343,751,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.contrib import admin
from .models import students,professor, courses,registration,evaluation
admin.site.register(students)
admin.site.register(professor)
admin.site.register(courses)
admin.site.register(registration)
admin.site.register(evaluation) | [
"[email protected]"
]
| |
590b6f5403f5043e59a7aec47dcce8c815e380ab | 4f327c57822ca3f43ee001e83040799f4721b97d | /coinchange.py | 970858f4ba3972e72c404d15ec69cac773ab7594 | []
| no_license | abhi8292/recursionAndDynamicProgramming | 3a4809d2ee10dd50b1a94dd7833201d149bee786 | 53234dd0c3ef17696ffe4909b0d7afed5cb476a1 | refs/heads/master | 2023-06-09T23:39:07.779056 | 2021-07-04T17:15:09 | 2021-07-04T17:15:09 | 330,320,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | class Solution:
def coinChange(self, coins: list[int], amount: int) -> int:
self.ans = []
self.minLen=amount+1
def option(coins, target, cur):
if target < 0:
return 0
if target == 0:
self.ans.append(cur.copy())
self.minLen = min(len(cur),self.minLen)
return
for i in coins:
cur.append(i)
option(coins,target-i,cur)
cur.pop()
option(coins,amount,[])
if self.minLen == amount+1:
return -1
else:
return self.minLen
print(Solution().coinChange([1,2,3,4],100)) | [
"[email protected]"
]
| |
f932c2b000f29193cb270c85efc77d991b8056b0 | 0f9f23561dca860ffc29b549d0a3c31ddcb0cd08 | /Coding Challenge/Day_1/Day1.py | 63d14b64922fae47d84707fc82175c0e190082ac | []
| no_license | jayshreevashistha/forskml | 17963047ed54bd18689e1571f9de25e2b3a0efc0 | bfc99f3bfd30c18d0c475f13fe1066c7d2d39991 | refs/heads/master | 2020-05-24T07:16:49.810102 | 2019-07-15T11:47:45 | 2019-07-15T11:47:45 | 187,156,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,140 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 7 11:52:56 2019
@author: computer
"""
"""
Code Challenge
Name:
Gas Mileage Calculator
Filename:
mileage_cal.py
Problem Statement:
Assume my car travels 100 Kilometres after putting 5 litres of fuel.
Calculate the average of my car.
Hint:
Divide kilometres by the litres used to get the average
"""
"""
Code Challenge
Name:
Ride Cost Calculator
Filename:
ridecost_cal.py
Problem Statement:
Assume you travel 80 km to and fro in a day.
Cost of Diesel per litre is 80 INR
Your vehicle Fuel Average is 18 km/litre.
Now calculate the cost of driving per day to office.
Hint:
"""
"""
Code Challenge
Name:
Weighted Score Calculator
Filename:
score_cal.py
Problem Statement:
Lets assume there are 3 assignments and 2 exams, each with max score of 100.
Respective weights are 10%, 10%, 10%, 35%, 35% .
Compute the weighted score based on individual assignment scores.
Hint:
weighted score = ( A1 + A2 + A3 ) *0.1 + (E1 + E2 ) * 0.35
"""
"""
Code Challenge
Name:
Name Printing Checkerboard pattern
Filename:
checker.py
Problem Statement:
Print the checkerboard pattern using escape Codes and Unicode
with multiple print statements and the multiplication operator
Hint:
Eight characters sequence in a line and
the next line should start with a space
try to use the * operator for printing
Input:
No input required
Output:
* * * * * * * *
* * * * * * * *
* * * * * * * *
* * * * * * * *
* * * * * * * *
* * * * * * * *
* * * * * * * *
"""
"""
Code Challenge
Name:
Facorial
Filename:
factorial.py
Problem Statement:
Find the factorial of a number.
Hint:
Factorial of 3 = 3 * 2 * 1= 6
Try to first find the function from math module using dir and help
Input:
Take the number from the keyboard as input from the user.
"""
"""
Code Challenge
Name:
Styling of String
Filename:
style.py
Problem Statement:
Convert to uppercase characters
Convert to lowercase characters
Convert to CamelCase or TitleCase.
Hint:
Try to find some function in the str class and see its help on how to use it.
Using dir and help functions
Input:
Take the name as input from the keyboard. ( SyLvEsTeR )
"""
"""
Code Challenge
Name:
Replacing of Characters
Filename:
restart.py
Problem Statement:
In a hardcoded string RESTART, replace all the R with $ except the first occurrence and print it.
Input:
RESTART
Output:
RESTA$T
"""
"""
Code Challenge
Name:
String Handling
Filename:
string.py
Problem Statement:
Take first and last name in single command from the user and print
them in reverse order with a space between them,
find the index using find/index function and then print using slicing concept of the index
Input:
Sylvester Fernandes
Output:
Fernandes Sylvester
"""
"""
Code Challenge
Name:
Formatted String
Filename:
format2.py
Problem Statement:
Write a program to print the output in the given format.
Take input from the user.
Hint:
Try to search for some function in the str class using help() and dir()
Input:
Welcome to Pink City Jaipur
Output:
Welcome*to*Pink*City*Jaipur
"""
"""
Code Challenge
Name:
Formatted String
Filename:
format3.py
Problem Statement:
Write a program to print the output in the given format.
Take input from the user.
Hint:
Try to search for some function in the str class using help() and dir()
Input:
Welcome to Pink City Jaipur
Output:
W*e*l*c*o*m*e* *t*o* *P*i*n*k* *C*i*t*y* *J*a*i*p*u*r
"""
# Hands On 1
# Print all the numbers from 1 to 10 using condition in while loop
# Hands On 2
# Print all the numbers from 1 to 10 using while True loop
# Hands On 3
# Print all the even numbers from 1 to 10 using condition in while loop
# Hands On 4
# Print all the even numbers from 1 to 10 using while True loop
# Hands On 5
# Print all the odd numbers from 1 to 10 using condition in while loop
# Hands On 6
# Print all the odd numbers from 1 to 10 using while True loop
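# A minimal sketch for Hands On 1 above (1 to 10 with the condition in the
# while loop); the other variants follow the same pattern with a step or an
# if test inside the loop.
i = 1
while i <= 10:
    print(i)
    i += 1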
"""
Code Challenge
Name:
Fizz Buzz
Filename:
fizzbuzz.py
Problem Statement:
Write a Python program which iterates the integers from 1 to 100(included).
For multiples of three print "Fizz" instead of the number and for the multiples of five print "Buzz".
For numbers which are multiples of both three and five print "FizzBuzz".
User Input
Not required
Output:
1
2
Fizz
4
Buzz
"""
"[email protected]"
]
| |
614ffd8081156d09c9b26112c5fc19d3c6747938 | 4ecf6ab7d881bb65cdcdea2261ee0cab168a94b1 | /admin/products/producer.py | a42d3fb0f78ae7e39624104fdb0abb7954ba9f4a | []
| no_license | BryceHamilton/python-microservices | cd16d30bf9bb3908f6763519f8854f18f85ce1d7 | d3bd50b725fc5d8a6efab61b6fb5166b92b434d5 | refs/heads/main | 2023-02-11T06:03:29.326115 | 2021-01-11T08:27:32 | 2021-01-11T08:27:32 | 328,257,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | import pika, json
params = pika.URLParameters('amqps://qkyaxufh:[email protected]/qkyaxufh')
connection = pika.BlockingConnection(params)
channel = connection.channel()
def publish(method, body):
properties = pika.BasicProperties(method)
channel.basic_publish(exchange='', routing_key='main', body=json.dumps(body), properties=properties)
| [
"[email protected]"
]
| |
0a7ba0dabfda565747d5ea000baa65de80c76e9e | f00b8e2d1ed9dee22b50c8a678b081c45840522b | /simple_pipeline.py | ea79566501218f693d6b283fbc4aabf39b895174 | []
| no_license | valeedmalik/145_whats_cooking | c365e6044beccaa4bb80ea664000304683834490 | 2be0a18424ddeb1fdb4d5b8ba9900058bf057206 | refs/heads/master | 2021-01-11T05:10:36.232484 | 2015-11-24T21:24:22 | 2015-11-24T21:24:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | __author__ = 'davidnola'
import sklearn.pipeline as skpipe
import sklearn.cross_validation
import sklearn.feature_extraction as skfe
import numpy as np
import json
# import my stuff from pipeline_helpers.py
from pipeline_helpers import Printer,DeSparsify,JSONtoString
# Load up data. We transform it inside the pipeline now, so no need to preprocess
with open('train.json') as f:
train = json.loads(f.read())
with open('test.json') as f:
test = json.loads(f.read())
train_labels = [x['cuisine'] for x in train]
silent = False # set to True to shut up the printers
pipe = skpipe.Pipeline([
('printer0', Printer(silent)), # This prints out what the data currently looks like. Pipelines work by sequentially transforming the data step by step - so this Printer() will help you to see how those transformations go
('stringify_json', JSONtoString()),
('printer1', Printer(silent)),
('encoder',skfe.text.TfidfVectorizer(strip_accents='unicode',stop_words='english')),
('printer2', Printer(silent)),
('desparsify', DeSparsify()), # not necessary to desparsify for SVC, but helps you see how pipelines work
('printer3', Printer(silent)), # Note that tfidf is sparse, so most values are zero
('clf', sklearn.svm.LinearSVC()),
])
# when .fit() is called on the pipeline, each step of the pipeline has .fit() and .transform() called. When .predict() is called on the pipeline, each step only has .transform() called, because each transformer has already been fittted during .fit()
print("Fitting pipeline:")
pipe.fit(train,train_labels)
input("Press enter to continue on to run predict...")
predictions = pipe.predict(test)
input("Press enter to continue on to check pipeline CV score...")
# Lets see how it does in cross validation:
print('pipe score:',np.mean(sklearn.cross_validation.cross_val_score(pipe, train, train_labels,cv=2,n_jobs=1))) #CV=2, so you will see each pipeline run 2 times
# now write our predicitons to file:
ids = [x['id'] for x in test]
final_str = "id,cuisine"
for idx,i in enumerate(ids):
final_str+="\n"
final_str+=str(i)
final_str+=","
final_str+=predictions[idx]
with open('pipe_output.csv','w') as f:
f.write(final_str) | [
"[email protected]"
]
| |
3570bfbd6bc739bcbc15159e8235cbe8019311c5 | 8c11962530bd5f5a8aaee806e481d7ca9c4127ba | /multithread invoke testcode.py | dc1648f120eaaf1c57665cef17b1e47b08629410 | []
| no_license | yewonbahn/chaincode-invoke-test-ex- | a2c82f60f10fd46cc6b996b22d3f4a388f063beb | 7bb856f563562d94d3bb8cb89a7d8ea417e345a4 | refs/heads/main | 2023-07-13T19:25:21.647595 | 2021-08-18T01:56:18 | 2021-08-18T01:56:18 | 397,440,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | #!/usr/bin/env python
import time
import os
import random
from threading import Thread
from multiprocessing import Pool
from datetime import datetime
now = datetime.today()
start_time=time.time()
def f(name,start,end):
a="peer chaincode invoke -o localhost:7050 --ordererTLSHostnameOverride orderer.example.com --tls --cafile ${PWD}/organizations/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem -C mychannel -n basic --peerAddresses localhost:7051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt --peerAddresses localhost:9051 --tlsRootCertFiles ${PWD}/organizations/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt -c '{\"Args\":[\"CreateAsset\","
b=",\"extract image\","
c=",\"MFTECmd.exe\",\"C:programfiles\",\"File_Created\",\"Directory\",\"yewon\"]}'"
e=",\"extract image\""
d=random.randint(1, 1000000000)
for i in range(start,end):
d=random.randint(1, 5000000000)
answer=a+"\""+now.isoformat()+"\""+e+",\""+str(d)+"\""+c
print os.system(answer)
print(i)
if __name__ == '__main__':
th1 = Thread(target=f, args=(1, 0, 12500))
th2 = Thread(target=f, args=(2, 12500, 25000))
th3 = Thread(target=f, args=(3, 25000, 37500 ))
th4 = Thread(target=f, args=(4, 37500, 50000))
th5 = Thread(target=f, args=(5, 50000, 62500))
th6 = Thread(target=f, args=(6, 62500, 75000))
    th7 = Thread(target=f, args=(7, 75000, 87500))
    th8 = Thread(target=f, args=(8, 87500, 100000))
th1.start()
th2.start()
th3.start()
th4.start()
th5.start()
th6.start()
th7.start()
th8.start()
th1.join()
th2.join()
th3.join()
th4.join()
th5.join()
th6.join()
th7.join()
th8.join()
| [
"[email protected]"
]
| |
5855c6f2590a1965838f29018eab23c08aeb3916 | fb0ddefaa2fec8b80b5f0d5f27eb201703e8599c | /Days/day13/image.py | a6fb455e29ff47ed018e7e00393ecc8d805496a4 | []
| no_license | hamidihekmat/Programming-Practice | faa10e8277bc6da3f413b35565b66a8e489f3064 | 2119357634482ca3c73de28818b9a6b4fa34a6cf | refs/heads/master | 2020-07-13T12:00:10.223812 | 2019-10-04T18:37:08 | 2019-10-04T18:37:08 | 205,078,317 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from threading import Thread
import requests
counter = 0
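# NOTE: this counter is shared by every downloader thread and the increment in
# image_downloader() is not synchronized, so with many concurrent downloads two
# threads could in principle race and reuse the same file number.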
def image_downloader(image):
global counter
name = image.split('/')
image = requests.get(image).content
counter += 1
with open('file{}.jpg'.format(counter), 'wb') as image_file:
image_file.write(image)
print('image downloaded')
image_1 = 'https://resize.hswstatic.com/w_907/gif/tesla-cat.jpg'
image_2 = 'https://images.unsplash.com/reserve/NnDHkyxLTFe7d5UZv9Bk_louvre.jpg?ixlib=rb-1.2.1&auto=format&fit=crop&w=1340&q=80'
image_list = [image_1, image_2]
for img in image_list:
Thread(target=image_downloader, args=[img]).start()
| [
"[email protected]"
]
| |
fae7ce6103432e5ee3ffc1da5cead980cbd4d6e9 | 0071bdbc2817c6bfe89fb9aef5d4f4ddb582ae2e | /build/aeriaman/cmake/aeriaman-genmsg-context.py | 6d6109825f958fc82ca48ace37351f1a4fa08620 | []
| no_license | ibrncfe/Aerial_HumanLike_Manipulator_ROS | bcc2073afea4413174f67c1935ede6e6c7c8f346 | 9677becadb75b437d64f7858b1fd9a88c1ff29ff | refs/heads/master | 2020-05-20T00:06:59.406556 | 2019-05-10T09:26:46 | 2019-05-10T09:26:46 | 185,279,492 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 624 | py | # generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/ibrncfe/manipulator_ws/src/aeriaman/msg/Num.msg"
services_str = "/home/ibrncfe/manipulator_ws/src/aeriaman/srv/AddTwoInts.srv"
pkg_name = "aeriaman"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "aeriaman;/home/ibrncfe/manipulator_ws/src/aeriaman/msg;std_msgs;/opt/ros/melodic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python2"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/melodic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| [
"[email protected]"
]
| |
b974eaa97cef6e176f618c6acaa768aed3ec961e | f9dbe4b28fc7b9611fc2f48c8abc1f24859e55a7 | /mysite/gl_ObjLoader.py | 141f95486c836bc93c3d2438402a03a76f2f7b57 | []
| no_license | jhn5052/3dImoji_Django | 644e813e529a9155ce0ecb450287a548be2c8e92 | 1cc0c5c680f878eca3974e9b2aa940c5b33ebee9 | refs/heads/master | 2020-06-02T00:22:12.678456 | 2019-06-09T09:22:09 | 2019-06-09T09:22:09 | 190,976,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,663 | py | import numpy as np
class ObjLoader:
def __init__(self):
self.vert_coords = []
self.text_coords = []
self.norm_coords = []
self.vertex_index = []
self.texture_index = []
self.normal_index = []
self.model = []
self.flag = -1
#Read Obj file and parsing
def parse_model(self, file):
for line in open(file, 'r',encoding='UTF8'):
if line.startswith('#'): continue
values = line.split()
if not values: continue
if values[0] == 'v':
values[1] = float(values[1])
values[2] = float(values[2])
values[3] = float(values[3])
self.vert_coords.append(values[1:4])
'''
if float(values[1]) < -0.119 and float(values[1]) >-0.126:
if float(values[2]) >0.18 and float(values[2]) < 0.21:
if float(values[3]) >= -0.071404 and float(values[3]) <= -0.064529:
print(values)
'''
if values[0] == 'vt':
self.text_coords.append(values[1:3])
if values[0] == 'vn':
self.norm_coords.append(values[1:4])
if values[0] == 'f':
face_i = []
text_i = []
norm_i = []
for v in values[1:4]:
w = v.split('/')
if '' in w:
w[1] = 1
face_i.append(int(w[0])-1)
text_i.append(int(w[1])-1)
norm_i.append(int(w[2])-1)
self.vertex_index.append(face_i)
self.texture_index.append(text_i)
self.normal_index.append(norm_i)
if len(self.text_coords) > 0 :
self.flag = 1
#ready to draw model
def load_model(self):
self.vertex_index = [y for x in self.vertex_index for y in x]
self.texture_index = [y for x in self.texture_index for y in x]
self.normal_index = [y for x in self.normal_index for y in x]
for i in self.vertex_index:
self.model.extend(self.vert_coords[i])
if self.flag == 1:
for i in self.texture_index:
self.model.extend(self.text_coords[i])
for i in self.normal_index:
self.model.extend(self.norm_coords[i])
self.model = np.array(self.model, dtype='float32')
| [
"[email protected]"
]
| |
e1445bde9f63484fff8644b27e3c5a9774a28588 | 360edfcbbc0a221e06b21ced12e818fe83a22122 | /adv-log-likelihoods/mle_geneconv_common.py | 961853199ab7732ae45b99966f21df30996f3c4d | []
| no_license | argriffing/ctmcaas | 068dcdfe69875fa26caf1e09e48ab026170dcf7a | 0d555447eada750056ed6c3eff72790f0e68637e | refs/heads/master | 2020-04-09T07:37:02.749389 | 2015-01-12T17:09:13 | 2015-01-12T17:09:13 | 24,764,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,052 | py | """
Common functions for gene conversion MLE.
"""
from __future__ import division, print_function
import functools
import json
import subprocess
import requests
import copy
import multiprocessing
from guppy import hpy
import numpy as np
from numpy.testing import assert_equal
import ll
import jsonctmctree.ll
__all__ = [
'eval_ll_cmdline',
'eval_ll_internets',
'eval_ll_module',
'eval_ll_v3module',
'eval_ll_v3module_multiprocessing',
'objective',
]
def eval_ll_v3module_multiprocessing(nworkers, j_data):
"""
Use multiple cores to process the json input.
When running OpenBLAS, use the OPENBLAS_MAIN_FREE=1
environment variable setting when using multiprocessing.
Otherwise OpenBLAS will reserve all of the cores for its
parallel linear algebra functions like matrix multiplication.
"""
# Copy the iid observations from the rest of the json input.
all_iid_observations = j_data['iid_observations']
all_site_weights = j_data['site_weights']
nsites = len(all_iid_observations)
assert_equal(len(all_iid_observations), len(all_site_weights))
# Define the per-worker observations and weights.
obs_per_worker = [[] for i in range(nworkers)]
site_weights_per_worker = [[] for i in range(nworkers)]
for i in range(nsites):
obs = all_iid_observations[i]
site_weight = all_site_weights[i]
worker = i % nworkers
obs_per_worker[worker].append(obs)
site_weights_per_worker[worker].append(site_weight)
# Define json data per worker.
# Use a shallow copy of the parsed json object,
# but overwrite the worker-specific observation and site weights.
json_data_per_worker = []
for i in range(nworkers):
worker_data = dict(j_data)
worker_data['iid_observations'] = obs_per_worker[i]
worker_data['site_weights'] = site_weights_per_worker[i]
json_data_per_worker.append(worker_data)
# FIXME just debugging...
#print('multiprocessing inputs:')
#for d in json_data_per_worker:
#print(d)
#print()
# Compute the log likelihood and some gradients,
# partitioning the independent sites among worker processes.
# These quantities are additive.
p = multiprocessing.Pool(nworkers)
f = jsonctmctree.ll.process_json_in
results = p.map(f, json_data_per_worker)
#print('multiprocessing results:')
#for r in results:
#print(r)
#print()
# Combine the results.
if any(r['status'] == 'error' for r in results):
status = 'error'
else:
status = 'success'
feasibility = all(r['feasibility'] for r in results)
#message = '\n'.join(r['message'].strip() for r in results)
log_likelihood = sum(r['log_likelihood'] for r in results)
d_per_partition = [r['edge_derivatives'] for r in results]
edge_derivatives = [sum(arr) for arr in zip(*d_per_partition)]
j_combined = dict(
status = status,
feasibility = feasibility,
#message = message,
log_likelihood = log_likelihood,
edge_derivatives = edge_derivatives)
return j_combined
def eval_ll_cmdline(j_data):
ll_input_string = json.dumps(j_data)
p = subprocess.Popen(
['python', 'll.py'],
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
outdata, errdata = p.communicate(input=ll_input_string)
j_ll = json.loads(outdata)
return j_ll
def eval_ll_internets(url, j_data):
return requests.post(url, data=json.dumps(j_data)).json()
def eval_ll_module(j_data):
return ll.process_json_in(j_data)
def eval_ll_v3module(j_data):
return jsonctmctree.ll.process_json_in(j_data)
def _log_likelihood_and_edge_derivatives(
requested_derivatives,
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x):
"""
Evaluate the log likelihood and some of its derivatives.
The evaluated derivatives are the ones that correspond
to edge-specific scaling factor parameters.
"""
# Deduce some counts.
nsites = len(iid_observations)
# All sites are weighted equally.
site_weights = np.ones(nsites)
# Break the opaque parameters into two pieces.
# The first piece consists of parameters that affect the rate
# matrix in complicated ways, and for which we will use finite-differences
# to approximate sensitivities.
# The second piece consists of edge-specific rate scaling factor
# parameters whose sensitivities can be computed more efficiently
k = len(edges)
x_process, x_edge = x[:-k], x[-k:]
tree = dict(
row = tree_row,
col = tree_col,
rate = np.exp(x_edge).tolist(),
process = tree_process,
)
# create the processes
m0 = abstract_model.instantiate(x_process)
m1 = abstract_model.instantiate(x_process)
m0.set_tau(0)
# define the pair of processes
processes = []
for m in m0, m1:
row, col, rate = m.get_sparse_rates()
p = dict(row=row, col=col, rate=rate)
processes.append(p)
# define the prior distribution
prior_info = m0.get_distribution_info()
prior_feasible_states, prior_distribution = prior_info
# Build the nested structure to be converted to json.
data = dict(
site_weights = site_weights,
requested_derivatives = requested_derivatives,
node_count = len(edges) + 1,
process_count = len(processes),
state_space_shape = abstract_model.get_state_space_shape(),
tree = tree,
processes = processes,
prior_feasible_states = prior_feasible_states,
prior_distribution = prior_distribution.tolist(),
observable_nodes = observable_nodes,
observable_axes = observable_axes,
iid_observations = iid_observations,
)
j_ll = fn(data)
status = j_ll['status']
feasibility = j_ll['feasibility']
if status != 'success' or not feasibility:
print('results:')
print(j_ll)
print()
raise Exception('encountered some problem in the calculation of '
'log likelihood and its derivatives')
log_likelihood = j_ll['log_likelihood']
edge_derivatives = j_ll['edge_derivatives']
print('log likelihood:', log_likelihood)
print('edge derivatives:', edge_derivatives)
return log_likelihood, edge_derivatives
def objective_and_gradient(
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x):
"""
The x argument is the opaque 1d vector of parameters.
This requires an evaluator that knows about the derivative
of the log likelihood with respect to parameter values.
Hard-code the delta for non-edge-rate finite differences.
The intention is to use the default value used in L-BFGS-B
in scipy.optimize.minimize.
"""
delta = 1e-8
# Break the opaque parameters into two pieces.
# The first piece consists of parameters that affect the rate
# matrix in complicated ways, and for which we will use finite-differences
# to approximate sensitivities.
# The second piece consists of edge-specific rate scaling factor
# parameters whose sensitivities can be computed more efficiently
k = len(edges)
x_process, x_edge = x[:-k], x[-k:]
# For the first call, request derivatives for all edges.
requested_derivatives = list(range(k))
ll, edge_derivs = _log_likelihood_and_edge_derivatives(
requested_derivatives,
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x)
# Count the number of parameters that are not
# edge-specific rate scaling factors.
m = len(x) - k
# For subsequent calls, use finite differences to estimate
# derivatives with respect to these parameters.
other_derivs = []
requested_derivatives = []
for i in range(m):
x_plus_delta = np.array(x)
x_plus_delta[i] += delta
ll_delta, _ = _log_likelihood_and_edge_derivatives(
requested_derivatives,
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x_plus_delta)
d_estimate = (ll_delta - ll) / delta
other_derivs.append(d_estimate)
other_derivs = np.array(other_derivs)
print('other derivatives:', other_derivs)
#TODO this is for debugging
#raise Exception
# Return the function value and the gradient.
# Remember this is to be minimized so convert this to use signs correctly.
f = -ll
g = -np.concatenate((other_derivs, edge_derivs))
print('objective function:', f)
print('gradient:', g)
print()
return f, g
def objective_and_finite_differences(
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x):
"""
Use finite differences in the same way as the default L-BFGS-B.
This function uses finite differences for all parameters,
not just the ones that are not edge-specific rates.
"""
delta = 1e-8
requested_derivatives = []
curried_objective = functools.partial(
objective,
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges)
f = curried_objective(x)
n = len(x)
diffs = []
for i in range(n):
u = np.array(x)
u[i] += delta
f_diff = curried_objective(u)
d = (f_diff - f) / delta
diffs.append(d)
g = np.array(diffs)
print('function value:', f)
print('finite differences:', g)
print()
#TODO this is for debugging
#raise Exception
return f, g
def objective(
abstract_model,
fn,
tree_row, tree_col, tree_process,
observable_nodes, observable_axes, iid_observations,
edges,
x):
"""
The x argument is the opaque 1d vector of parameters.
"""
# break the opaque parameters into two pieces
k = len(edges)
x_process, x_edge = x[:-k], x[-k:]
tree = dict(
row = tree_row,
col = tree_col,
rate = np.exp(x_edge).tolist(),
process = tree_process,
)
# create the processes
m0 = abstract_model.instantiate(x_process)
m1 = abstract_model.instantiate(x_process)
m0.set_tau(0)
# define the pair of processes
processes = []
for m in m0, m1:
row, col, rate = m.get_sparse_rates()
p = dict(row=row, col=col, rate=rate)
processes.append(p)
# define the prior distribution
prior_info = m0.get_distribution_info()
prior_feasible_states, prior_distribution = prior_info
# Build the nested structure to be converted to json.
data = dict(
node_count = len(edges) + 1,
process_count = len(processes),
state_space_shape = abstract_model.get_state_space_shape(),
tree = tree,
processes = processes,
prior_feasible_states = prior_feasible_states,
prior_distribution = prior_distribution.tolist(),
observable_nodes = observable_nodes,
observable_axes = observable_axes,
iid_observations = iid_observations,
)
j_ll = fn(data)
log_likelihood = sum(j_ll['log_likelihoods'])
y = -log_likelihood
print('value of objective:', y)
return y
| [
"[email protected]"
]
| |
fca480dadad19618acaf1c4253cfda6b34166472 | c41e62d099db6ac01032dfefa1a554b2335211b3 | /advent_of_cyber_16_where-is-santa.py | 123490323cfca27d7e42a3c2b38a4ff74cb1e0a9 | []
| no_license | philnipyo/various-scripts | bf6cb138666edea88cbff98b5b8bc398e315fbc8 | 9f8b171b80ef288fd71385a0d9d540ecca80a450 | refs/heads/master | 2023-02-12T12:58:45.467823 | 2021-01-17T05:02:53 | 2021-01-17T05:02:53 | 295,817,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | import requests
# Define the URL to send+retrieve requests
u = 'http://TRYHACKME_MACHINE_IP/api/'
for i in range(1, 100, 2):
# Append string version of i to the URL
URL = u + str(i)
r = requests.get(url = URL)
    # Decode the JSON response into a dictionary
    data = r.json()
    # Print the information in the following format: Number | Result
    print('Item number {} yields: {}'.format(data['item_id'], data['q']))
| [
"[email protected]"
]
| |
3d643a64973393ab701c545ab423cb75a3228c37 | f785e0ac303ef6c20748123b66c7b5cd179c03f6 | /FreeFallApp/migrations/0015_auto_20200118_1346.py | 1fd0dfc2e244e006cbf1aca025ad3ab7c1568c4e | []
| no_license | vasa911/snproject | 37e92a3defc19584cdc2306e7c08ddd21f970554 | d985a363e877954bca1507b6b2e7f7fd911f10b5 | refs/heads/master | 2020-08-30T02:27:28.852577 | 2020-08-18T16:09:32 | 2020-08-18T16:09:32 | 218,234,287 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | # Generated by Django 2.2.6 on 2020-01-18 11:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('FreeFallApp', '0014_auto_20200114_0936'),
]
operations = [
migrations.AddField(
model_name='landmark',
name='image',
field=models.ImageField(blank=True, null=True, upload_to='landmarks/'),
),
migrations.AlterField(
model_name='hike',
name='creation_datetime',
field=models.DateTimeField(default=datetime.datetime(2020, 1, 18, 13, 46, 27, 951307)),
),
]
| [
"[email protected]"
]
| |
ee173a7e2b21e8aa89a60077bc05eee44a53ee25 | 6cd1133b3d86635c2864b4bda5537de7278ba727 | /chapter9/selectiveCopy.py | 63ac9321e4d65f7b5e805df94c11eedc4891a174 | []
| no_license | jpsalviano/ATBSWP_exercises | 6ff7409908452a4d6640741737b2e0f01e467c10 | e4e6513e6b6a82d59c1cff615bc31472ac5a9ff3 | refs/heads/master | 2021-07-02T13:44:03.588698 | 2020-10-21T15:45:46 | 2020-10-21T15:45:46 | 170,361,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,686 | py | #! /usr/bin/python3
# This code is my solution to Practice Project: Selective Copy in the book Automate the Boring Stuff with Python by Al Sweigart
# selectiveCopy.py - Searches all files for an extension in a folder tree and copies them into a new folder.
# Usage: selectiveCopy.py <extension> <source> <destination>:
# <extension> without . (dot)
# <source> is the folder whose tree will be searched and copied, if that's the case
# <destination> is the folder where files from <source> will be copied into
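# Example invocation (hypothetical paths):
#   python3 selectiveCopy.py pdf ~/Documents ~/pdf_copies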
import sys, os, shutil
# get user input: extension (ext), folder tree (src), new folder (dest)
if len(sys.argv) != 4:
print("Usage: selectiveCopy.py <extension> <source> <destination>:\n<extension> without . (dot)\n<source> is the folder whose tree will be searched and copied, if that's the case\n<destination> is the folder where files from <source> will be copied into")
sys.exit()
else:
ext, src, dest = sys.argv[1], os.path.abspath(sys.argv[2]), os.path.abspath(sys.argv[3])
if not ( os.path.isdir(src) and os.path.isdir(dest) ): # check if folders exist
print('Both folders must exist!')
sys.exit()
# list all subfolders and files from <source>
# search all files for extension
# copy all matches into <destination>
for dir_path, subdir_list, file_list in os.walk(src):
if dir_path != dest: # avoids samefile error by not walking through <dest>
print('Searching {}...'.format(dir_path))
for filename in file_list:
if filename.endswith('.' + ext):
full_path = os.path.join(dir_path, filename)
shutil.copy(full_path, dest)
print('Done!')
| [
"[email protected]"
]
| |
11645dba3721936f5e4e5cf84bb238869849f386 | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/zulip/2018/8/setup_stripe.py | 7a07915d4153718a64bf53c4b1d55037db5cf5b3 | []
| no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 1,760 | py | from zerver.lib.management import ZulipBaseCommand
from zilencer.models import Plan, Coupon
from zproject.settings import get_secret
from typing import Any
import stripe
stripe.api_key = get_secret('stripe_secret_key')
class Command(ZulipBaseCommand):
help = """Script to add the appropriate products and plans to Stripe."""
def handle(self, *args: Any, **options: Any) -> None:
Plan.objects.all().delete()
# Zulip Cloud offerings
product = stripe.Product.create(
name="Zulip Cloud Premium",
type='service',
statement_descriptor="Zulip Cloud Premium",
unit_label="user")
plan = stripe.Plan.create(
currency='usd',
interval='month',
product=product.id,
amount=800,
billing_scheme='per_unit',
nickname=Plan.CLOUD_MONTHLY,
usage_type='licensed')
Plan.objects.create(nickname=Plan.CLOUD_MONTHLY, stripe_plan_id=plan.id)
plan = stripe.Plan.create(
currency='usd',
interval='year',
product=product.id,
amount=8000,
billing_scheme='per_unit',
nickname=Plan.CLOUD_ANNUAL,
usage_type='licensed')
Plan.objects.create(nickname=Plan.CLOUD_ANNUAL, stripe_plan_id=plan.id)
coupon = stripe.Coupon.create(
duration='forever',
name='25% discount',
percent_off=25)
Coupon.objects.create(percent_off=25, stripe_coupon_id=coupon.id)
coupon = stripe.Coupon.create(
duration='forever',
name='85% discount',
percent_off=85)
Coupon.objects.create(percent_off=85, stripe_coupon_id=coupon.id)
| [
"[email protected]"
]
| |
4e7554623a7c9fe5cc32ddac57daea973229cb01 | 068ac579a7d0f5c70ee5cf35975d96d8943ae010 | /api/admin.py | 3fb45ad64da3768e06e21c04f5ab7c4854bbfdc4 | []
| no_license | Mesh-project/Briefing_Server | dcc84b50d01a78daf0e279793ea8e780ddfd5e95 | e287b83be4ab886e43b437cf4cbf6a6332f9030d | refs/heads/master | 2023-08-22T12:59:33.447914 | 2021-10-03T06:45:01 | 2021-10-03T06:45:01 | 340,539,528 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | # from rest_framework.authtoken.admin import TokenAdmin
#
# TokenAdmin.raw_id_fields = ['model_user'] | [
"[email protected]"
]
| |
f92d0a9d2ccfc69b1c81ccb440faf4bd514e741d | 7a2758a072cc72e4373b30232703948ce8226597 | /coin.py | 77d8b64eb531859dd0171c1356f40c8bccb7de16 | []
| no_license | lHunniche/WorldsHardestGame | 1fccf51e02aee017625873c5a8818e0dd5de1a8b | 2ab8808a2d4034af889a88104a593bbd79d1331c | refs/heads/main | 2023-04-08T12:48:29.400314 | 2021-04-04T20:42:26 | 2021-04-04T20:42:26 | 351,906,931 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import pygame
from game import BLACK
COIN_YELLOW = (255, 213, 0)
class Coin:
def __init__(self, x, y):
self.outer_radius = 13
self.inner_radius = 7
self.rect = pygame.Rect(0,0,self.outer_radius*2, self.outer_radius*2)
self.rect.centerx = x
self.rect.centery = y
self.inner_color = COIN_YELLOW
self.outer_color = BLACK
class CoinLevel:
def __init__(self):
self.coins = []
def add_coin(self, coin):
self.coins.append(coin)
def level_two():
level = CoinLevel()
c1 = Coin(700, 550)
level.add_coin(c1)
return level.coins | [
"[email protected]"
]
| |
f4f8273a566211572f2d95eaeb61f48094a29f41 | 7be63391f6ad1d084a4d1ff3ba511ddc84b10d74 | /atomic/pec.py | 66733f27bf608a0bb845a419318e87eaeb3e7e9d | [
"MIT"
]
| permissive | TBody/atomic1D | 918b9bdcad8fb9312b56e04cbead042b1593a0e9 | fcab88f3b303468f23ac75b847c76244593f4b7f | refs/heads/master | 2020-06-27T12:06:27.915332 | 2017-07-26T17:13:08 | 2017-07-26T17:13:08 | 97,054,421 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,020 | py | from __future__ import absolute_import
import glob
import numpy as np
from scipy.interpolate import RectBivariateSpline
import scipy.constants as constants
from .adf15 import Adf15
from .atomic_data import RateCoefficient
class Transition(object):
def __init__(self, type_, element, nuclear_charge, charge, wavelength,
temperature, density, pec):
self.element = element
self.nuclear_charge = nuclear_charge
self.charge = charge
self.wavelength = wavelength
self.type_ = type_
self.electron_density = density
self.electron_temperature = temperature
self.photon_emissivity = pec
def interpolate(self, temperature_grid, density_grid):
x = np.log10(self.electron_temperature)
y = np.log10(self.electron_density)
z = np.log10(self.photon_emissivity)
sp = RectBivariateSpline(x, y, z)
pec = sp(np.log10(temperature_grid), np.log10(density_grid))
pec = 10**pec
return self._on_new_grids(temperature_grid, density_grid, pec)
def _on_new_grids(self, new_temperature, new_density, new_pec):
return self.__class__(self.type_, self.element, self.nuclear_charge,
self.charge, self.wavelength, new_temperature, new_density,
new_pec)
@property
def energy(self):
h, c = constants.h, constants.c
lambda_ = self.wavelength
return h * c / lambda_
class TransitionPool(object):
def __init__(self, transitions=None):
        if transitions is None: transitions = []
self.transitions = transitions
@classmethod
def from_adf15(cls, files):
obj = cls()
obj.append_files(files)
return obj
def create_atomic_data(self, ad):
keys = [('ex', 'line_power'), ('rec', 'continuum_power'),
('cx', 'cx_power')]
coeffs = {}
for from_, to_ in keys:
te = ad.coeffs[to_].temperature_grid
ne = ad.coeffs[to_].density_grid
fact = CoefficientFactory(ad, self.filter_type(from_))
c = fact.create(te, ne)
coeffs[to_] = c
filtered_ad = ad.copy()
filtered_ad.coeffs.update(coeffs)
return filtered_ad
def append_files(self, files):
for f in glob.glob(files):
self.append_file(f)
def append_file(self, filename):
f = Adf15(filename).read()
element = f['element']
nuclear_charge = f['nuclear_charge']
charge = f['charge']
datablocks = f['datablocks']
for d in datablocks:
wavelength = d['wavelength']
temperature = d['temperature']
density = d['density']
pec = d['pec']
type_ = d['type']
t = Transition(type_, element, nuclear_charge, charge, wavelength,
temperature, density, pec)
self.transitions.append(t)
def filter_type(self, *type_names):
names = self._interpret_type(*type_names)
        new_transitions = list(filter(lambda t: t.type_ in names, self.transitions))
return self.__class__(new_transitions)
def filter_energy(self, lo, hi, unit='eV'):
lo_ = lo * constants.elementary_charge
hi_ = hi * constants.elementary_charge
in_roi = lambda t: (lo_ <= t.energy) and (t.energy < hi_)
        new_transitions = list(filter(in_roi, self.transitions))
return self.__class__(new_transitions)
def _interpret_type(self, *type_names):
        return list(map(self._figure_out_type, type_names))
def _figure_out_type(self, type_):
if type_ in ['excitation', 'excit', 'ex']:
name = 'excit'
elif type_ in ['recombination', 'recom', 'rec']:
name = 'recom'
elif type_ in ['charge_exchange', 'chexc', 'cx']:
name = 'chexc'
else:
raise ValueError('invalid type: %s.' % type_)
return name
def sum_transitions(self):
energies = self.energies
coeffs = self.coeffs
energies = energies[:, np.newaxis, np.newaxis]
power = energies * coeffs
power = power.sum(0)
assert np.all(np.isinf(power)) == False
assert power.all() > 0
return power
def interpolate(self, temperature_grid, density_grid):
new_transitions = [t.interpolate(temperature_grid, density_grid) for t
in self.transitions]
return self.__class__(new_transitions)
@property
def wavelengths(self):
return np.array([t.wavelength for t in self.transitions])
@property
def energies(self):
return np.array([t.energy for t in self.transitions])
@property
def coeffs(self):
return np.array([t.photon_emissivity for t in self.transitions])
def __iter__(self):
return self.transitions.__iter__()
@property
def size(self):
return len(self.transitions)
def P_bremsstrahlung(k, Te, ne):
"""
W m^3
"""
return 1.53e-38 * Te**0.5 * (k + 1)**2
from collections import defaultdict
class CoefficientFactory(object):
def __init__(self, atomic_data, transition_pool, clip_limit=1e-80):
self.atomic_data = atomic_data
self.element = atomic_data.element
self.nuclear_charge = atomic_data.nuclear_charge
self.transition_pool = transition_pool
self.ionisation_stages = {}
self.rate_coefficients = None
self.temperature_grid = None
self.density_grid = None
self.clip_limit = clip_limit
def create(self, temperature_grid, density_grid):
self.temperature_grid = temperature_grid
self.density_grid = density_grid
self._sort_by_ionisation_stages()
self._sum_transitions()
return self.rate_coefficients
def _sort_by_ionisation_stages(self):
d = defaultdict(TransitionPool)
for t in self.transition_pool:
if not self._conforming(t): continue
d[t.charge].transitions.append(t)
self.ionisation_stages.update(d)
def _sum_transitions(self):
coeffs = []
for i in range(self.nuclear_charge):
c = self.ionisation_stages.get(i, None)
if c is None:
pec = np.zeros(self.temperature_grid.shape +
self.density_grid.shape)
else:
c = c.interpolate(self.temperature_grid, self.density_grid)
pec = c.sum_transitions()
coeffs.append(pec)
coeffs = np.array(coeffs)
data = {}
coeffs = coeffs.clip(self.clip_limit)
log_temperature = np.log10(self.temperature_grid)
log_density = np.log10(self.density_grid)
log_coeff = np.log10(coeffs)
self.rate_coefficients = RateCoefficient(self.nuclear_charge,
self.element, log_temperature, log_density, log_coeff)
def _conforming(self, t):
return t.nuclear_charge == self.nuclear_charge
| [
"[email protected]"
]
| |
3370ae9c427b633082564939bdc494177a90f86f | 670b5e67f7d818bad82f060d7c25a782038d6deb | /user_modules/catalog_matching_scatter_plot.py | 7bd6e046fbe47d29832a6d82a6f0d98ae3a4e017 | []
| no_license | rongpu/Python | 087ed1784f7f1d584867638c6cb28e5a8871ae80 | e0ddf26e7490de77ed62606b30a83c312542e5f9 | refs/heads/master | 2023-08-23T19:13:42.229304 | 2023-08-11T17:06:16 | 2023-08-11T17:06:16 | 65,158,528 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,280 | py | from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
def scatter_plot(d_ra, d_dec, title='', x_label='$\\mathbf{RA_{cat2} - RA_{cat1}(arcsec)}$', y_label='$\\mathbf{dec_{cat2} - dec_{cat1}(arcsec)}$'):
'''
INPUTS:
d_ra, d_dec: array of RA and Dec difference in arcsec
OUTPUTS:
axScatter: scatter-histogram plot
'''
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.1, 0.85
bottom, height = 0.1, 0.85
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom, width, 0.3]
rect_histy = [left, bottom, 0.3, height]
# start with a rectangular Figure
plt.figure(figsize=(8,8))
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
# # no labels
# axHistx.xaxis.set_major_formatter(nullfmt)
# axHisty.yaxis.set_major_formatter(nullfmt)
# the scatter plot
# mask = np.logical_and(np.abs(d_ra)<1.66, np.abs(d_dec)<1.)
# axScatter.plot(d_ra[mask], d_dec[mask], 'k.', markersize=1)
axScatter.plot(d_ra, d_dec, 'k.', markersize=1)
axHistx.hist(d_ra, bins=100, histtype='step', color='k', linewidth=2)
axHisty.hist(d_dec, bins=100, histtype='step', color='k', linewidth=2, orientation='horizontal')
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
axHistx.axis('off')
axHisty.axis('off')
axScatter.axhline(0, color='k', linestyle='--', linewidth=1.2)
axScatter.axvline(0, color='k', linestyle='--', linewidth=1.2)
    axScatter.set_xlabel(x_label)
    axScatter.set_ylabel(y_label)
    return axScatter
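# Example usage (a minimal sketch with made-up offsets):
#   d_ra = np.random.normal(0, 0.2, 1000)
#   d_dec = np.random.normal(0, 0.2, 1000)
#   ax = scatter_plot(d_ra, d_dec)
#   plt.show()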
# #--------------- ra dec histogram ---------------------
# plt.figure()
# plt.hist(d_ra,bins=50)
# plt.title('RA difference between cat2/3 and Terapix catalog')
# plt.xlabel('RA_cat2 - RA_cat1 (arcsec)')
# plt.grid()
# plt.figure()
# plt.hist(d_dec,50)
# plt.title('Dec difference between cat2/3 and Terapix catalog')
# plt.xlabel('Dec_cat2 - Dec_cat1 (arcsec)')
# plt.grid()
# plt.show()
| [
"[email protected]"
]
| |
a3d10f9625b8970533beb336305ff9a5b2b9f93b | 9a6b37861373187b26f95822ebc1a98b25b3de22 | /requirements/pypy2-v6.0.0-linux-armhf-raspbian/lib-python/2.7/ctypes/test/test_values.py | 1f56122caf24558a2f1254121ce878c1387651d3 | [
"LicenseRef-scancode-unicode",
"OpenSSL",
"GPL-1.0-or-later",
"Apache-2.0",
"MIT",
"LGPL-3.0-only",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | AhmedBHameed/TB_NFC_Cardreader | 0295c389e2a6cbd82ffcb497a636395518c9f5d8 | 9533212094ee2ba2403a37535550d9cf5c0184ea | refs/heads/master | 2022-11-03T13:48:38.146793 | 2019-05-18T17:01:55 | 2019-05-18T17:01:55 | 131,431,998 | 0 | 0 | MIT | 2022-10-18T18:26:03 | 2018-04-28T17:56:09 | Python | UTF-8 | Python | false | false | 2,988 | py | """
A testcase which accesses *values* in a dll.
"""
import unittest
import sys
from ctypes import *
from ctypes.test import xfail
import _ctypes_test
class ValuesTestCase(unittest.TestCase):
def test_an_integer(self):
ctdll = CDLL(_ctypes_test.__file__)
an_integer = c_int.in_dll(ctdll, "an_integer")
x = an_integer.value
self.assertEqual(x, ctdll.get_an_integer())
an_integer.value *= 2
self.assertEqual(x*2, ctdll.get_an_integer())
def test_undefined(self):
ctdll = CDLL(_ctypes_test.__file__)
self.assertRaises(ValueError, c_int.in_dll, ctdll, "Undefined_Symbol")
class PythonValuesTestCase(unittest.TestCase):
"""This test only works when python itself is a dll/shared library"""
@xfail
def test_optimizeflag(self):
# This test accesses the Py_OptimizeFlag intger, which is
# exported by the Python dll.
# It's value is set depending on the -O and -OO flags:
# if not given, it is 0 and __debug__ is 1.
# If -O is given, the flag is 1, for -OO it is 2.
# docstrings are also removed in the latter case.
opt = c_int.in_dll(pythonapi, "Py_OptimizeFlag").value
if __debug__:
self.assertEqual(opt, 0)
elif ValuesTestCase.__doc__ is not None:
self.assertEqual(opt, 1)
else:
self.assertEqual(opt, 2)
@xfail
def test_frozentable(self):
# Python exports a PyImport_FrozenModules symbol. This is a
# pointer to an array of struct _frozen entries. The end of the
# array is marked by an entry containing a NULL name and zero
# size.
# In standard Python, this table contains a __hello__
# module, and a __phello__ package containing a spam
# module.
class struct_frozen(Structure):
_fields_ = [("name", c_char_p),
("code", POINTER(c_ubyte)),
("size", c_int)]
FrozenTable = POINTER(struct_frozen)
ft = FrozenTable.in_dll(pythonapi, "PyImport_FrozenModules")
# ft is a pointer to the struct_frozen entries:
items = []
for entry in ft:
# This is dangerous. We *can* iterate over a pointer, but
# the loop will not terminate (maybe with an access
# violation;-) because the pointer instance has no size.
if entry.name is None:
break
items.append((entry.name, entry.size))
expected = [("__hello__", 104),
("__phello__", -104),
("__phello__.spam", 104)]
self.assertEqual(items, expected)
from ctypes import _pointer_type_cache
del _pointer_type_cache[struct_frozen]
@xfail
def test_undefined(self):
self.assertRaises(ValueError, c_int.in_dll, pythonapi,
"Undefined_Symbol")
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
8730fdfbfb8689cdcb58e11f1b0caa7d861f6fc5 | 1f0a79249de6a3088cec2b335667f50819f8cda5 | /www/models.py | ae18caddc916888df129fa8f3e7801e102a7b5ad | []
| no_license | FineArtz/PythonPractice | b08516649fda56c02b4ee76ef61afadc701c5969 | b9d259fb1a07ac2c260c1b7e532ffc4ce4becb1d | refs/heads/master | 2021-05-10T07:32:38.128990 | 2018-01-26T13:25:29 | 2018-01-26T13:25:29 | 118,839,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,487 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import time, uuid
from orm import Model, StringField, BooleanField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
#=====models=====
class User(Model):
__table__ = 'users'
id = StringField(primary_key = True, default = next_id, ddl = 'varchar(50)')
email = StringField(ddl = 'varchar(50)')
password = StringField(ddl = 'varchar(50)')
admin = BooleanField()
name = StringField(ddl = 'varchar(50)')
image = StringField(ddl = 'varchar(500)')
created_at = FloatField(default = time.time)
#created_at records the time the model is created at
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key = True, default = next_id, ddl = 'varchar(50)')
user_id = StringField(ddl = 'varchar(50)')
user_name = StringField(ddl = 'varchar(50)')
user_image = StringField(ddl = 'varchar(500)')
name = StringField(ddl = 'varchar(50)')
summary = StringField(ddl = 'varchar(200)')
content = TextField()
created_at = FloatField(default = time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key = True, default = next_id, ddl = 'varchar(50)')
blog_id = StringField(ddl = 'varchar(50)')
user_id = StringField(ddl = 'varchar(50)')
user_name = StringField(ddl = 'varchar(50)')
user_image = StringField(ddl = 'varchar(500)')
content = TextField()
created_at = FloatField(default = time.time)
| [
"[email protected]"
]
| |
b573863aabf604ac99ef02e732e3b1cfac6e70d3 | 68439002c2919a677a37657bf5eb5a014bf4236d | /KeepAccount.py | 0923782ca65a75e29b40b1bae76471926947dd14 | []
| no_license | waadene-wi/KeepAccount | 358f00f3663b4eba0b69c7274e768ea53ab83046 | 96c9ee2924a015a18352de7a298508576d3099ef | refs/heads/master | 2023-04-20T00:58:00.946995 | 2021-04-23T15:15:36 | 2021-04-23T15:15:36 | 366,339,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,078 | py | import flask
import json
import copy
from Account import *
from Category import *
from Record import *
from Statistic import *
from Budget import *
from Currency import *
from Common import *
from URLBackup import *
app = flask.Flask(__name__)
app.debug = True
url_backup = URLBackup()
def executeService(typeOfClass, service_name, method_name):
instance = typeOfClass(app.logger)
    if not hasattr(instance, method_name):
return json.dumps(makeReturn(Error.ILLEGAL_URL))
args = flask.request.args.to_dict()
ori_args = copy.deepcopy(args)
ret = getattr(instance, method_name)(args)
url_backup.backup(service_name, method_name, ori_args)
return json.dumps(ret)
@app.route('/')
def default_index():
return flask.send_file('web/add_record.html')
@app.route('/favicon.ico')
def favicon():
return flask.send_file('web/favicon.ico')
@app.route('/test')
def test():
return 'Hello, World!'
@app.route('/<page_name>')
def get_page(page_name):
# 文件不存在时需要做处理
return flask.send_file('web/' + page_name + '.html')
@app.route('/res/<file_name>')
def get_resource(file_name):
return flask.send_file('web/' + file_name)
@app.route('/service/account/<method_name>')
def account_service(method_name):
return executeService(Account, 'account', method_name)
@app.route('/service/category/<method_name>')
def category_service(method_name):
return executeService(Category, 'category', method_name)
@app.route('/service/record/<method_name>')
def record_service(method_name):
return executeService(Record, 'record', method_name)
@app.route('/service/statistic/<method_name>')
def statistic_service(method_name):
return executeService(Statistic, 'statistic', method_name)
@app.route('/service/budget/<method_name>')
def budget_service(method_name):
return executeService(Budget, 'budget', method_name)
@app.route('/service/currency/<method_name>')
def currency_service(method_name):
return executeService(Currency, 'currency', method_name)
if __name__ == '__main__':
app.run() | [
"[email protected]"
]
| |
67f4499a0e0385f095e27a510268471bbe024066 | 1dced307e8aec0f895a201fec060e09e8b8ab901 | /instructor/Demo_Django실습완료/django/Scripts/mysite/mysite/settings.py | 4ed5fc7e60fef2b7927c7032014dc99d5bd714a7 | []
| no_license | CheolminConanShin/PythonTakeNote | 6171936541b49c77878094cd8173fff4ec1d3841 | 9ec506861066f91e5584a0b20b6cd7267b1eb12e | refs/heads/master | 2021-01-20T10:06:46.862051 | 2016-12-30T07:03:26 | 2016-12-30T07:03:26 | 77,347,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,302 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'msxse)exbay9+66c45=sr$cwbl_nuv8a8rjvzee1!4ubp^yku_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'polls',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
'c:/django/Scripts/mysite/mytemplates/admin',
'c:/django/Scripts/mysite/mytemplates',
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'sample.db'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'KST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
b9a49fb55acddea878bf534c42bafd4cb724bb70 | d4de204801a3aa63ae6dbfdb0afbe8b7bb362c8c | /organizer_files_timit.py | 171aa2247139bf44ac35326f41bfaa03d467a342 | []
| no_license | luigifaticoso/Quaternion-speech-recognition | b3ae1cf12a23e844e2faf582dafc39d9dcad50df | ce624d098014deb555a56e91c128734afcd8a39c | refs/heads/master | 2020-04-29T05:20:24.106340 | 2019-05-13T23:50:16 | 2019-05-13T23:50:16 | 175,879,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | import os, sys
from shutil import copy
# Open a file
os.makedirs("preprocessed_files")
preprocessed_path = path = os.path.abspath("preprocessed_files")
path = os.path.abspath("TRAIN")
dirs = os.listdir( path )
count = 0
# This would print all the files and directories
for file in dirs:
    dialectpath = os.path.join(path,file)
    if os.path.isdir(dialectpath):
        dialectdirs = os.listdir( dialectpath )
        for e in dialectdirs:
            count += 1
            speakerpath = os.path.join(dialectpath,e)
            if os.path.isdir(speakerpath):
                speakerdirs = os.listdir( speakerpath )
                for i in speakerdirs:
                    new_name = i.split(".")[0] +"_"+ str(count) +"."+ i.split(".")[1]
                    os.rename(os.path.join(speakerpath,i), os.path.join(speakerpath,new_name))
                    copy(os.path.join(speakerpath,new_name), preprocessed_path)
| [
"[email protected]"
]
| |
8cbf511b3b0612a358037cc06eb24661fab2b0c1 | 05b80d92bb2efec76f898c527cc803f931031266 | /Airbnb Interview ImplementQueuewithFixedSizeofArrays.py | aad631853256c05eca1636f0c70951bac37fada3 | []
| no_license | PriyankaKhire/ProgrammingPracticePython | b5a6af118f3d4ec19de6fcccb7933d84f7522d1a | 8dd152413dce2df66957363ff85f0f4cefa836e8 | refs/heads/master | 2022-08-28T00:44:34.595282 | 2022-08-12T19:08:32 | 2022-08-12T19:08:32 | 91,215,578 | 18 | 11 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | '''
Implement a Queue with help of fix sized arrays.
Such that, if the array runs out of space,
then the queue should duplicate same sized array and keep on adding elements further.
Example:
Q with fixed array size 3
Q
[[None, None, None]]
push(1) push(2) push(3)
Q
[[1,2,3]]
push(4) push(5)
Q
[[1,2,3], [4, 5, None]]
As you can see, initially the queue is made of an array with fixed size 3;
as soon as the queue runs out of space, it adds another array of size 3 to accommodate new elements.
'''
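# Note on pop() below: elements are removed from the head of the oldest array,
# and once that array has been fully drained it is discarded -- so the example
# above shrinks back to [[4, 5, None]] after popping 1, 2 and 3.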
class Array(object):
    def __init__(self, size):
        self.array = [None for i in range(size)]
        self.numElements = 0

class Q(object):
    def __init__(self, size):
        self.size = size
        self.arrays = []
        self.head = 0
        self.tail = None

    def push(self, element):
        if(not self.arrays or (self.arrays[-1].numElements == self.size)):
            array = Array(self.size)
            self.arrays.append(array)
            self.tail = 0
        self.arrays[-1].array[self.tail] = element
        self.tail = self.tail + 1
        self.arrays[-1].numElements = self.arrays[-1].numElements + 1
        print [a.array for a in self.arrays]

    def pop(self):
        if(not self.arrays):
            print "Q empty"
            return
        print self.arrays[0].array[self.head]
        self.arrays[0].array[self.head] = None
        self.head = self.head + 1
        self.arrays[0].numElements = self.arrays[0].numElements - 1
        if(self.arrays[0].numElements == 0):
            self.arrays.pop(0)
            self.head = 0
        print [a.array for a in self.arrays]
# Main
obj = Q(3)
obj.push(1)
obj.push(2)
obj.push(3)
obj.push(4)
obj.push(5)
obj.push(6)
obj.push(7)
obj.pop()
obj.pop()
obj.pop()
obj.pop()
obj.push(8)
obj.pop()
obj.pop()
obj.pop()
obj.pop()
obj.pop()
obj.push(1)
| [
"[email protected]"
]
| |
09e77c14accf469d7e3df8c14423a66fda5c0299 | b7fef448efdfd7d2cb5235c35ec1f04c6cb97414 | /official_TensorFlow_tutorials/cnn_mnist/cnn_mnist.py | 65d102a8e1b983fbb73508ff48be2c57730cec3a | [
"Apache-2.0"
]
| permissive | PwnySQL/learning_tensorflow_for_GANs | 578c2cea0469b2173ac339f263ee78b7f31674b0 | bf0794726407fd9caf9030732381f5954beedad8 | refs/heads/master | 2020-05-31T15:13:01.473919 | 2019-06-05T08:15:53 | 2019-06-05T08:15:53 | 190,351,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,781 | py | import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
# ARCHITECTURE ################################################################
"""
Conv Layer 1: applies 32 5x5 filters with ReLU activation function
Pool Layer 1: max pooling with 2x2 filter and stride of 2 (pooling regions do
not overlap)
Conv Layer 2: Applies 64 5x5 filters with ReLU activation function
Pool Layer 2: max pooling with 2x2 filter and stride of 2
Dense Layer 1: 1024 neurons with dropout of 0.4
Dense Layer 2: 10 neurons, one for each target class
=> can use:
tf.layers.conv2d()
tf.layers.max_pooling2d()
tf.layers.dense()
"""
# CNN Model ###################################################################
def cnn_model_fn(features, labels, mode):
    """
    Model function for CNN, configures the CNN
    arguments:
        features: takes MNIST feature data
        labels: MNIST labels
        mode: TRAIN, EVAL, PREDICT
    returns:
        predictions
        loss
        training operation
    """
    # Input layer #####
    # -1 for batch_size to specify dynamical computation based on input values
    # in features["x"] to treat batch_size as tunable hyper-parameter
    # e.g. in batches of 5, input_layer will contain 5*784 = 3920 values
    input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
    # conv layer 1 #####
    conv1 = tf.layers.conv2d(
        inputs=input_layer,  # must have shape [batch_size, image_height, image_width, channels]
        filters=32,  # number of filters used
        kernel_size=[5, 5],  # since both dimensions are same, one could write kernel_size=5
        padding='SAME',  # output Tensor has same height and width values as input Tensor
        activation=tf.nn.relu)
    # output tensor has shape [batch_size, 28, 28, 32]
    # pool layer 1 #####
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1,  # must have shape [batch_size, image_height, image_width, channels]
        pool_size=[2, 2],
        strides=2)  # extracted sub-regions are separated by 2 pixels, for different
    # stride values for height and width, specify tuple or list, eg. stride=[3, 6]
    # output tensor has shape [batch_size, 14, 14, 32] => 2x2 filter reduces
    # height and width by 50%
    # conv layer 2 #####
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='SAME',
        activation=tf.nn.relu)
    # output tensor has shape [batch_size, 14, 14, 64]
    # pool layer 2 #####
    pool2 = tf.layers.max_pooling2d(
        inputs=conv2,
        pool_size=[2, 2],
        strides=2)
    # output tensor has shape [batch_size, 7, 7, 64]
    # dense layer #####
    # firstly need to flatten feature map to shape [batch_size, features]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])  # output shape: [batch_size, 3136]
    dense = tf.layers.dense(
        inputs=pool2_flat,
        units=1024,  # numbers of neurons in dense layer
        activation=tf.nn.relu)
    dropout = tf.layers.dropout(
        inputs=dense,
        rate=0.4,  # randomly dropout 40% of the elements during training
        training=(mode == tf.estimator.ModeKeys.TRAIN))
    # output tensor has shape [batch_size, 1024]
    # logits layer #####
    # returns "raw" values for predictions => use dense layer with linear activation
    logits = tf.layers.dense(inputs=dropout, units=10)
    # output tensor has shape [batch_size, 10]
    # generate predictions for PREDICT and EVAL mode #####
    predictions = {
        "classes": tf.argmax(input=logits, axis=1),  # predicted class, digit from 0-9
        # add softmax_tensor to graph, it is used for PREDICT and
        # "logging_hook"
        "probabilities": tf.nn.softmax(logits, name="softmax_tensor")
        # probability of being in class 0, 1, ..., 9
        # explicitly set a name to be able to set up the logging_hook later
    }
    pred_metrics_ops = {
        "train_accuracy": tf.metrics.accuracy(
            labels=labels,
            predictions=predictions["classes"]
        )
    }
    tf.summary.scalar("train_accuracy", pred_metrics_ops["train_accuracy"][1])
    tf.summary.histogram("probabilities", predictions["probabilities"])
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
    # calculate loss for TRAIN and EVAL modes #####
    # for multi-class classification, often cross_entropy is used
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    # another way: do a one_hot encoding of the labels and apply softmax_cross_entropy
    # onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=10)
    # loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar("cross_entropy", loss)
    # configure training operation for TRAIN mode #####
    if mode == tf.estimator.ModeKeys.TRAIN:
        # build optimizer
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        # the training operation is using the minimize method on the loss
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        # using the global_step parameter is essential for TensorBoard Graphs
        # to work properly, it counts the number of training steps
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
    # add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            labels=labels,
            predictions=predictions["classes"])
    }
    # tf.summary.scalar("eval_accuracy", eval_metric_ops["accuracy"][1])
    return tf.estimator.EstimatorSpec(mode=mode, loss=loss,
                                      eval_metric_ops=eval_metric_ops)
def main(unused_argv):
    # load training and eval data #####
    mnist = tf.contrib.learn.datasets.load_dataset("mnist")
    train_data = mnist.train.images  # returns np.array, 55000 images
    train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
    eval_data = mnist.test.images  # returns np.array, 10000 images
    eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
    # create the Estimator #####
    mnist_classifier = tf.estimator.Estimator(
        model_fn=cnn_model_fn, model_dir="./mnist_convnet_model")
    # set up logging hook #####
    # key in following dict is of our choice
    tensors_to_log = {"probabilities": "softmax_tensor"}
    logging_hook = tf.train.LoggingTensorHook(
        tensors=tensors_to_log, every_n_iter=50)
    # Train the model #####
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,  # model will train, until specified steps are reached
        shuffle=True)  # shuffle the training data
    mnist_classifier.train(
        input_fn=train_input_fn,
        steps=20000,
        hooks=[logging_hook])
    print("\nNow going into evaluation\n")
    # Evaluate the model and print results #####
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={"x": eval_data},
        y=eval_labels,
        num_epochs=1,  # model evaluates the metrics over one epoch of data
        shuffle=False)  # iterate through data sequentially
    eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
    print(eval_results)

if __name__ == "__main__":
    tf.app.run()
| [
"[email protected]"
]
| |
91d3def9f282c678febe2a5bbf11d0bfcf1a8960 | 75d9934b566016aa86abc87a5db018c8c855ba65 | /lenet.py | c7ed9385ca8629133618a362109726c403fbe60d | []
| no_license | robustml-eurecom/model_monitoring_selection | c05e5caccbf1758c6a434c88924402b4cf767c49 | 42a15c6f5d4fae29a3583272ace13f99e26f711b | refs/heads/master | 2023-09-04T02:23:49.725444 | 2021-11-04T11:53:25 | 2021-11-04T11:53:25 | 273,566,555 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,122 | py | from numpy.random import seed
from tensorflow import set_random_seed
from math import sqrt
import numpy as np
import pandas as pd
import seaborn as sns
from numpy import array
from keras import Input, Model
from keras.models import InputLayer
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.normalization import BatchNormalization
from keras import regularizers
from keras import optimizers
from sklearn.metrics import mean_absolute_error
from sklearn import preprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--observations', default=None)
parser.add_argument("--true_values", default=None)
parser.add_argument("--forecasts", default=None)
args = parser.parse_args()
seed(42)
set_random_seed(42)
def smape(a, b):
    """
    Calculates sMAPE
    :param a: actual values
    :param b: predicted values
    :return: sMAPE
    """
    a = np.reshape(a, (-1,))
    b = np.reshape(b, (-1,))
    return np.mean(2.0 * np.abs(a - b) / (np.abs(a) + np.abs(b))).item()
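# Worked example (illustrative values): smape([100.0], [110.0])
# = mean(2 * |100 - 110| / (|100| + |110|)) = 20 / 210 ~= 0.0952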
# Observations
df_series = pd.read_csv(args.observations)
df_series = df_series.drop(['V1'],axis=1)
# True forecasts
df_obs = pd.read_csv(args.true_values)
df_obs = df_obs.drop(['V1'],axis=1)
# Forecasts given by the monitored "comb" model
df_preds = pd.read_csv(args.forecasts)
df_preds = df_preds.drop(['Unnamed: 0'],axis=1).T
df_preds.index = df_obs.index
series_matr = df_series.values
obs_matr = df_obs.values
preds = df_preds.values
# Evaluate sMAPE between true values and forecasts
smape_arr = np.zeros(obs_matr.shape[0])
for i in range(len(obs_matr)):
    smape_arr[i] = smape(obs_matr[i],preds[i])
df_all = pd.concat([df_series,df_preds],axis=1,join='inner')
max_l = df_all.shape[1]
data_all = df_all.values
# Padding time-series
for i in range(len(data_all)):
    if i % 1000 == 0:
        print("Padding row {}".format(i))
    ts = data_all[i,:]
    ts = ts[~np.isnan(ts)]
    if len(ts)<max_l:
        diff = max_l - len(ts)
        padd = np.zeros((1,diff)).flatten()
        ts = np.hstack((ts,padd))
    if i == 0:
        X_mat = ts
    else:
        X_mat = np.vstack((X_mat,ts))
min_max_scaler = preprocessing.MinMaxScaler()
X_mat = min_max_scaler.fit_transform(X_mat)
smape_arr = np.log(smape_arr)
test_percentage = 0.25
total = X_mat.shape[0]
train_samples = int(np.ceil(total) * (1-test_percentage))
test_samples = total - train_samples
train_X = X_mat[:train_samples]
train_y = smape_arr[:train_samples]
test_X = X_mat[train_samples:]
test_y = smape_arr[train_samples:]
# Run LeNet monitoring model
# reshape from [samples, timesteps] into [samples, timesteps, features]
train_Xr = train_X.reshape(train_X.shape[0], 1, train_X.shape[1], 1)
print(train_Xr.shape)
input_shape = (1,train_X.shape[1],1)
# define model
model = Sequential()
model.add(Conv2D(6, (1,5), activation='relu', input_shape=input_shape, padding='same',kernel_regularizer=regularizers.l2(1.e-4)))
model.add(BatchNormalization())
model.add(AveragePooling2D(pool_size=(1,2)))
model.add(Dropout(0.5))
model.add(Conv2D(16, (1,5), activation='relu', input_shape=input_shape, padding='same',kernel_regularizer=regularizers.l2(1.e-4)))
model.add(BatchNormalization())
model.add(AveragePooling2D(pool_size=(1,2)))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(120, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(84, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(1))
adam = optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model.compile(optimizer=adam, loss='mse')
# fit model
model.fit(train_Xr, train_y, epochs=1000, verbose=2, batch_size=32)
test_Xr = test_X.reshape(test_X.shape[0], 1, test_X.shape[1], 1)
yhat = model.predict(test_Xr, verbose=0)
| [
"[email protected]"
]
| |
5191cde8a942de5efe1de5976fd48b264736fa0d | 6531a7276dca2124f726f32166196c2102e273b2 | /src/helpers/exceptions.py | 98a030808a2ff931af0e68729aa94b19829f32ac | []
| no_license | antongorshkov/nycsamosa | e4e2b71b7c5465c514f9d204dab36778ca37abe3 | c0fc9661bc64a7dc5b4ffff67a6be40e7c0a0656 | refs/heads/master | 2021-01-23T08:56:20.716251 | 2010-01-05T06:31:28 | 2010-01-05T06:31:28 | 32,231,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | '''
Created on Dec 10, 2009
@author: Anton Gorshkov
'''
class ValidationError(Exception):
    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
| [
"antong@80da951e-da95-11de-849f-b362a63f070a"
]
| antong@80da951e-da95-11de-849f-b362a63f070a |
3624be7a79db8cc672aba9b8efa9ddceecc3d57b | 3630d6577d8995efc29aa72705ff63850d6008ad | /TwentySix.py | 229a8fbc0b2bac8e5b6cd0556f353850cc26737d | []
| no_license | egflo/Informatics | fbf76d7ce7335916f6489edaabc823e9b5489d05 | 41ce8e1013b46a72d9c2e6f734ccadd82bfa7edc | refs/heads/main | 2023-06-15T11:14:19.339756 | 2021-07-12T23:52:48 | 2021-07-12T23:52:48 | 385,415,204 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,701 | py | import sys, re, itertools, operator
#
# The columns. Each column is a data element and a formula.
# The first 2 columns are the input data, so no formulas.
#
all_words = [(), None]
stop_words = [(), None]
non_stop_words = [(), lambda : \
map(lambda w : \
w if w not in stop_words[0] else '',\
all_words[0])]
unique_words = [(),lambda :
set([w for w in non_stop_words[0] if w!=''])]
counts = [(), lambda :
map(lambda w, word_list : word_list.count(w), \
unique_words[0], \
itertools.repeat(non_stop_words[0], \
len(unique_words[0])))]
sorted_data = [(), lambda : sorted(zip(list(unique_words[0]), \
counts[0]), \
key=operator.itemgetter(1),
reverse=True)]
# The entire spreadsheet
all_columns = [all_words, stop_words, non_stop_words,\
unique_words, counts, sorted_data]
#
# The active procedure over the columns of data.
# Call this every time the input data changes, or periodically.
#
def update():
    global all_columns
    # Apply the formula in each column
    for c in all_columns:
        if c[1] != None:
            c[0] = c[1]()
# Load the fixed data into the first 2 columns
all_words[0] = re.findall('[a-z]{2,}', open(sys.argv[1]).read().lower())
stop_words[0] = set(open('../stop_words.txt').read().split(','))
# Update the columns with formulas
update()
for (w, c) in sorted_data[0][:25]:
    print(w, '-', c)
| [
"[email protected]"
]
| |
6a60cbbecdcf92b44ed9d25a3c28598a01d0c4c4 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/containerinstance/azure-mgmt-containerinstance/generated_samples/subnet_service_association_link_delete.py | 6d45c381ce3777604c49fcbb463f9927408cbe7d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,676 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-containerinstance
# USAGE
python subnet_service_association_link_delete.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ContainerInstanceManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.subnet_service_association_link.begin_delete(
resource_group_name="demo",
virtual_network_name="demo2",
subnet_name="demo3",
).result()
print(response)
# x-ms-original-file: specification/containerinstance/resource-manager/Microsoft.ContainerInstance/stable/2023-05-01/examples/SubnetServiceAssociationLinkDelete.json
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
c25cab2fe35b81cbb862252c8a516d4b0214269b | 2acb5f7b8064e7bdfd1a7e5c6bd9c1b42d6db686 | /src/homework2/task2.py | c596d4bb5677a8db5ade5024ffc7139dc0195cee | []
| no_license | sailorbl/Homework2-Anastasia_Valkevich | 5158f9c2db338107f6bf2b8100869b5c8abe0251 | 796e2d65a5d8e8650d12549e447d6ee3fabec5e1 | refs/heads/master | 2023-01-21T19:31:56.864262 | 2020-12-06T15:41:02 | 2020-12-06T15:41:02 | 318,797,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | """2. Найти самое длинное слово в введенном предложении.
Учтите что в предложении есть знаки препинания.
"""
str_ = input('Введите текст: ')
new_str = str_.split()
long_word = ''
for words in new_str:
if len(words) > len(long_word):
long_word = words
print(long_word)
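# Example run (illustrative input): entering "Hello, wonderful world!" prints
# "wonderful", since punctuation is stripped before the word lengths are compared.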
| [
"[email protected]"
]
| |
42418f555306c5613a0d66270dc1dde68a17ab40 | f4a3bc0399b0e087e97c01d5b26740ce88cd2466 | /server/backend/geom/gait2392geomcustomiser.py | d8136c54d6213576949bbfc44478ac9d0010d899 | []
| no_license | mkeo2524/MC | 94f194bb4d4fca16115d8a527e193316921cf6a3 | 0774f503f8b33309766530cae1a2f025355ea9b8 | refs/heads/master | 2023-01-29T13:20:22.091575 | 2020-10-12T19:33:23 | 2020-10-12T19:33:23 | 253,175,728 | 0 | 1 | null | 2022-12-12T09:40:20 | 2020-04-05T07:05:30 | Python | UTF-8 | Python | false | false | 51,420 | py | """
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>..
"""
"""
OpenSim Gait2392 bodies customisation
"""
import os
import numpy as np
import copy
import mayavi
import gias2
from gias2.common import transform3D
from gias2.mesh import vtktools
from gias2.fieldwork.field import geometric_field
from gias2.musculoskeletal import mocap_landmark_preprocess
from gias2.musculoskeletal.bonemodels import bonemodels
from gias2.musculoskeletal.bonemodels import lowerlimbatlas
from gias2.musculoskeletal import osim
from gias2.musculoskeletal import fw_model_landmarks as fml
from transforms3d.euler import mat2euler
import opensim
import scaler
import pdb
#=============================================================================#
SELF_DIR = os.path.split(os.path.realpath(__file__))[0]
TEMPLATE_OSIM_PATH = os.path.join(SELF_DIR, 'data', 'gait2392_simbody.osim')
OSIM_FILENAME = 'gait2392_simbody.osim'
OSIM_BODY_NAME_MAP = {'pelvis': 'pelvis',
'femur-l': 'femur_l',
'femur-r': 'femur_r',
'tibiafibula-l': 'tibia_l',
'tibiafibula-r': 'tibia_r',
}
PELVIS_SUBMESHES = ('RH', 'LH', 'sac')
PELVIS_SUBMESH_ELEMS = {'RH': range(0, 73),
'LH': range(73,146),
'sac': range(146, 260),
}
PELVIS_BASISTYPES = {'tri10':'simplex_L3_L3','quad44':'quad_L3_L3'}
TIBFIB_SUBMESHES = ('tibia', 'fibula')
TIBFIB_SUBMESH_ELEMS = {'tibia': range(0, 46),
'fibula': range(46,88),
}
TIBFIB_BASISTYPES = {'tri10':'simplex_L3_L3','quad44':'quad_L3_L3'}
GEOM_DIR = 'geom'
SACRUM_FILENAME = 'sacrum.vtp'
HEMIPELVIS_RIGHT_FILENAME = 'pelvis.vtp'
HEMIPELVIS_LEFT_FILENAME = 'l_pelvis.vtp'
FEMUR_LEFT_FILENAME = 'l_femur.vtp'
TIBIA_LEFT_FILENAME = 'l_tibia.vtp'
FIBULA_LEFT_FILENAME = 'l_fibula.vtp'
FEMUR_RIGHT_FILENAME = 'r_femur.vtp'
TIBIA_RIGHT_FILENAME = 'r_tibia.vtp'
FIBULA_RIGHT_FILENAME = 'r_fibula.vtp'
VALID_UNITS = ('nm', 'um', 'mm', 'cm', 'm', 'km')
# SIDES = ('left', 'right', 'both')
VALID_MODEL_MARKERS = sorted(list(scaler.virtualmarker.markers.keys()))
#=============================================================================#
def dim_unit_scaling(in_unit, out_unit):
"""
Calculate the scaling factor to convert from the input unit (in_unit) to
the output unit (out_unit). in_unit and out_unit must be a string and one
of ['nm', 'um', 'mm', 'cm', 'm', 'km'].
inputs
======
in_unit : str
Input unit
    out_unit : str
Output unit
returns
=======
scaling_factor : float
"""
unit_vals = {
'nm': 1e-9,
'um': 1e-6,
'mm': 1e-3,
'cm': 1e-2,
'm': 1.0,
'km': 1e3,
}
if in_unit not in unit_vals:
raise ValueError(
'Invalid input unit {}. Must be one of {}'.format(
in_unit, list(unit_vals.keys())
)
)
if out_unit not in unit_vals:
raise ValueError(
'Invalid input unit {}. Must be one of {}'.format(
in_unit, list(unit_vals.keys())
)
)
return unit_vals[in_unit]/unit_vals[out_unit]
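# Illustrative usage: converting an input model in millimetres to the
# metre-based output gives dim_unit_scaling('mm', 'm') == 1e-3, while
# dim_unit_scaling('cm', 'mm') == 10.0.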
# Opensim coordinate systems for bodies
def update_femur_opensim_acs(femur_model):
femur_model.acs.update(
*bonemodels.model_alignment.createFemurACSOpenSim(
femur_model.landmarks['femur-HC'],
femur_model.landmarks['femur-MEC'],
femur_model.landmarks['femur-LEC'],
side=femur_model.side
)
)
def update_tibiafibula_opensim_acs(tibiafibula_model):
tibiafibula_model.acs.update(
*bonemodels.model_alignment.createTibiaFibulaACSOpenSim(
tibiafibula_model.landmarks['tibiafibula-MM'],
tibiafibula_model.landmarks['tibiafibula-LM'],
tibiafibula_model.landmarks['tibiafibula-MC'],
tibiafibula_model.landmarks['tibiafibula-LC'],
side=tibiafibula_model.side
)
)
def _splitTibiaFibulaGFs(tibfibGField):
tib = tibfibGField.makeGFFromElements(
'tibia',
TIBFIB_SUBMESH_ELEMS['tibia'],
TIBFIB_BASISTYPES,
)
fib = tibfibGField.makeGFFromElements(
'fibula',
TIBFIB_SUBMESH_ELEMS['fibula'],
TIBFIB_BASISTYPES,
)
return tib, fib
def _splitPelvisGFs(pelvisGField):
"""
Given a flattened pelvis model, create left hemi, sacrum,
and right hemi meshes
"""
lhgf = pelvisGField.makeGFFromElements(
'hemipelvis-left',
PELVIS_SUBMESH_ELEMS['LH'],
PELVIS_BASISTYPES
)
sacgf = pelvisGField.makeGFFromElements(
'sacrum',
PELVIS_SUBMESH_ELEMS['sac'],
PELVIS_BASISTYPES
)
rhgf = pelvisGField.makeGFFromElements(
'hemipelvis-right',
PELVIS_SUBMESH_ELEMS['RH'],
PELVIS_BASISTYPES
)
return lhgf, sacgf, rhgf
def calc_pelvis_ground_angles(pelvis):
"""
returns pelvis tilt, list, rotation relative to ground
"""
globalCS = np.array(
[[0,0,0],
[0,0,1],
[1,0,0],
[0,1,0],
])
pelvisACS = pelvis.acs.unit_array
# calc rotation matrix mapping pelvis ACS to femur ACS
R = transform3D.directAffine(globalCS, pelvisACS)[:3,:3]
# calculate euler angles from rotation matrix
_list, tilt, rot = mat2euler(R, 'szxy')
return -tilt, -_list, -rot
def calc_hip_angles(pelvis, femur, side):
"""
returns hip flexion, adduction, rotation
"""
pelvisACS = pelvis.acs.unit_array
femurACS = femur.acs.unit_array
# calc rotation matrix mapping pelvis ACS to femur ACS
R = transform3D.directAffine(pelvisACS, femurACS)[:3,:3]
# calculate euler angles from rotation matrix
rot, flex, add = mat2euler(R, 'szxy')
if side=='l':
return -flex, -rot, add
else:
return -flex, rot, -add
def calc_knee_angles(femur, tibfib, side):
"""
returns knee flexion, adduction, rotation
"""
femurACS = femur.acs.unit_array
tibfibACS = tibfib.acs.unit_array
# calc rotation matrix mapping pelvis ACS to femur ACS
R = transform3D.directAffine(femurACS, tibfibACS)[:3,:3]
# calculate euler angles from rotation matrix
rot, flex, add = mat2euler(R, 'szxy')
if side=='l':
return -flex, rot, -add
else:
return -flex, -rot, add
def _calc_knee_spline_coords(ll, flex_angles):
"""
Calculates the cubic spline values for the knee joint through specified
angles. The values are the coordinates of the tibia frame origin relative
to the femur frame.
inputs
======
ll : LowerLimbLeftAtlas instance
flex_angles : 1d ndarray
a list of n knee angles at which to sample tibia location relative to the
femur ACS. Only flexion supported in 2392.
returns
=======
y : n x 3 ndarray
Array of the tibia frame origin relative to the femur frame at each
knee angle.
"""
_ll = copy.deepcopy(ll)
# restore original ACSs
_ll.models['femur'].update_acs()
_ll.models['tibiafibula'].update_acs()
# sample tibia ACS origin at each flexion angle
tib_os = []
for a in flex_angles:
_ll.update_tibiafibula([a,0,0])
tib_o = 0.5*(_ll.models['tibiafibula'].landmarks['tibiafibula-LC'] +
_ll.models['tibiafibula'].landmarks['tibiafibula-MC']
)
tib_os.append(tib_o)
update_femur_opensim_acs(_ll.models['femur'])
y = _ll.models['femur'].acs.map_local(np.array(tib_os))
# y = np.array([y[:,2], y[:,1], y[:,0]]).T # reverse dims
return y
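# Used by _cust_osim_tibiafibula below: the x-values (knee flexion angles) of the
# template model's knee SimmSplines are kept, and their y-values are replaced with
# the tibia-origin coordinates computed by _calc_knee_spline_coords for the
# customised bones.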
#=============================================================================#
class Gait2392GeomCustomiser(object):
gfield_disc = (6,6)
ankle_offset = np.array([0., -0.01, 0.])
back_offset = np.array([0., 0.01, 0.])
# back_offset = np.array([0., 0.0, 0.])
_body_scalers = {
'torso': scaler.calc_whole_body_scale_factors,
'pelvis': scaler.calc_pelvis_scale_factors,
'femur_l': scaler.calc_femur_scale_factors,
'femur_r': scaler.calc_femur_scale_factors,
'tibia_l': scaler.calc_tibia_scale_factors,
'tibia_r': scaler.calc_tibia_scale_factors,
'talus_l': scaler.calc_whole_body_scale_factors,
'talus_r': scaler.calc_whole_body_scale_factors,
'calcn_l': scaler.calc_whole_body_scale_factors,
'calcn_r': scaler.calc_whole_body_scale_factors,
'toes_l': scaler.calc_whole_body_scale_factors,
'toes_r': scaler.calc_whole_body_scale_factors,
}
def __init__(self, config, gfieldsdict=None, ll=None, verbose=True):
"""
Class for customising the OpenSim Gait2392 model's bodies and joints.
Customisation is based on either an input LowerLimbAtlas instance or
a dictionary of fieldwork geometric fields of each bone. Only one at
most should be defined.
inputs
======
config : dict
Dict of configurable options:
'osim_output_dir' : str
Path to write out the customised .osim file.
'write_osim_file' : bool
If True, write customised .osim file to osim_output_dir.
'in_unit' : str
Input model's coordinate units
'out_unit' : str
Output model's coordinate units
'side' : str
                Which limb to customise. Currently 'left' or 'right'.
gfieldsdict : dict [optional]
Expected geometric field dict keys:
pelvis
femur-l
femur-r
patella-l
patella-r
tibiafibula-l
tibiafibula-r
ll : LowerLimbAtlas instance [optional]
"""
self.config = config
# self.ll_transform = None
# self._pelvisRigid = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
# self._hipRot = np.array([0.0, 0.0, 0.0])
# self._kneeRot = np.array([0.0, 0.0, 0.0])
self.uniform_scaling = 1.0
self.pelvis_scaling = 1.0
self.femur_scaling = 1.0
self.petalla_scaling = 1.0
self.tibfib_scaling = 1.0
self.LL = None # lowerlimb object
self._hasInputLL = False
self.osimmodel = None # opensim model
self.markerset = None # markerset associated with opensim model
self.input_markers = {} # input marker name : input marker coords
self.verbose = verbose
self._unit_scaling = dim_unit_scaling(
self.config['in_unit'], self.config['out_unit']
)
if gfieldsdict is not None:
self.set_lowerlimb_gfields(gfieldsdict)
if ll is not None:
self.set_lowerlimb_atlas(ll)
self._body_scale_factors = {}
def init_osim_model(self):
self.osimmodel = osim.Model(TEMPLATE_OSIM_PATH)
self._osimmodel_init_state = self.osimmodel._model.initSystem()
self._original_segment_masses = dict([(b.name, b.mass) for b in self.osimmodel.bodies.values()])
def _check_geom_path(self):
"""
Check that the directory for geom meshes exists. If not, create it.
"""
geom_dir = os.path.join(self.config['osim_output_dir'], GEOM_DIR)
if not os.path.isdir(geom_dir):
os.mkdir(geom_dir)
def set_lowerlimb_atlas(self, ll):
self.LL = ll
self._hasInputLL = True
update_femur_opensim_acs(self.LL.ll_l.models['femur'])
update_tibiafibula_opensim_acs(self.LL.ll_l.models['tibiafibula'])
update_femur_opensim_acs(self.LL.ll_r.models['femur'])
update_tibiafibula_opensim_acs(self.LL.ll_r.models['tibiafibula'])
def set_lowerlimb_gfields(self, gfieldsdict):
"""
Instantiate the lower limb object using input models
"""
self.set_2side_lowerlimb_gfields(gfieldsdict)
# if self.config['side']=='left':
# self.set_left_lowerlimb_gfields(gfieldsdict)
# elif self.config['side']=='right':
# self.set_right_lowerlimb_gfields(gfieldsdict)
# elif self.config['side']=='both':
# self.set_2side_lowerlimb_gfields(gfieldsdict)
# def set_left_lowerlimb_gfields(self, gfieldsdict):
# """
# Instantiate the lower limb object using input models
# """
# self.LL = bonemodels.LowerLimbLeftAtlas('left lower limb')
# self.LL.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
# self.LL.set_bone_gfield('femur', gfieldsdict['femur'])
# self.LL.set_bone_gfield('patella', gfieldsdict['patella'])
# self.LL.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula'])
# self.LL.models['pelvis'].update_acs()
# update_femur_opensim_acs(self.LL.models['femur'])
# update_tibiafibula_opensim_acs(self.LL.models['tibiafibula'])
# def set_right_lowerlimb_gfields(self, gfieldsdict):
# """
# Instantiate the lower limb object using input models
# """
# self.LL = bonemodels.LowerLimbRightAtlas('right lower limb')
# self.LL.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
# self.LL.set_bone_gfield('femur', gfieldsdict['femur'])
# self.LL.set_bone_gfield('patella', gfieldsdict['patella'])
# self.LL.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula'])
# self.LL.models['pelvis'].update_acs()
# update_femur_opensim_acs(self.LL.models['femur'])
# update_tibiafibula_opensim_acs(self.LL.models['tibiafibula'])
def set_2side_lowerlimb_gfields(self, gfieldsdict):
"""
Instantiate the lower limb object using input models
"""
# left
if not self._hasInputLL:
ll_l = bonemodels.LowerLimbLeftAtlas('left lower limb')
ll_l.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
ll_l.set_bone_gfield('femur', gfieldsdict['femur-l'])
ll_l.set_bone_gfield('patella', gfieldsdict['patella-l'])
ll_l.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula-l'])
else:
ll_l = self.LL.ll_l
if 'pelvis' in gfieldsdict:
ll_l.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
if 'femur-l' in gfieldsdict:
ll_l.set_bone_gfield('femur', gfieldsdict['femur-l'])
if 'patella-l' in gfieldsdict:
ll_l.set_bone_gfield('patella', gfieldsdict['patella-l'])
if 'tibiafibula-l' in gfieldsdict:
ll_l.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula-l'])
update_femur_opensim_acs(ll_l.models['femur'])
update_tibiafibula_opensim_acs(ll_l.models['tibiafibula'])
# right
if not self._hasInputLL:
ll_r = bonemodels.LowerLimbRightAtlas('right lower limb')
ll_r.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
ll_r.set_bone_gfield('femur', gfieldsdict['femur-r'])
ll_r.set_bone_gfield('patella', gfieldsdict['patella-r'])
ll_r.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula-r'])
else:
ll_r = self.LL.ll_r
if 'pelvis' in gfieldsdict:
ll_r.set_bone_gfield('pelvis', gfieldsdict['pelvis'])
if 'femur-r' in gfieldsdict:
ll_r.set_bone_gfield('femur', gfieldsdict['femur-r'])
if 'patella-r' in gfieldsdict:
ll_r.set_bone_gfield('patella', gfieldsdict['patella-r'])
if 'tibiafibula-r' in gfieldsdict:
ll_r.set_bone_gfield('tibiafibula', gfieldsdict['tibiafibula-r'])
update_femur_opensim_acs(ll_r.models['femur'])
update_tibiafibula_opensim_acs(ll_r.models['tibiafibula'])
# 2side
if not self._hasInputLL:
self.LL = lowerlimbatlas.LowerLimbAtlas('lower limb')
self.LL.ll_l = ll_l
self.LL.ll_r = ll_r
self.LL._update_model_dict()
def _save_vtp(self, gf, filename, bodycoordmapper):
v, f = gf.triangulate(self.gfield_disc)
'''if (gf.name == 'femur_right_mirrored_from_left_mean_rigid_LLP26'):
gf_ = copy.deepcopy(gf)
rfemur = bonemodels.FemurModel('femur-r', gf_)
rfemur.side = 'right'
update_femur_opensim_acs(rfemur)
gfpoints = rfemur.acs.map_local(rfemur.gf.get_all_point_positions())
rfemur.gf.set_field_parameters(gfpoints.T[:,:,np.newaxis])
rfemur.gf.save_geometric_field('femur-r_pre.geof', 'femur-r_pre.ens', 'femur-r_pre.mesh', '/people/acar246/Desktop/')
#~ pdb.set_trace()
'''
# f = f[:,::-1]
if (gf.name == 'femur_right_mirrored_from_left_mean_rigid_LLP26'):
gf_ = copy.deepcopy(gf)
rfemur = bonemodels.FemurModel('femur-r', gf_)
rfemur.side = 'right'
update_femur_opensim_acs(rfemur)
v_local = rfemur.acs.map_local(v)
else:
v_local = bodycoordmapper(v)
v_local *= self._unit_scaling
vtkwriter = vtktools.Writer(
v=v_local,
f=f,
filename=filename,
)
vtkwriter.writeVTP()
def _get_osimbody_scale_factors(self, bodyname):
"""
Returns the scale factor for a body. Caches scale factors
that have already been calculated
inputs
------
bodyname : str
Gait2392 name of a body
returns
-------
sf : length 3 ndarray
scale factor array
"""
if bodyname not in self._body_scale_factors:
sf = self._body_scalers[bodyname](self.LL, self._unit_scaling)
self._body_scale_factors[bodyname] = sf
return self._body_scale_factors[bodyname]
def cust_osim_pelvis(self):
if self.verbose:
print('\nCUSTOMISING PELVIS...')
pelvis = self.LL.models['pelvis']
osim_pelvis = self.osimmodel.bodies[OSIM_BODY_NAME_MAP['pelvis']]
# scale inertial properties
# sf = scaler.calc_pelvis_scale_factors(
# self.LL, self._unit_scaling,
# )
sf = self._get_osimbody_scale_factors('pelvis')
scaler.scale_body_mass_inertia(osim_pelvis, sf)
if self.verbose:
print('scale factor: {}'.format(sf))
# update ground-pelvis joint
if self.verbose:
print('updating pelvis-ground joint...')
pelvis_origin = pelvis.acs.o
self.osimmodel.joints['ground_pelvis'].locationInParent = \
pelvis_origin*self._unit_scaling # in ground CS
self.osimmodel.joints['ground_pelvis'].location = \
np.array((0,0,0), dtype=float)*self._unit_scaling # in pelvis CS
if self.verbose:
print(
'location in parent: {}'.format(
self.osimmodel.joints['ground_pelvis'].locationInParent
)
)
print(
'location: {}'.format(
self.osimmodel.joints['ground_pelvis'].location
)
)
# update coordinate defaults
pelvis_ground_joint = self.osimmodel.joints['ground_pelvis']
if self._hasInputLL:
tilt, _list, rot = self.LL.pelvis_rigid[3:]
else:
tilt, _list, rot = calc_pelvis_ground_angles(pelvis)
## tilt
pelvis_ground_joint.coordSets['pelvis_tilt'].defaultValue = tilt
## list
pelvis_ground_joint.coordSets['pelvis_list'].defaultValue = _list
## rotation
pelvis_ground_joint.coordSets['pelvis_rotation'].defaultValue = rot
if self.verbose:
print(
'pelvis tilt, list, rotation: {:5.2f}, {:5.2f}, {:5.2f}'.format(
pelvis_ground_joint.coordSets['pelvis_tilt'].defaultValue,
pelvis_ground_joint.coordSets['pelvis_list'].defaultValue,
pelvis_ground_joint.coordSets['pelvis_rotation'].defaultValue,
)
)
# update mesh
if self.verbose:
print('updating visual geometry...')
lhgf, sacgf, rhgf = _splitPelvisGFs(self.LL.models['pelvis'].gf)
self._check_geom_path()
## sacrum.vtp
sac_vtp_full_path = os.path.join(
self.config['osim_output_dir'], GEOM_DIR, SACRUM_FILENAME
)
sac_vtp_osim_path = os.path.join(GEOM_DIR, SACRUM_FILENAME)
self._save_vtp(sacgf, sac_vtp_full_path, pelvis.acs.map_local)
## pelvis.vtp
rh_vtp_full_path = os.path.join(
self.config['osim_output_dir'], GEOM_DIR, HEMIPELVIS_RIGHT_FILENAME
)
rh_vtp_osim_path = os.path.join(GEOM_DIR, HEMIPELVIS_RIGHT_FILENAME)
self._save_vtp(rhgf, rh_vtp_full_path, pelvis.acs.map_local)
## l_pelvis.vtp
lh_vtp_full_path = os.path.join(
self.config['osim_output_dir'], GEOM_DIR, HEMIPELVIS_LEFT_FILENAME
)
lh_vtp_osim_path = os.path.join(GEOM_DIR, HEMIPELVIS_LEFT_FILENAME)
self._save_vtp(lhgf, lh_vtp_full_path, pelvis.acs.map_local)
osim_pelvis.setDisplayGeometryFileName(
[sac_vtp_osim_path, rh_vtp_osim_path, lh_vtp_osim_path]
)
def cust_osim_femur_l(self):
self._cust_osim_femur('l')
def cust_osim_femur_r(self):
self._cust_osim_femur('r')
def _cust_osim_femur(self, side):
if self.verbose:
print('\nCUSTOMISING FEMUR {}'.format(side.upper()))
if (side!='l') and (side!='r'):
raise ValueError('Invalid side')
femur = self.LL.models['femur-'+side]
pelvis = self.LL.models['pelvis']
osim_femur = self.osimmodel.bodies[
OSIM_BODY_NAME_MAP[
'femur-'+side
]
]
# scale inertial properties
# sf = scaler.calc_femur_scale_factors(
# self.LL, self._unit_scaling,
# side=None,
# )
sf = self._get_osimbody_scale_factors('femur_'+side)
scaler.scale_body_mass_inertia(osim_femur, sf)
if self.verbose:
print('scale factor: {}'.format(sf))
# remove multiplier functions from hip joint translations
hip = self.osimmodel.joints['hip_{}'.format(side)]
_remove_multiplier(hip.spatialTransform.get_translation1())
_remove_multiplier(hip.spatialTransform.get_translation2())
_remove_multiplier(hip.spatialTransform.get_translation3())
# update hip joint
if self.verbose:
print('updating hip {} joint...'.format(side))
if side=='l':
hjc = pelvis.landmarks['pelvis-LHJC']
else:
hjc = pelvis.landmarks['pelvis-RHJC']
self.osimmodel.joints['hip_{}'.format(side)].locationInParent = \
pelvis.acs.map_local(hjc[np.newaxis])[0] * self._unit_scaling
self.osimmodel.joints['hip_{}'.format(side)].location = \
femur.acs.map_local(hjc[np.newaxis])[0] * self._unit_scaling
if self.verbose:
print(
'location in parent: {}'.format(
self.osimmodel.joints['hip_{}'.format(side)].locationInParent
)
)
print(
'location: {}'.format(
self.osimmodel.joints['hip_{}'.format(side)].location
)
)
# update coordinate defaults
if self._hasInputLL:
if side=='l':
flex, rot, add = self.LL.hip_rot_l
else:
flex, rot, add = self.LL.hip_rot_r
else:
flex, rot, add = calc_hip_angles(pelvis, femur, side)
hip_joint = self.osimmodel.joints['hip_{}'.format(side)]
## hip_flexion_l
hip_joint.coordSets['hip_flexion_{}'.format(side)].defaultValue = flex
## hip_adduction_l
hip_joint.coordSets['hip_adduction_{}'.format(side)].defaultValue = add
## hip_rotation_l
hip_joint.coordSets['hip_rotation_{}'.format(side)].defaultValue = rot
if self.verbose:
print(
'hip flexion, adduction, rotation: {:5.2f}, {:5.2f}, {:5.2f}'.format(
hip_joint.coordSets['hip_flexion_{}'.format(side)].defaultValue,
hip_joint.coordSets['hip_adduction_{}'.format(side)].defaultValue,
hip_joint.coordSets['hip_rotation_{}'.format(side)].defaultValue,
)
)
#with the HJC updated we can relocate the wrap objects
oldModel = copy.copy(osim.Model(TEMPLATE_OSIM_PATH))
oldHJC = oldModel.joints['hip_{}'.format(side)].locationInParent
for i in self.osimmodel.wrapObjects:
#if the wrap object is on the same side as the femur
if(i[-1] == side):
                #we need the joint location of the child joint in terms of the main body coords
wrap = oldModel.wrapObjects[i]
point = np.array(wrap.translation)
#find a vector from the hip joint centre to the centre of the wrap object
vec = point - oldHJC
scale = self._get_osimbody_scale_factors('pelvis')
#scale the vector
scaleVec = scale*vec
#add this vector to the updated HJC
updatedJC = self.osimmodel.joints['hip_{}'.format(side)]
updatedJClocation = updatedJC.locationInParent
#newWrapCentre = updatedJClocation + scaleVec
newWrapCentre = updatedJClocation + vec
#set this as the centre of the updated wrap object
wrap2 = self.osimmodel.wrapObjects[i]
wrap2.getDimensions = newWrapCentre
# update mesh l_femur.vtp
if self.verbose:
print('updating visual geometry...')
self._check_geom_path()
if side=='l':
femur_vtp_full_path = os.path.join(
self.config['osim_output_dir'], GEOM_DIR, FEMUR_LEFT_FILENAME
)
femur_vtp_osim_path = os.path.join(GEOM_DIR, FEMUR_LEFT_FILENAME)
elif side=='r':
femur_vtp_full_path = os.path.join(
self.config['osim_output_dir'], GEOM_DIR, FEMUR_RIGHT_FILENAME
)
femur_vtp_osim_path = os.path.join(GEOM_DIR, FEMUR_RIGHT_FILENAME)
self._save_vtp(femur.gf, femur_vtp_full_path, femur.acs.map_local)
osim_femur.setDisplayGeometryFileName([femur_vtp_osim_path,])
def _get_osim_knee_spline_xk(self, side):
"""
Get the SimmSpline x values from the translation functions
of the gati2392 knee
"""
if (side!='l') and (side!='r'):
raise ValueError('Invalid side')
if side=='l':
kj = self.osimmodel.joints['knee_l']
else:
kj = self.osimmodel.joints['knee_r']
t1x = kj.getSimmSplineParams('translation1')[0]
t2x = kj.getSimmSplineParams('translation2')[0]
return t1x, t2x
def _set_osim_knee_spline_xyk(self, x, y, side):
if (side!='l') and (side!='r'):
raise ValueError('Invalid side')
if side=='l':
kj = self.osimmodel.joints['knee_l']
else:
kj = self.osimmodel.joints['knee_r']
kj.updateSimmSplineParams('translation1', x[0], y[0])
kj.updateSimmSplineParams('translation2', x[1], y[1])
def cust_osim_tibiafibula_l(self):
self._cust_osim_tibiafibula('l')
def cust_osim_tibiafibula_r(self):
self._cust_osim_tibiafibula('r')
def _cust_osim_tibiafibula(self, side):
if self.verbose:
print('\nCUSTOMISING TIBIA {}'.format(side.upper()))
if (side!='l') and (side!='r'):
raise ValueError('Invalid side')
tibfib = self.LL.models['tibiafibula-'+side]
femur = self.LL.models['femur-'+side]
osim_tibfib = self.osimmodel.bodies[
OSIM_BODY_NAME_MAP['tibiafibula-'+side]
]
# scale inertial properties
# sf = scaler.calc_tibia_scale_factors(
# self.LL, self._unit_scaling,
# side=None,
# )
sf = self._get_osimbody_scale_factors('tibia_'+side)
scaler.scale_body_mass_inertia(osim_tibfib, sf)
if self.verbose:
print('scale factor: {}'.format(sf))
# recover knee joint simmspline
knee = self.osimmodel.joints['knee_{}'.format(side)]
_remove_multiplier(knee.spatialTransform.get_translation1())
_remove_multiplier(knee.spatialTransform.get_translation2())
_remove_multiplier(knee.spatialTransform.get_translation3())
# update knee_l joint
if self.verbose:
print('updating knee {} joint...'.format(side))
kjc = 0.5*(femur.landmarks['femur-MEC'] + femur.landmarks['femur-LEC'])
tpc = 0.5*(tibfib.landmarks['tibiafibula-MC'] + tibfib.landmarks['tibiafibula-LC'])
_d = -np.sqrt(((kjc - tpc)**2.0).sum())
# Knee trans spline params are relative to the femoral head origin
self.osimmodel.joints['knee_{}'.format(side)].locationInParent = \
np.array([0,0,0], dtype=float)*self._unit_scaling
self.osimmodel.joints['knee_{}'.format(side)].location = \
np.array([0,0,0], dtype=float)*self._unit_scaling
# Knee spline values
# get spline xk from osim files
knee_spline_xk_1, knee_spline_xk_2 = self._get_osim_knee_spline_xk(side)
knee_spline_xk = [knee_spline_xk_1, knee_spline_xk_2]
# evaluate tib coord at xks
if side=='l':
knee_spline_yk_1 = _calc_knee_spline_coords(self.LL.ll_l, knee_spline_xk_1)*self._unit_scaling
knee_spline_yk_2 = _calc_knee_spline_coords(self.LL.ll_l, knee_spline_xk_2)*self._unit_scaling
else:
knee_spline_yk_1 = _calc_knee_spline_coords(self.LL.ll_r, knee_spline_xk_1)*self._unit_scaling
knee_spline_yk_2 = _calc_knee_spline_coords(self.LL.ll_r, knee_spline_xk_2)*self._unit_scaling
knee_spline_yk = [knee_spline_yk_1[:,0], knee_spline_yk_2[:,1]]
# set new spline yks
self._set_osim_knee_spline_xyk(knee_spline_xk, knee_spline_yk, side)
if self.verbose:
print('knee {} splines:'.format(side))
print(knee_spline_xk)
print(knee_spline_yk)
# Set input knee angle
knee_joint = self.osimmodel.joints['knee_{}'.format(side)]
if self._hasInputLL:
if side=='l':
flex, rot, add = self.LL._knee_rot_l
else:
flex, rot, add = self.LL._knee_rot_r
else:
flex, rot, add = calc_knee_angles(femur, tibfib, side)
## hip_flexion_l
knee_joint.coordSets['knee_angle_{}'.format(side)].defaultValue = flex
if self.verbose:
print(
'knee flexion: {:5.2f}'.format(
knee_joint.coordSets['knee_angle_{}'.format(side)].defaultValue
)
)
# update mesh
if self.verbose:
print('updating visual geometry...')
tibgf, fibgf = _splitTibiaFibulaGFs(self.LL.models['tibiafibula-'+side].gf)
self._check_geom_path()
# update mesh l_tibia.vtp
if side=='l':
tibia_filename = TIBIA_LEFT_FILENAME
if side=='r':
tibia_filename = TIBIA_RIGHT_FILENAME
self._check_geom_path()
tib_vtp_full_path = os.path.join(
self.config['osim_output_dir'],
GEOM_DIR,
tibia_filename,
)
tib_vtp_osim_path = os.path.join(
GEOM_DIR,
tibia_filename,
)
self._save_vtp(tibgf, tib_vtp_full_path, tibfib.acs.map_local)
# update mesh l_fibula.vtp
if side=='l':
fibula_filename = FIBULA_LEFT_FILENAME
if side=='r':
fibula_filename = FIBULA_RIGHT_FILENAME
fib_vtp_full_path = os.path.join(
self.config['osim_output_dir'],
GEOM_DIR,
fibula_filename,
)
fib_vtp_osim_path = os.path.join(
GEOM_DIR,
fibula_filename,
)
self._save_vtp(fibgf, fib_vtp_full_path, tibfib.acs.map_local)
osim_tibfib.setDisplayGeometryFileName(
[tib_vtp_osim_path, fib_vtp_osim_path]
)
def cust_osim_ankle_l(self):
# self._cust_osim_ankle('l')
self._cust_osim_foot('l')
def cust_osim_ankle_r(self):
# self._cust_osim_ankle('r')
self._cust_osim_foot('r')
def _cust_osim_foot(self, side):
"""
Customises foot models by applying opensim scaling to the foot segments,
joints, and muscle sites.
Segment topology in the foot is
tibia -> ankle(j) -> talus -> subtalar(j) -> calcaneus -> mtp(j) -> toes
"""
if self.verbose:
print('\nCUSTOMISING FOOT {}'.format(side.upper()))
if (side!='l') and (side!='r'):
raise ValueError('Invalid side')
tibfib = self.LL.models['tibiafibula-'+side]
femur = self.LL.models['femur-'+side]
# scale foot bodies and joints
# sf = scaler.calc_whole_body_scale_factors(
# self.LL, self._unit_scaling,
# )
scaler.scale_body_mass_inertia(
self.osimmodel.bodies['talus_{}'.format(side)],
self._get_osimbody_scale_factors('talus_{}'.format(side))
)
scaler.scale_body_mass_inertia(
self.osimmodel.bodies['calcn_{}'.format(side)],
self._get_osimbody_scale_factors('calcn_{}'.format(side))
)
scaler.scale_body_mass_inertia(
self.osimmodel.bodies['toes_{}'.format(side)],
self._get_osimbody_scale_factors('toes_{}'.format(side))
)
scaler.scale_joint(
self.osimmodel.joints['subtalar_{}'.format(side)],
[
self._get_osimbody_scale_factors('talus_{}'.format(side)),
self._get_osimbody_scale_factors('calcn_{}'.format(side)),
],
['talus_{}'.format(side), 'calcn_{}'.format(side)]
)
scaler.scale_joint(
self.osimmodel.joints['mtp_{}'.format(side)],
[
self._get_osimbody_scale_factors('calcn_{}'.format(side)),
self._get_osimbody_scale_factors('toes_{}'.format(side)),
],
['calcn_{}'.format(side), 'toes_{}'.format(side)]
)
if self.verbose:
print('scale factor: {}'.format(
self._get_osimbody_scale_factors('talus_{}'.format(side))
)
)
# remove multiplier functions from joint translations
ankle = self.osimmodel.joints['ankle_{}'.format(side)]
_remove_multiplier(ankle.spatialTransform.get_translation1())
_remove_multiplier(ankle.spatialTransform.get_translation2())
_remove_multiplier(ankle.spatialTransform.get_translation3())
subtalar = self.osimmodel.joints['subtalar_{}'.format(side)]
_remove_multiplier(subtalar.spatialTransform.get_translation1())
_remove_multiplier(subtalar.spatialTransform.get_translation2())
_remove_multiplier(subtalar.spatialTransform.get_translation3())
mtp = self.osimmodel.joints['mtp_{}'.format(side)]
_remove_multiplier(mtp.spatialTransform.get_translation1())
_remove_multiplier(mtp.spatialTransform.get_translation2())
_remove_multiplier(mtp.spatialTransform.get_translation3())
# set ankle joint parent location in custom tibiafibula
if self.verbose:
print('updating ankle {} joint...'.format(side))
ankle_centre = 0.5*(
tibfib.landmarks['tibiafibula-MM'] + tibfib.landmarks['tibiafibula-LM']
)
self.osimmodel.joints['ankle_{}'.format(side)].locationInParent = \
(tibfib.acs.map_local(ankle_centre[np.newaxis]).squeeze()*self._unit_scaling)+\
self.ankle_offset
if self.verbose:
print(
'location in parent: {}'.format(
self.osimmodel.joints['ankle_{}'.format(side)].locationInParent
)
)
def cust_osim_torso(self):
if self.verbose:
print('\nCUSTOMISING TORSO')
pelvis = self.LL.models['pelvis']
# scale torso inertial
# sf = scaler.calc_whole_body_scale_factors(
# self.LL, self._unit_scaling,
# )
sf = self._get_osimbody_scale_factors('torso')
scaler.scale_body_mass_inertia(
self.osimmodel.bodies['torso'], sf
)
if self.verbose:
print('scale factor: {}'.format(sf))
# remove multiplier functions from joint translations
back = self.osimmodel.joints['back']
_remove_multiplier(back.spatialTransform.get_translation1())
_remove_multiplier(back.spatialTransform.get_translation2())
_remove_multiplier(back.spatialTransform.get_translation3())
# set back joint parent location in custom pelvis
if self.verbose:
print('updating back joint...')
sacrum_top = pelvis.landmarks['pelvis-SacPlat']
self.osimmodel.joints['back'].locationInParent = \
(pelvis.acs.map_local(sacrum_top[np.newaxis]).squeeze()*self._unit_scaling)+\
self.back_offset
if self.verbose:
print(
'location in parent: {}'.format(
self.osimmodel.joints['back'].locationInParent
)
)
def write_cust_osim_model(self):
self.osimmodel.save(
os.path.join(str(self.config['osim_output_dir']), OSIM_FILENAME)
)
def customise(self):
# model_scale_factors = self.scale_model()
# self.scale_all_bodies()
# self.recover_simmsplines()
# model_scale_factors = self._calc_body_scale_factors()
# for debugging: get original muscle optimal fibre lengths and tendon
# slack lengths
init_muscle_ofl = dict([(m.name, m.optimalFiberLength) for m in self.osimmodel.muscles.values()])
init_muscle_tsl = dict([(m.name, m.tendonSlackLength) for m in self.osimmodel.muscles.values()])
# prescale muscles to save their unscaled lengths
self.prescale_muscles()
# for debugging: get pre-scaled muscle optimal fibre lengths and tendon
# slack lengths. Should not have changed
prescale_muscle_ofl = dict([(m.name, m.optimalFiberLength) for m in self.osimmodel.muscles.values()])
prescale_muscle_tsl = dict([(m.name, m.tendonSlackLength) for m in self.osimmodel.muscles.values()])
# for m in self.osimmodel.muscles.values():
# print('{} tsl {} ofl {}'.format(m.name, m.tendonSlackLength, m.optimalFiberLength))
# scale and modify bodies and joints
self.cust_osim_pelvis()
self.cust_osim_femur_l()
self.cust_osim_femur_r()
self.cust_osim_tibiafibula_l()
self.cust_osim_tibiafibula_r()
self.cust_osim_ankle_l()
self.cust_osim_ankle_r()
self.cust_osim_torso()
# normalise the mass of each body against total subject mass (if provided)
self.normalise_mass()
# post-scale muscles to calculate their scaled lengths
self.postscale_muscles()
# for debugging: get scaled muscle optimal fibre lengths and tendon
# slack lengths
postscale_muscle_ofl = dict([(m.name, m.optimalFiberLength) for m in self.osimmodel.muscles.values()])
postscale_muscle_tsl = dict([(m.name, m.tendonSlackLength) for m in self.osimmodel.muscles.values()])
# for debugging: print out OFL and TSL changes through scaling
if self.verbose:
print('\nSCALED MUSCLE FIBRE PROPERTIES')
for mn in sorted(self.osimmodel.muscles.keys()):
print('{} OFL: {:8.6f} -> {:8.6f} -> {:8.6f}'.format(
mn,
init_muscle_ofl[mn],
prescale_muscle_ofl[mn],
postscale_muscle_ofl[mn]
)
)
for mn in sorted(self.osimmodel.muscles.keys()):
print('{} TSL: {:8.6f} -> {:8.6f} -> {:8.6f}'.format(
mn,
init_muscle_tsl[mn],
prescale_muscle_tsl[mn],
postscale_muscle_tsl[mn]
)
)
# scale default markerset and add to model
self.add_markerset()
# write .osim file
if self.config['write_osim_file']:
self.write_cust_osim_model()
def prescale_muscles(self):
"""
Apply prescaling and scaling to muscles before bodies and joints are
customised
"""
state_0 = self.osimmodel._model.initSystem()
scale_factors = scaler.calc_scale_factors_all_bodies(
self.LL, self._unit_scaling, self.config['scale_other_bodies']
)
for m in self.osimmodel.muscles.values():
m.preScale(state_0, *scale_factors)
m.scale(state_0, *scale_factors)
def postscale_muscles(self):
"""
Postscale muscles after bodies and joints are customised to update
optimal fiber lengths and tendon slack lengths
"""
state_1 = self.osimmodel._model.initSystem()
scale_factors = scaler.calc_scale_factors_all_bodies(
self.LL, self._unit_scaling, self.config['scale_other_bodies']
)
for m in self.osimmodel.muscles.values():
m.postScale(state_1, *scale_factors)
def scale_model(self):
model_sfs = scaler.calc_scale_factors_all_bodies(
self.LL, self._unit_scaling, self.config['scale_other_bodies']
)
self.osimmodel.scale(self._osimmodel_init_state, *model_sfs)
return model_sfs
def add_markerset(self):
"""
Add the default 2392 markerset to the customised osim model
with customised marker positions.
Markers in config['adj_marker_pairs'].keys() are placed at their
corresponding input marker's position.
Otherwise, markers with bony landmark equivalents on the fieldwork model
are assigned the model landmark positions plus an offset.
Markers matching neither of the above criteria are scaled according to
their body's scale factors.
"""
vm = scaler.virtualmarker
g2392_markers = vm._load_virtual_markers()[0]
# maps of opensim names to fw names for markers and bodies
osim2fw_markernames = dict([(it[1], it[0]) for it in vm.marker_name_map.items()])
osim2fw_bodynames = dict([(it[1], it[0]) for it in OSIM_BODY_NAME_MAP.items()])
adj_marker_pairs = self.config['adj_marker_pairs']
if adj_marker_pairs is None:
adj_marker_pairs = {}
adj_model_marker_names = set(list(adj_marker_pairs.keys()))
print('adj model markers:')
for mm, mi in adj_marker_pairs.items():
print('{} : {}'.format(mm,mi))
def _local_coords(bodyname, landmarkname, global_coord=None, apply_offset=True):
"""
Returns the local coordinates of a landmark
"""
if global_coord is None:
if landmarkname[-2:] in ('-l', '-r'):
_landmarkname = landmarkname[:-2]
else:
_landmarkname = landmarkname
global_coord = self.LL.models[bodyname].landmarks[_landmarkname]
local_coords = self.LL.models[bodyname].acs.map_local(
global_coord[np.newaxis,:]
).squeeze()
if apply_offset:
return self._unit_scaling*(local_coords + vm.marker_offsets[landmarkname])
else:
return self._unit_scaling*local_coords
def _scale_marker(marker):
"""
Scales the default opensim marker position by the scaling factor
for its body
"""
body_sf = self._get_osimbody_scale_factors(marker.bodyName)
return marker.offset*body_sf
self.markerset = opensim.MarkerSet()
for osim_marker_name, marker0 in g2392_markers.items():
new_offset = None
# if defined, adjust the marker position to the input marker
if osim_marker_name in adj_model_marker_names:
# move marker to input marker coordinates
fw_body_name = osim2fw_bodynames[marker0.bodyName]
input_marker_name = adj_marker_pairs[osim_marker_name]
input_marker_coords = self.input_markers.get(input_marker_name)
if input_marker_coords is None:
print(
'WARNING: {} not found in input markers. {} will not be adjusted.'.format(
input_marker_name, osim_marker_name
)
)
else:
new_offset = _local_coords(
fw_body_name,
None,
global_coord=input_marker_coords,
apply_offset=False
)
# if new marker position has not been defined by adjustment, then either set
# as bony landmark coord or scale
if new_offset is None:
if osim_marker_name in osim2fw_markernames:
# if the marker has a fieldwork (fw) equivalent, move it to the fw landmark position with offset
fw_body_name = osim2fw_bodynames[marker0.bodyName]
fw_landmark_name = osim2fw_markernames[osim_marker_name]
new_offset = _local_coords(
fw_body_name,
fw_landmark_name,
apply_offset=True,
)
else:
# else scale default
new_offset = _scale_marker(marker0)
new_marker = osim.Marker(
bodyname=marker0.bodyName,
offset=new_offset
)
new_marker.name = marker0.name
self.markerset.adoptAndAppend(new_marker._osimMarker)
self.osimmodel._model.replaceMarkerSet(self._osimmodel_init_state, self.markerset)
def normalise_mass(self):
"""
Normalises the mass of each body so that the total model mass equals the given subject mass.
Doesn't do anything if config['subject_mass'] is None.
"""
if self.config.get('subject_mass') is None:
return
if self.verbose:
print('\nNORMALISING BODY MASSES')
# if preserving the reference model mass distribution, simply calculate a
# uniform scaling factor for all bodies from original and target
# subject mass
if self.config['preserve_mass_distribution'] is True:
if self.verbose:
print('Preserving mass distribution')
total_mass_0 = np.sum(list(self._original_segment_masses.values()))  # list() so np.sum works on dict values
target_mass = float(self.config['subject_mass'])
mass_scaling = target_mass/total_mass_0
for bname in self._original_segment_masses:
b = self.osimmodel.bodies[bname]
b.mass = self._original_segment_masses[bname]*mass_scaling
if self.verbose:
print('{}: {:5.2f} kg'.format(b.name, b.mass))
else:
# calculate scaling factors for each body
target_mass = float(self.config['subject_mass'])
total_mass_0 = np.sum([float(b.mass) for b in self.osimmodel.bodies.values()])
mass_scaling = target_mass/total_mass_0
# scale mass for each body
for b in self.osimmodel.bodies.values():
b.mass = b.mass*mass_scaling
if self.verbose:
print('{}: {:5.2f} kg'.format(b.name, b.mass))
total_mass_1 = np.sum([float(b.mass) for b in self.osimmodel.bodies.values()])
if self.verbose:
print('Target Mass: {} kg'.format(target_mass))
print('Unnormalised Mass: {} kg'.format(total_mass_0))
print('Normalised Mass: {} kg'.format(total_mass_1))
def _get_foot_muscles(model, side):
"""
Return osim muscle instances for the muscles in the foot
"""
foot_segs = set([
'talus_{}'.format(side),
'calcn_{}'.format(side),
'toes_{}'.format(side),
])
foot_muscles = []
for mus in model.muscles.values():
# check each path point to see if they are on a foot segment
for pp in mus.getAllPathPoints():
if pp.body.name in foot_segs:
foot_muscles.append(mus)
break
return foot_muscles
def _remove_multiplier(owner):
"""
Replace a component's MultiplierFunction with the original function
wrapped inside the MultiplierFunction instance.
"""
newfunc = owner.getFunction()
if newfunc.getConcreteClassName()=='MultiplierFunction':
oldfunc = opensim.MultiplierFunction_safeDownCast(newfunc).getFunction()
owner.setFunction(oldfunc.clone())
| [
"[email protected]"
]
| |
a933800e64877f2791fd57bcc107e47a615570b4 | 8fb29868ab643ad6b4917bbcd0ebd7f14c269dd3 | /learning_logs/models.py | 07374f15a1dd80c0da30da5f613c24be01bddedc | []
| no_license | radybsb/Learning_log | 44707e62cc5819b1f467a68df187c2baaf41eed6 | 035d6669ec5080a032767e44e8b900b84fad5c9c | refs/heads/master | 2023-06-14T20:28:28.211527 | 2021-07-10T22:03:35 | 2021-07-10T22:03:35 | 384,798,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Topic(models.Model):
"""A topic the user is learning about"""
text = models.CharField(max_length=200)
date_added = models.DateTimeField(auto_now_add=True)
owner = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
"""Return a string representation of the model."""
return self.text
class Entry(models.Model):
"""Something specific learned about a topic"""
topic = models.ForeignKey(Topic, on_delete=models.CASCADE)
text = models.TextField()
date_added = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name_plural = 'entries'
def __str__(self):
"""Return a string representation of the model."""
return self.text[:50] + "..."
| [
"[email protected]"
]
| |
f5f5c3aae737b64e06ecd0a7e4bf5200866fa6bb | 294ab0a0cc02b12b5a5a0e4e0d0ed0ee3d812d5f | /pystagram/settings.py | 1a19f94c0ae6f81e979f39acbceef9176d47c531 | []
| no_license | supegate/imageupload | 83ae83c43ac723a94ba8785ae029e92026a57607 | dcb534a56b5aa9be553497f989fae66fcc8735d2 | refs/heads/master | 2021-05-05T17:49:30.920635 | 2018-01-15T12:35:05 | 2018-01-15T12:35:05 | 117,515,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | """
Django settings for pystagram project.
Generated by 'django-admin startproject' using Django 1.11.7.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ea+80muergl7_k8$!v6dxnd-_j(k2ll+*r^2!pb#kacaa8h2v1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'photos',
'profiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pystagram.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pystagram.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/assets/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
('byebye', os.path.join(BASE_DIR, 'static2'),),
)
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_URL = '/upload_files/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
LOGIN_REDIRECT_URL = '/photos/upload/'
| [
"[email protected]"
]
| |
d68598d3f5bbfffbffbed80e78e7dd09c5066061 | bba645705f6fdf0605f3da2b426385c93e40da7d | /unit38/try_except_raise_in_except.py | e8886ee6d48478a079a078fef249e716a9cb6bff | []
| no_license | chichchic/coding-studio | f053b7ffafe5b9bc331facb07d4d74a025d32931 | f2dcd24de7761f7dcbf6a84517622d8338fe8017 | refs/heads/main | 2023-08-07T07:24:25.658640 | 2021-08-29T17:22:09 | 2021-08-29T17:22:09 | 391,098,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | def three_multiple():
try:
x = int(input('Enter a multiple of 3: '))
if x % 3 != 0: # if x is not a multiple of 3
raise Exception('Not a multiple of 3.') # raise the exception
print(x)
except Exception as e: # handle the exception inside the function
print('An exception occurred in the three_multiple function.', e)
raise # re-raise the current exception so it propagates to the enclosing code block
try:
three_multiple()
except Exception as e: # runs even though the exception was raised in the inner code block
print('An exception occurred in the script file.', e)
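# Example run (added illustration, using the messages above): entering 5 at the prompt prints
#   An exception occurred in the three_multiple function. Not a multiple of 3.
#   An exception occurred in the script file. Not a multiple of 3.
# because the inner handler reports the error and then re-raises it to the outer handler.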
| [
"[email protected]"
]
| |
7612152051700db815866e680db4a609cf9106b9 | f9a72e61c415fdcf5b01389749f496b831a735ae | /quickness of pandas.py | ba6c0355c624c379dc5b4e23e1eccda6c0525c93 | []
| no_license | alexanu/PythonLearn | 1b94e1af25496adb8420692c79b870b34beafc79 | 06362c3c6c6bd2bdb12b911ae6efb5b1b973b7e6 | refs/heads/master | 2020-03-20T16:21:26.848283 | 2018-09-26T15:38:18 | 2018-09-26T15:38:18 | 137,537,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | '''
Operating on NumPy arrays is a lot more efficient than operating on Python objects.
There are two big factors that contribute to this:
1) It takes longer to add 1 to a number in pure Python than in NumPy:
- Python doesn't know until runtime that the number is an integer,
- so it must check each number's data type to figure out how to compute "x + 1".
2) The Python data structure takes up a lot more space than the NumPy array:
- each list element must also carry metadata that specifies its data type =>
... you fill up the high levels of cache faster in Python than in NumPy (see the memory-footprint sketch at the end of this file)
'''
import time, numpy as np, matplotlib.pyplot as plt
def time_numpy(n): # time required to increment all elements of a list of numbers for a NumPy array
a = np.arange(n)
start = time.time()
bigger = a + 1
stop = time.time()
return stop - start
def time_python(n): # time required to increment all elements of a list of numbers for a Python list
l = range(n)
start = time.time()
bigger = [x+1 for x in l]
stop = time.time()
return stop - start
n_trials = 10
ns = range(20, 500)
ratios = []
for n in ns:
python_total = sum([time_python(n) for _ in range(n_trials)])
numpy_total = sum([time_numpy(n) for _ in range(n_trials)])
ratios.append(python_total / numpy_total)
plt.plot(ns, ratios)
plt.xlabel("Length of List / Array")
plt.ylabel("Python / Numpy Ratio")
plt.title("Relative Speed of Numpy vs Pure Python")
plt.show()
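# --- Memory-footprint sketch (added illustration; not part of the original timing experiment) ---
# The second factor in the docstring above concerns space rather than speed. This is a rough
# check of the footprint of a Python list of ints versus a NumPy array; exact byte counts
# depend on the Python/NumPy version and platform.
import sys
n = 1000
py_list = list(range(n))
np_array = np.arange(n)
# sys.getsizeof on a list counts only its pointer table, so add the size of each int object;
# the NumPy array stores raw machine integers contiguously (typically 8 bytes each).
list_bytes = sys.getsizeof(py_list) + sum(sys.getsizeof(x) for x in py_list)
array_bytes = np_array.nbytes
print("list: ~%d bytes, array: %d bytes" % (list_bytes, array_bytes))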
| [
"[email protected]"
]
| |
c11ff8783c5e1b31a53fa32f228e93a78addae87 | 8c16d90a63f561c42042531a137827bd3e16655c | /setup.py | 44502eacb5881c78f94557e8c38d6ccbc8300521 | [
"BSD-3-Clause"
]
| permissive | alisterburt/mrchead | 252735382fb0ccbc8757a0262dad4a4968591280 | 8bfbfcb5e572a3e5c48726b45558692274b75113 | refs/heads/main | 2023-09-02T11:18:35.029882 | 2021-10-19T13:54:06 | 2021-10-19T13:54:06 | 418,932,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | import setuptools
setuptools.setup(use_scm_version={"write_to": "mrchead/_version.py"})
| [
"[email protected]"
]
| |
df69ac51d7eb7d05336fc51882b5aeac4bb6f0cf | f0c402d3858f0643561886797578b1e64655b1b3 | /utils/regression/classes/exec_controller.py | 8b4abf61309907009c487f59fd31956639b4b509 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | Leo-Wang-JL/force-riscv | 39ad2a72abd814df4b63879ce9825b6b06a9391a | deee6acaaee092eb90ac2538de122303334e5be3 | refs/heads/master | 2023-01-28T00:06:58.135651 | 2020-11-18T02:54:10 | 2020-11-18T02:54:10 | 271,873,013 | 0 | 0 | NOASSERTION | 2020-06-28T00:51:26 | 2020-06-12T19:15:26 | C++ | UTF-8 | Python | false | false | 4,451 | py | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# file: execute_controller.py
# comment: executes the processes necessary to
from common.path_utils import PathUtils
from common.msg_utils import Msg
from common.sys_utils import SysUtils
from common.errors import *
from classes.controller import Controller
from classes.control_item import ControlItem, CtrlItmKeys
class ExecuteController( Controller ):
def __init__( self, aAppsInfo ):
super().__init__(aAppsInfo )
self.executors = []
self.frun = None
self.task_file = None
self.task_name = None
self.task_ndx = None
def load(self, arg_ctrl_item ):
super().load( arg_ctrl_item )
self.task_file, self.task_name, self.task_ndx = self.initialize_task()
self.load_executors()
def process( self ):
# super().process()
try:
self.process_executors()
except Exception as arg_ex:
self.report_error( arg_ex )
raise
finally:
return True
def set_frun( self, arg_frun ):
self.frun = arg_frun
def load_executors( self ):
try:
# for app_cfg in self.mAppsInfo.mSequenceApps:
for seq_app_cfg in self.mAppsInfo.mSequenceApps:
my_executor = seq_app_cfg.createExecutor()
Msg.user( "ExecuteController::load_executors( 2 )" )
my_executor.load( self.ctrl_item )
Msg.user( "ExecuteController::load_executors( 1 )" )
if not my_executor.skip():
Msg.user( "ExecuteController::load_executors( 3 )" )
my_executor.set_frun( self.frun )
# Msg.user( "ExecuteController::load_executors( 4 )" )
my_executor.set_task_file( self.task_file )
# Msg.user( "ExecuteController::load_executors( 5 )" )
my_executor.set_task_name( self.task_name )
# Msg.user( "ExecuteController::load_executors( 5.1 )" )
self.executors.append(my_executor)
except Exception as arg_ex:
Msg.user( "ExecuteController::load_executors( 14 )" )
Msg.error_trace()
self.report_error( arg_ex )
finally:
Msg.user( "ExecuteController::load_executors( 13 )" )
return True
def process_executors( self ):
my_ret_val = False
try:
for my_executor in self.executors:
Msg.user( "ExecuteController::process_executors( my_executor: %s )" % str( type( my_executor )), "EXE-CTRL" )
my_executor.pre()
if not my_executor.execute():
Msg.user("Executor returning False", "EXE-CTRL")
break
my_executor.post()
my_ret_val = True
except Exception as arg_ex:
Msg.error_trace()
self.report_error( arg_ex )
finally:
return my_ret_val
def initialize_task( self ):
#Msg.user( "ExecuteController::initialize_task(1)" )
my_task_file = PathUtils.append_path( self.ctrl_item.fctrl_dir, self.ctrl_item.fctrl_name )
#Msg.user( "ExecuteController::initialize_task(2)" )
my_tmp, my_task_ndx = PathUtils.split_path( self.ctrl_item.fctrl_dir )
#Msg.user( "ExecuteController::initialize_task(3)" )
my_task_name = self.ctrl_item.fctrl_name.replace( ".py", "")
#Msg.user( "ExecuteController::initialize_task(5)" )
Msg.user( "Task File: %s, Task Name: %s, Task Index: %s" % ( my_task_file, my_task_name, my_task_ndx ) )
return ( my_task_file, my_task_name, my_task_ndx )
# arg_ex is an exception class
def report_error( self, arg_ex ):
Msg.err( "%s: %s" % ( str( type( self ) ), str( arg_ex ) ) )
raise
| [
"[email protected]"
]
| |
214a6ddf5af4631b2a22e9d394592c9d2a30ccba | 2bca59a89b04e65e3b464e3f3ecd914e005bbb04 | /src/lattice.py | 4599ca183c0d454eb05ea83f3b06703bb46efcbd | []
| no_license | johannesnauta/sllvm-fragmented | 856128a40b3911601ad6f726497146c615a05713 | 3348bbbef4924ba087d269dddcc21c44d71b2058 | refs/heads/main | 2023-04-14T20:37:00.320107 | 2022-05-26T09:31:39 | 2022-05-26T09:31:39 | 369,608,459 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,805 | py | """ Holds modules for generating resource lattices using fractional Brownian motion
Currently, the main function that generates an L x L landscape uses the
two-dimensional spectral synthesis from (Saupe, 1988, Algorithms for random fractals).
"""
# Import necessary libraries
import numpy as np
import numba
from numba.core import types
from numba.typed import Dict
from scipy.ndimage import label
# Define the type used in the dictionary
int_ = types.int64
int_array = types.int64[:]
@numba.jit(nopython=True, cache=True)
def nb_set_seed(seed):
np.random.seed(seed)
@numba.jit(nopython=True, cache=True)
def nb_SpectralSynthesis2D(L, H, sig):
A = np.zeros((L,L), dtype=np.cdouble)
for i in range(L//2):
for j in range(L//2):
phase = 2*np.pi*np.random.random()
if i!=0 or j!=0:
r = (i*i+j*j)**(-(H+1)/2) * np.random.normal(0, sig)
else:
r = 0
A[i,j] = r*np.cos(phase) + 1j*r*np.sin(phase)
i0 = 0 if i == 0 else L-i
j0 = 0 if j == 0 else L-j
A[i0,j0] = r*np.cos(phase) + 1j*r*np.sin(phase)
# @TODO: Why does one need to loop 'twice'
# (but note different indices are assigned)
# See also https://link.springer.com/content/pdf/10.1023/A:1008193015770.pdf
for i in range(1, L//2):
for j in range(1, L//2):
phase = 2*np.pi*np.random.random()
r = (i*i+j*j)**(-(H+1)/2) * np.random.normal(0, sig) # zero-mean Gaussian, consistent with the first loop
A[i,L-j] = r*np.cos(phase) + 1j*r*np.sin(phase)
A[L-i,j] = r*np.cos(phase) - 1j*r*np.sin(phase)
return A
@numba.jit(nopython=True, cache=True)
def nb_applyPBC(lattice, num_labels):
""" Apply periodic boundary conditions on a labelled lattice """
L, _ = lattice.shape
for i in range(L):
# Compare left and right
if lattice[i,0] and lattice[i,-1]:
if lattice[i,0] != lattice[i,-1]:
lattice = np.where(lattice==lattice[i,-1], lattice[i,0], lattice)
num_labels -= 1
# Compare up and down
if lattice[0,i] and lattice[-1,i]:
if lattice[0,i] != lattice[-1,i]:
lattice = np.where(lattice==lattice[-1,i], lattice[0,i], lattice)
num_labels -= 1
return lattice, num_labels
@numba.jit(nopython=True, cache=True)
def nb_construct_label_list(labelled_lattice, labels):
# Go through each label, and gather their indices and put them in a list of lists
label_dict = Dict.empty(
key_type=int_,
value_type=int_array,
)
for lab in labels:
indices = np.flatnonzero(labelled_lattice==lab)
label_dict[lab] = indices
return label_dict
class Lattice(object):
def __init__(self, seed) -> None:
nb_set_seed(seed)
def binary_lattice(self, lattice, rho):
""" Generate binary lattice from the continuous fractional Brownian motion lattice
ρ ∈ [0,1] determines the occupancy, where ρN is the (integer) number of sites
available to be occupied by resources
"""
if rho == 1:
return np.ones(lattice.shape, dtype=np.bool_)
shifted_lattice = lattice + abs(np.min(lattice)) # Shift
sorted_lattice = np.sort(shifted_lattice.flatten()) # Sort
# Determine cutoff point
cutoff = sorted_lattice[int((1-rho)*lattice.shape[0]*lattice.shape[1])]
# Generate binary lattice
_lattice = shifted_lattice / cutoff # Normalize lattice
_lattice[_lattice >= 1] = 1 # All above cutoff to 1
_lattice[_lattice < 1] = 0 # All below to 0
return np.asarray(_lattice, dtype=np.bool_)
def label(self, lattice):
""" Compute the labelled lattice using periodic boundary conditions """
labelled_lattice, num_labels = nb_applyPBC(*label(lattice))
labels = np.unique(labelled_lattice)
# Find the label for which lattice entries are empty
for lab in labels:
if not np.any(lattice[np.where(labelled_lattice==lab)]):
break
# Compute mask for those indices
mask = np.ones(len(labels), bool)
mask[np.argwhere(labels==lab)] = False
# Apply the mask
labels = labels[mask]
label_list = nb_construct_label_list(labelled_lattice, labels)
return label_list, num_labels
def SpectralSynthesis2D(self, L, H, sig=1, bounds=[0,1]):
""" Generate fractional Brownian motion in two dimensions
@TODO: Fully understand the algorithm below, and clarify with comments
"""
A = nb_SpectralSynthesis2D(L, H, sig)
X = np.real(np.fft.fft2(A))
return X
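# --- Usage sketch (added illustration; parameter values below are hypothetical) ---
# Generates a fractional-Brownian-motion surface, thresholds it into a binary resource
# lattice with occupancy rho, and labels the connected patches with periodic boundaries.
if __name__ == '__main__':
    lat = Lattice(seed=42)        # hypothetical seed
    L, H, rho = 64, 0.5, 0.2      # hypothetical lattice size, Hurst exponent, occupancy
    surface = lat.SpectralSynthesis2D(L, H)
    binary = lat.binary_lattice(surface, rho)
    patches, num_patches = lat.label(binary)
    print('occupied sites:', int(binary.sum()), 'patches:', num_patches)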
| [
"[email protected]"
]
| |
bf984d5039627f947dc2053c32f7bff7512e96b7 | 4a8893d1b23c226bf2d493c11d787fbf774488ef | /verifier.py | 06e8b4182e0abe39ec088b7c639ec86ecb3e1fda | []
| no_license | multisignature/flashflip-verifier | 6e4f1c6858e8c6a2c5504d3c378e77e69d3903d0 | c132a93887b45aa62fe8f53e0b9d0836e3fa7073 | refs/heads/main | 2022-12-27T14:04:32.603950 | 2020-10-12T08:31:37 | 2020-10-12T08:31:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,790 | py | import hashlib
import hmac
import base64
import random
import sys
import math
def provabilitySeed(dailySecret,clientSecret,nonce):
# HMAC-SHA512 keyed with the daily server secret over "clientSecret-nonce"; the hex digest
# is used as a deterministic RNG seed that players can re-verify once the daily secret is revealed
clientSecretAndNounce = clientSecret+"-"+str(nonce)
hmacOutput = hmac.new(dailySecret.encode('utf8'), clientSecretAndNounce.encode('utf8'), hashlib.sha512).hexdigest()
return(hmacOutput)
def rpsSelectionBot(clientSeed,nonce,serverSeed):
alternatives = ["rock","paper","scissors"]
combinedSeed = provabilitySeed(serverSeed,clientSeed,nonce)
random.seed(combinedSeed)
random.shuffle(alternatives)
result = {"result":alternatives[0],"nonce":nonce,"clientSeed":clientSeed}
return result
def flipCoin(clientSeed,nonce,serverSeed):
coinSides = ["Heads","Tails"]
combinedSeed = provabilitySeed(serverSeed,clientSeed,nonce)
random.seed(combinedSeed)
random.shuffle(coinSides)
result = {"result":coinSides[0],"nonce":nonce,"clientSeed":clientSeed}
return result
def playBank(clientSeed,nonce,serverSeed):
bankList = ["Lose","Lose"] + (["Win"] * 100)
combinedSeed = provabilitySeed(serverSeed,clientSeed,nonce)
random.seed(combinedSeed)
random.shuffle(bankList)
result = {"result":bankList[0],"nonce":nonce,"clientSeed":clientSeed}
return result
def playDice(clientSeed,nonce,serverSeed,diceMax):
diceSides = list(range(1, (diceMax+1)))
combinedSeed = provabilitySeed(serverSeed,clientSeed,nonce)
random.seed(combinedSeed)
random.shuffle(diceSides)
result = {"result":diceSides[0],"nonce":nonce,"clientSeed":clientSeed}
return result
counterHeads = 0
counterTails = 0
def playCrash(clientSeed,nonce,serverSeed):
combinedSeed = provabilitySeed(serverSeed,clientSeed,nonce)
random.seed(combinedSeed)
i = random.random()
i = 0.99/(1-i) # map the uniform draw to a crash multiplier with a 1% house edge
i = max(i,1.0) # never below 1.00x
i = math.floor(i*100)/100 # truncate to two decimal places
result = {"result":i,"nonce":nonce,"clientSeed":clientSeed}
return result
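# --- CLI usage note (added; argument layout inferred from the dispatch code below) ---
# python verifier.py <game> <clientSeed> <nonceStart> <nonceEnd> <serverSeed> [diceMax]
#   <game>        one of: bank, flip, rps, dice, crash
#   <nonceStart>/<nonceEnd>   nonce range to replay (end is exclusive)
#   [diceMax]     number of dice sides, required only for the "dice" game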
if sys.argv[1] == "bank":
for i in range(int(sys.argv[3]), int(sys.argv[4])):
print(playBank(sys.argv[2],i,sys.argv[5]))
elif sys.argv[1] == "flip":
for i in range(int(sys.argv[3]), int(sys.argv[4])):
result = flipCoin(sys.argv[2],i,sys.argv[5])
print(result)
if result['result'] == "Heads":
counterHeads = counterHeads +1
else:
counterTails = counterTails +1
print(str(counterHeads),str(counterTails))
elif sys.argv[1] == "rps":
for i in range(int(sys.argv[3]), int(sys.argv[4])):
print(rpsSelectionBot(sys.argv[2],i,sys.argv[5]))
elif sys.argv[1] == "dice":
for i in range(int(sys.argv[3]), int(sys.argv[4])):
result = playDice(sys.argv[2],i,sys.argv[5],int(sys.argv[6]))
print(result) # reuse the result computed above rather than rolling again
elif sys.argv[1] == "crash":
for i in range(int(sys.argv[3]), int(sys.argv[4])):
result = playCrash(sys.argv[2],i,sys.argv[5])
print(result)
else:
print("Invalid game")
| [
"[email protected]"
]
| |