hexsha (stringlengths 40–40) | size (int64 4–1.02M) | ext (stringclasses, 8 values) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 4–209) | max_stars_repo_name (stringlengths 5–121) | max_stars_repo_head_hexsha (stringlengths 40–40) | max_stars_repo_licenses (listlengths 1–10) | max_stars_count (int64 1–191k, nullable) | max_stars_repo_stars_event_min_datetime (stringlengths 24–24, nullable) | max_stars_repo_stars_event_max_datetime (stringlengths 24–24, nullable) | max_issues_repo_path (stringlengths 4–209) | max_issues_repo_name (stringlengths 5–121) | max_issues_repo_head_hexsha (stringlengths 40–40) | max_issues_repo_licenses (listlengths 1–10) | max_issues_count (int64 1–67k, nullable) | max_issues_repo_issues_event_min_datetime (stringlengths 24–24, nullable) | max_issues_repo_issues_event_max_datetime (stringlengths 24–24, nullable) | max_forks_repo_path (stringlengths 4–209) | max_forks_repo_name (stringlengths 5–121) | max_forks_repo_head_hexsha (stringlengths 40–40) | max_forks_repo_licenses (listlengths 1–10) | max_forks_count (int64 1–105k, nullable) | max_forks_repo_forks_event_min_datetime (stringlengths 24–24, nullable) | max_forks_repo_forks_event_max_datetime (stringlengths 24–24, nullable) | content (stringlengths 4–1.02M) | avg_line_length (float64 1.07–66.1k) | max_line_length (int64 4–266k) | alphanum_fraction (float64 0.01–1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
13b4d3a68baccbd7f4bd02979209b96592b03de0 | 2,076 | py | Python | data_processing/dataset_processor.py | nazariinyzhnyk/fashion-img-segmentation | 99bc08fcda1fbc6453442b0d96a84bf40e7d0c4a | ["MIT"] | 1 | 2020-05-30T02:33:45.000Z | 2020-05-30T02:33:45.000Z | data_processing/dataset_processor.py | nazariinyzhnyk/fashion-img-segmentation | 99bc08fcda1fbc6453442b0d96a84bf40e7d0c4a | ["MIT"] | null | null | null | data_processing/dataset_processor.py | nazariinyzhnyk/fashion-img-segmentation | 99bc08fcda1fbc6453442b0d96a84bf40e7d0c4a | ["MIT"] | null | null | null |
from mrcnn import utils
import os
import cv2
import numpy as np
from data_processing import resize_image, get_label_names
class DatasetProcessor(utils.Dataset):
def __init__(self, df):
super().__init__(self)
self.label_names = get_label_names(os.path.join('..', 'data', 'label_descriptions.json'))
# Add classes
for i, name in enumerate(self.label_names):
self.add_class("fashion", i + 1, name)
# Add images
for i, row in df.iterrows():
self.add_image("fashion",
image_id=row.name,
path=os.path.join('..', 'data', 'images', row.name),
labels=row['CategoryId'],
annotations=row['EncodedPixels'],
height=row['Height'], width=row['Width'])
self.img_size = 512
def image_reference(self, image_id):
info = self.image_info[image_id]
return info['path'], [self.label_names[int(x)] for x in info['labels']]
def load_image(self, image_id):
img_path = self.image_info[image_id]['path']
return resize_image(img_path, self.img_size)
def load_mask(self, image_id):
info = self.image_info[image_id]
mask = np.zeros((self.img_size, self.img_size, len(info['annotations'])), dtype=np.uint8)
labels = []
for m, (annotation, label) in enumerate(zip(info['annotations'], info['labels'])):
sub_mask = np.full(info['height'] * info['width'], 0, dtype=np.uint8)
annotation = [int(x) for x in annotation.split(' ')]
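            # At this point annotation is a flat list of alternating (start_pixel, run_length)
            # pairs over the column-major flattened image; e.g. [1, 3, 10, 5] would mark
            # 3 pixels starting at index 1 and 5 pixels starting at index 10 as foreground.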
for i, start_pixel in enumerate(annotation[::2]):
sub_mask[start_pixel: start_pixel + annotation[2 * i + 1]] = 1
sub_mask = sub_mask.reshape((info['height'], info['width']), order='F')
sub_mask = cv2.resize(sub_mask, (self.img_size, self.img_size), interpolation=cv2.INTER_NEAREST)
mask[:, :, m] = sub_mask
labels.append(int(label) + 1)
return mask, np.array(labels)
| 37.745455 | 108 | 0.585742 |
062874b1bb9104c34d08db9ad838ce9d1d3cbe3f | 6,939 | py | Python | ros/src/tl_detector/light_classification/tl_classifier.py | iammsg/Capstone-Project | 7191dea6168dc39b95c636d59b3a5d6d4ccd98c1 | ["MIT"] | null | null | null | ros/src/tl_detector/light_classification/tl_classifier.py | iammsg/Capstone-Project | 7191dea6168dc39b95c636d59b3a5d6d4ccd98c1 | ["MIT"] | 9 | 2020-01-28T22:44:12.000Z | 2022-03-11T23:47:37.000Z | ros/src/tl_detector/light_classification/tl_classifier.py | iammsg/Capstone-Project | 7191dea6168dc39b95c636d59b3a5d6d4ccd98c1 | ["MIT"] | null | null | null |
import os
import cv2
import numpy as np
import rospy
import tensorflow as tf
from styx_msgs.msg import TrafficLight
import time
import json
from datetime import datetime
import os
dir = os.path.dirname(__file__)
class TLClassifier(object):
def now(self):
return str(datetime.now().strftime('%I:%M:%S.%f'))
def log(self, msg):
filename = os.path.join(dir, '../../../../master.log')
f = open(filename, 'a+')
f.write('{} [tl_classifier]: {}\n'.format(self.now(), msg))
f.close()
#def __init__(self):
def __init__(self, model_file):
# TODO load classifier
self.current_light = TrafficLight.UNKNOWN
cwd = os.path.dirname(os.path.realpath(__file__))
model_path = os.path.join(cwd, "train_model/{}".format(model_file))
rospy.logwarn("model_path={}".format(model_path))
# load frozen tensorflow model
self.detection_graph = tf.Graph()
with self.detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
self.category_index = {1: {'id': 1, 'name': 'Green'}, 2: {'id': 2, 'name': 'Red'},
3: {'id': 3, 'name': 'Yellow'}, 4: {'id': 4, 'name': 'off'}}
# create tensorflow session for detection
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# end
self.sess = tf.Session(graph=self.detection_graph, config=config)
# Definite input and output Tensors for detection_graph
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
"""
Sample response (some fields are added later):
{
"lights": {
"green": {"count": 2, "sum": 1.98, "average": 0.99202722311019897},
"red": {"count": 0, "sum": 0.0, "average": 0.0},
"final": {"color": "GREEN", "average": 0.99, "state": 2}
},
"boxes": [
{"xmin": 312, "score": 0.99, "ymin": 122, "ymax": 287, "xmax": 393},
{"xmin": 652, "score": 0.99, "ymin": 140, "ymax": 295, "xmax": 731}
],
"filename": "/home/james/github/udacity/jmsktm/T2-CarND-Capstone/images/img-01-49-57-974795.jpg",
"waypoints": {"current": 747, "traffic_light": 753},
"time": {"dashed": "01-49-57-974795", "colon": "01:49:57.974795"}
}
"""
def get_classification(self, image):
current_time = datetime.now()
time_colon = str(current_time.strftime('%I:%M:%S.%f'))
time_dashed = str(current_time.strftime('%I-%M-%S-%f'))
result = { "time": { "colon": time_colon, "dashed": time_dashed } }
filename = os.path.join(dir, '../../../../images/img-{}.jpg'.format(result["time"]["dashed"]))
result["filename"] = filename
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# return TrafficLight.RED
# TODO implement light color prediction
image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
(im_width, im_height, _) = image_rgb.shape
image_np = np.expand_dims(image_rgb, axis=0)
# Actual detection.
with self.detection_graph.as_default():
(boxes, scores, classes, num) = self.sess.run(
[self.detection_boxes, self.detection_scores,
self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np})
boxes = np.squeeze(boxes)
scores = np.squeeze(scores)
classes = np.squeeze(classes).astype(np.int32)
min_score_thresh = .70
red_count = 0
red_sum = 0.0
red_average = 0.0
green_count = 0
green_sum = 0.0
green_average = 0.0
total_count = 0
average = 1.0
count = 0
height, width, channels = image.shape
arr = []
for i in range(boxes.shape[0]):
if scores is None or scores[i] > min_score_thresh:
total_count += 1
class_name = self.category_index[classes[i]]['name']
# Traffic light thing
if class_name == 'Red' or class_name == 'Yellow':
red_count += 1
red_sum += scores[i]
elif class_name == 'Green':
green_count += 1
green_sum += scores[i]
box = boxes[i]
ymin, xmin, ymax, xmax = box
xmin1 = int(xmin * width)
ymin1 = int(ymin * height)
xmax1 = int(xmax * width)
ymax1 = int(ymax * height)
score = round(scores[i], 2)
arr.append({ "xmin": xmin1, "ymin": ymin1, "xmax": xmax1, "ymax": ymax1, "score": score })
result["boxes"] = arr
if red_count > 0:
red_average = red_sum / red_count
if green_count > 0:
green_average = green_sum / green_count
light_color = 'UNKNOWN'
self.current_light = TrafficLight.UNKNOWN
if red_count > 0 and red_average > min_score_thresh and red_average > green_average:
light_color = 'RED'
red_sum = round(red_sum, 2)
average = round(red_average, 2)
self.current_light = TrafficLight.RED
elif green_count > 0 and green_average > min_score_thresh and green_average > red_average:
light_color = 'GREEN'
green_sum = round(green_sum, 2)
average = round(green_average, 2)
self.current_light = TrafficLight.GREEN
result["lights"] = {
"red": { "count": red_count, "sum": red_sum, "average": red_average },
"green": { "count": green_count, "sum": green_sum, "average": green_average },
"final": { "color": light_color, "average": average, "state": self.current_light }
}
return result
| 37.711957 | 106 | 0.576452 |
1b490ba9b28b7ae6444832cc5cd9240f19101f87 | 851 | py | Python | coblog/urls.py | canokay/coblog-backend | 51854ed2d69f8484877bc9dcc95c19e3aa7d4107 | ["MIT"] | 1 | 2020-12-19T15:55:47.000Z | 2020-12-19T15:55:47.000Z | coblog/urls.py | canokay/coblog-backend | 51854ed2d69f8484877bc9dcc95c19e3aa7d4107 | ["MIT"] | null | null | null | coblog/urls.py | canokay/coblog-backend | 51854ed2d69f8484877bc9dcc95c19e3aa7d4107 | ["MIT"] | null | null | null |
"""coblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('blog.urls', namespace='blog')),
]
| 32.730769 | 77 | 0.703878 |
ba45ce35c091ae4088db67a455c7a72e1ca4862d | 1,027 | py | Python | src/rps/heredity_problems/mendels_first_law.py | Vikdemen/RosalindPS | 05cb3c2162e569bd92a99b9be127999cae1babf7 | ["MIT"] | 1 | 2020-03-01T11:57:56.000Z | 2020-03-01T11:57:56.000Z | src/rps/heredity_problems/mendels_first_law.py | Vikdemen/RosalindPS | 05cb3c2162e569bd92a99b9be127999cae1babf7 | ["MIT"] | null | null | null | src/rps/heredity_problems/mendels_first_law.py | Vikdemen/RosalindPS | 05cb3c2162e569bd92a99b9be127999cae1babf7 | ["MIT"] | 1 | 2020-03-01T18:39:44.000Z | 2020-03-01T18:39:44.000Z |
"""
Given: Three positive integers k, m, and n, representing a population containing k+m+n organisms: k individuals
are homozygous dominant for a factor, m are heterozygous, and n are homozygous recessive.
Return: The probability that two randomly selected mating organisms will produce an individual possessing a dominant
allele (and thus displaying the dominant phenotype). Assume that any two organisms can mate.
"""
from __future__ import annotations
from typing import List
from rps.heredity_problems.mendel import calculate_dominant_probabilities
def probability_of_dominants(lines: List[str]) -> str:
"""
:param lines: A single line with 3 space-separated numbers representing number of organism with dominant homozygous,
heterozygous and recessive homozygous
:return: The probability of offspring with dominant allele from 2 random parents
"""
line, = lines
k, m, n = [int(num) for num in line.split()]
dominant = calculate_dominant_probabilities(k, m, n)
return f"{dominant:.4f}"
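# A hedged sketch of the arithmetic behind the imported helper (the actual body of
# calculate_dominant_probabilities is not shown in this file): with T = k + m + n
# parents, an offspring lacks a dominant allele only when both inherited alleles are
# recessive, so
#     P(dominant) = 1 - [ (n/T) * ((n-1)/(T-1))
#                         + 2 * (m/T) * (n/(T-1)) * (1/2)
#                         + (m/T) * ((m-1)/(T-1)) * (1/4) ]
# For example, k = m = n = 2 gives 1 - 13/60 ~= 0.7833, the familiar sample answer
# for this Rosalind problem.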
| 46.681818 | 120 | 0.770204 |
a71497fc2e3c9991d0a18c534d45853aa45d8cd9 | 2,087 | py | Python | src/account/views.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | ["MIT"] | 1 | 2018-07-08T06:12:02.000Z | 2018-07-08T06:12:02.000Z | src/account/views.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | ["MIT"] | null | null | null | src/account/views.py | BusyJay/sokoban | a7fac324e9ee725c7954016d368d799ca2a7c47c | ["MIT"] | null | null | null |
from django.conf import settings
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login, logout as auth_logout
from django.http import HttpResponse
from django.shortcuts import render, resolve_url
from django.utils.http import is_safe_url
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from sokoban.utils import json_api
@sensitive_post_parameters()
@csrf_protect
@never_cache
@json_api
def login(request):
redirect_to = request.REQUEST.get('next', '')
if request.method == 'GET':
request.session.set_test_cookie()
return render(request, 'accounts/login.html', dictionary=dict(
form=AuthenticationForm(),
next=redirect_to,
))
elif request.method == 'POST':
login_form = AuthenticationForm(data=request.POST)
if not login_form.is_valid():
return {
'errors': login_form.errors.as_ul(),
}, 400
if not is_safe_url(url=redirect_to, host=request.get_host()):
redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
auth_login(request, login_form.get_user())
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
return {
'username': request.user.username,
'next': redirect_to,
}
else:
return HttpResponse(status=405)
@json_api
def logout(request):
next_page = '/'
if 'next' in request.REQUEST:
next_page = request.REQUEST['next']
# Security check -- don't allow redirection to a different host.
if not is_safe_url(url=next_page, host=request.get_host()):
next_page = request.path
if request.method == 'GET':
return render(request, 'accounts/logged_out.html', dictionary=dict(
next=next_page,
))
else:
auth_logout(request)
return {
'success': 1,
}
| 32.107692 | 75 | 0.667465 |
885ffac482ce6a6c47e37ca2153588eabd2be5cd | 18,819 | py | Python | log_complete/model_717.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | log_complete/model_717.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null | log_complete/model_717.py | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | 54a5ef7e868be34289836bbbb024a2963c0c9c86 | ["MIT"] | null | null | null |
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 179250.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
| 91.354369 | 710 | 0.806525 |
0c376b04830cf7cc698bd6837d06666e9ddce082 | 5,424 | py | Python | rsl/config.py | torchingloom/invenio-instance | b2cd4112e3960fa90fedf33bbedb2367f2ec47ac | ["MIT"] | null | null | null | rsl/config.py | torchingloom/invenio-instance | b2cd4112e3960fa90fedf33bbedb2367f2ec47ac | ["MIT"] | null | null | null | rsl/config.py | torchingloom/invenio-instance | b2cd4112e3960fa90fedf33bbedb2367f2ec47ac | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 RSL.
#
# rsl is free software; you can redistribute it and/or modify it under the
# terms of the MIT License; see LICENSE file for more details.
"""Default configuration for rsl.
You overwrite and set instance-specific configuration by either:
- Configuration file: ``<virtualenv prefix>/var/instance/invenio.cfg``
- Environment variables: ``APP_<variable name>``
"""
import os
from pathlib import Path
from datetime import timedelta
from invenio_app.config import APP_DEFAULT_SECURE_HEADERS
from invenio_previewer.config import PREVIEWER_PREFERENCE as BASE_PREFERENCE
def _(x):
"""Identity function used to trigger string extraction."""
return x
# Rate limiting
# =============
#: Storage for ratelimiter.
RATELIMIT_STORAGE_URL = 'redis://localhost:6379/3'
# I18N
# ====
#: Default language
BABEL_DEFAULT_LANGUAGE = 'ru'
#: Default time zone
BABEL_DEFAULT_TIMEZONE = 'Europe/Moscow'
#: Other supported languages (do not include the default language in list).
I18N_LANGUAGES = [
('ru', _('Русский'))
]
# Base templates
# ==============
#: Global base template.
BASE_TEMPLATE = 'invenio_theme/page.html'
#: Cover page base template (used for e.g. login/sign-up).
COVER_TEMPLATE = 'invenio_theme/page_cover.html'
#: Footer base template.
FOOTER_TEMPLATE = 'invenio_theme/footer.html'
#: Header base template.
HEADER_TEMPLATE = 'invenio_theme/header.html'
#: Settings base template.
SETTINGS_TEMPLATE = 'invenio_theme/page_settings.html'
# Theme configuration
# ===================
#: The Invenio theme.
APP_THEME = ['semantic-ui']
#: Site name.
THEME_SITENAME = _('rsl')
#: Use default frontpage.
THEME_FRONTPAGE = True
#: Frontpage title.
THEME_FRONTPAGE_TITLE = _('rsl')
#: Frontpage template.
THEME_FRONTPAGE_TEMPLATE = 'rsl/frontpage.html'
# Email configuration
# ===================
#: Email address for support.
SUPPORT_EMAIL = "[email protected]"
#: Disable email sending by default.
MAIL_SUPPRESS_SEND = True
# Assets
# ======
#: Static files collection method (defaults to copying files).
COLLECT_STORAGE = 'flask_collect.storage.file'
# Accounts
# ========
#: Email address used as sender of account registration emails.
SECURITY_EMAIL_SENDER = SUPPORT_EMAIL
#: Email subject for account registration emails.
SECURITY_EMAIL_SUBJECT_REGISTER = _("Welcome to rsl!")
#: Redis session storage URL.
ACCOUNTS_SESSION_REDIS_URL = 'redis://localhost:6379/1'
#: Enable session/user id request tracing. This feature will add X-Session-ID
#: and X-User-ID headers to HTTP response. You MUST ensure that NGINX (or other
#: proxies) removes these headers again before sending the response to the
#: client. Set to False, in case of doubt.
ACCOUNTS_USERINFO_HEADERS = True
# Celery configuration
# ====================
BROKER_URL = 'amqp://guest:guest@localhost:5672/'
#: URL of message broker for Celery (default is RabbitMQ).
CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672/'
#: URL of backend for result storage (default is Redis).
CELERY_RESULT_BACKEND = 'redis://localhost:6379/2'
#: Scheduled tasks configuration (aka cronjobs).
CELERY_BEAT_SCHEDULE = {
'indexer': {
'task': 'invenio_indexer.tasks.process_bulk_queue',
'schedule': timedelta(minutes=5),
},
'accounts': {
'task': 'invenio_accounts.tasks.clean_session_table',
'schedule': timedelta(minutes=60),
},
}
# Database
# ========
#: Database URI including user and password
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://postgres:123@localhost/rsl-invenio'
# JSONSchemas
# ===========
#: Hostname used in URLs for local JSONSchemas.
JSONSCHEMAS_HOST = 'rsl.com'
# Flask configuration
# ===================
# See details on
# http://flask.pocoo.org/docs/0.12/config/#builtin-configuration-values
#: Secret key - each installation (dev, production, ...) needs a separate key.
#: It should be changed before deploying.
SECRET_KEY = 'asdasdqwe90qwe90;qwe;;2e&as;ldsal;dlasdnmasd'
#: Max upload size for form data via application/mulitpart-formdata.
MAX_CONTENT_LENGTH = 100 * 1024 * 1024 # 100 MiB
#: Sets cookie with the secure flag by default
SESSION_COOKIE_SECURE = True
#: Since HAProxy and Nginx route all requests no matter the host header
#: provided, the allowed hosts variable is set to localhost. In production it
#: should be set to the correct host and it is strongly recommended to only
#: route correct hosts to the application.
APP_ALLOWED_HOSTS = ['rsl.com', 'localhost', 'local.invenio', '127.0.0.1', '*']
# OAI-PMH
# =======
OAISERVER_ID_PREFIX = 'oai:rsl.com:'
# Previewers
# ==========
#: Include IIIF preview for images.
PREVIEWER_PREFERENCE = ['iiif_image'] + BASE_PREFERENCE
# Debug
# =====
# Flask-DebugToolbar is by default enabled when the application is running in
# debug mode. More configuration options are available at
# https://flask-debugtoolbar.readthedocs.io/en/latest/#configuration
#: Switches off incept of redirects by Flask-DebugToolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
# Configures Content Security Policy for PDF Previewer
# Remove it if you are not using PDF Previewer
APP_DEFAULT_SECURE_HEADERS['content_security_policy'] = {
'default-src': ["'self'", "'unsafe-inline'"],
'object-src': ["'none'"],
'style-src': ["'self'", "'unsafe-inline'", "https://fonts.googleapis.com"],
'font-src': ["'self'", "data:", "https://fonts.gstatic.com"],
}
WTF_CSRF_ENABLED = False
| 31.534884 | 84 | 0.721792 |
fe48a7ec25fe0f5d9086e7d73c74d028c0a8705d | 2,576 | py | Python | WidgetsUnlimited/operations/simulator.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | ["MIT"] | null | null | null | WidgetsUnlimited/operations/simulator.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | ["MIT"] | null | null | null | WidgetsUnlimited/operations/simulator.py | AlanHorowitz/open-ended-capstone | 80590af5b09c2245f124cec20ed7594d62cff30e | ["MIT"] | null | null | null |
from model.metadata import Table
from .generator import DataGenerator, GeneratorRequest
from .base import BaseSystem
from typing import List
class OperationsSimulator:
"""
A simulator of activity in Widgets Unlimited's source systems. A sequence of generation requests are processed and
new and updated records are fed to the source systems. The source systems will expose these changes via
different protocols to be ingested by the Data Warehouse.
"""
def __init__(self, data_generator: DataGenerator, source_systems: List[BaseSystem]):
self._data_generator = data_generator
self._source_systems = set(source_systems)
self._source_system_lookup = {}
def add_tables(self, source_system: BaseSystem, tables: List[Table]) -> None:
"""
Associate a list of Tables to a source system and pass the tables to both source
system and data generator for initialization.
Create a dictionary mapping table names to the source system objects. A table may only
be associated with a single source system.
:param source_system: source system object
:param tables: list of Table objects
:return: None, raises an exception if source system is unknown or table is added more than once
"""
if source_system not in self._source_systems:
raise Exception("Error. May not add tables to unknown source system")
for table in tables:
table_name = table.get_name()
if table_name in self._source_system_lookup:
raise Exception("Error. Table may only be added once to simulator")
self._source_system_lookup[table_name] = source_system
source_system.add_tables(tables)
self._data_generator.add_tables(tables)
def process(
self, batch_id: int, generator_requests: List[GeneratorRequest]
) -> None:
"""
Feed a list of generator requests to the DataGenerator, then pass the inputs and updates for each table
on to the associated source system.
:param batch_id: identifier used to correlate requests
:param generator_requests: list of generator parameter objects
:return: None
"""
for request in generator_requests:
table = request.table
op_system: BaseSystem = self._source_system_lookup[table.get_name()]
i_rows, u_rows = self._data_generator.generate(request, batch_id)
op_system.insert(table, i_rows)
op_system.update(table, u_rows)
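# A minimal usage sketch under the assumptions visible in this module; RetailSystem,
# orders_table and the GeneratorRequest arguments are hypothetical placeholders, not
# part of this file:
#
#     generator = DataGenerator()
#     retail_system = RetailSystem()            # some BaseSystem implementation
#     simulator = OperationsSimulator(generator, [retail_system])
#     simulator.add_tables(retail_system, [orders_table])
#     simulator.process(batch_id=1, generator_requests=[GeneratorRequest(orders_table)])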
| 42.933333 | 119 | 0.692935 |
24c47051d17926e9a104a129a551d7d6f05f4ba9 | 545 | py | Python | manage.py | guillaumepiot/cotidia-demo | 497177fa63942ee22288e93ed7d4867854110dd0 | ["BSD-3-Clause"] | null | null | null | manage.py | guillaumepiot/cotidia-demo | 497177fa63942ee22288e93ed7d4867854110dd0 | ["BSD-3-Clause"] | 7 | 2020-02-11T23:47:40.000Z | 2022-03-11T23:42:02.000Z | manage.py | guillaumepiot/cotidia-demo | 497177fa63942ee22288e93ed7d4867854110dd0 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "consult.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
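# Typical invocations of this entry point (standard Django management commands,
# shown here for context only):
#     python manage.py migrate
#     python manage.py runserver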
| 34.0625 | 77 | 0.688073 |
621aae82a06746cc648ac34db4a5733a56265f48 | 6,428 | py | Python | main.py | JDACS4C-IMPROVE/TGSA | cdd9903b889112b04325bec9f61935d05d9e9179 | ["MIT"] | 13 | 2021-06-17T15:01:49.000Z | 2022-03-11T05:19:28.000Z | main.py | JDACS4C-IMPROVE/TGSA | cdd9903b889112b04325bec9f61935d05d9e9179 | ["MIT"] | null | null | null | main.py | JDACS4C-IMPROVE/TGSA | cdd9903b889112b04325bec9f61935d05d9e9179 | ["MIT"] | 10 | 2021-10-06T08:56:58.000Z | 2022-03-22T04:55:44.000Z |
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import load_data
from utils import EarlyStopping, set_random_seed
from utils import train, validate
from preprocess_gene import get_STRING_graph, get_predefine_cluster
from models.TGDRP import TGDRP
import argparse
import fitlog
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=42,
help='seed')
parser.add_argument('--device', type=str, default='cuda:6',
help='device')
parser.add_argument('--model', type=str, default='TGDRP', help='Name of the model')
parser.add_argument('--batch_size', type=int, default=128,
help='batch size (default: 128)')
parser.add_argument('--lr', type=float, default=0.0001,
help='learning rate')
parser.add_argument('--layer_drug', type=int, default=3, help='layer for drug')
parser.add_argument('--dim_drug', type=int, default=128, help='hidden dim for drug')
parser.add_argument('--layer', type=int, default=3, help='number of GNN layer')
parser.add_argument('--hidden_dim', type=int, default=8, help='hidden dim for cell')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight decay')
parser.add_argument('--dropout_ratio', type=float, default=0.2,
help='dropout ratio')
parser.add_argument('--epochs', type=int, default=300,
help='maximum number of epochs (default: 300)')
parser.add_argument('--patience', type=int, default=10,
help='patience for earlystopping (default: 10)')
parser.add_argument('--edge', type=float, default=0.95, help='threshold for cell line graph')
parser.add_argument('--setup', type=str, default='known', help='experimental setup')
parser.add_argument('--pretrain', type=int, default=1,
help='whether use pre-trained weights (0 for False, 1 for True')
parser.add_argument('--weight_path', type=str, default='',
help='filepath for pretrained weights')
parser.add_argument('--mode', type=str, default='test',
help='train or test')
return parser.parse_args()
def main():
args = arg_parse()
set_random_seed(args.seed)
drug_dict = np.load('./data/Drugs/drug_feature_graph.npy', allow_pickle=True).item()
cell_dict = np.load('./data/CellLines_DepMap/CCLE_580_18281/census_706/cell_feature_all.npy',
allow_pickle=True).item()
edge_index = np.load('./data/CellLines_DepMap/CCLE_580_18281/census_706/edge_index_PPI_{}.npy'.format(args.edge))
IC = pd.read_csv('./data/PANCANCER_IC_82833_580_170.csv')
train_loader, val_loader, test_loader = load_data(IC, drug_dict, cell_dict, edge_index, args)
print(len(IC), len(train_loader.dataset), len(val_loader.dataset), len(test_loader.dataset))
print('mean degree:{}'.format(len(edge_index[0]) / 706))
args.num_feature = cell_dict['ACH-000001'].x.shape[1]
genes_path = './data/CellLines_DepMap/CCLE_580_18281/census_706'
edge_index = get_STRING_graph(genes_path, args.edge)
cluster_predefine = get_predefine_cluster(edge_index, genes_path, args.edge, args.device)
model = TGDRP(cluster_predefine, args).to(args.device)
if args.mode == 'train':
if args.pretrain and args.weight_path != '':
model.GNN_drug.load_state_dict(torch.load('./model_pretrain/{}.pth'.format(args.weight_path))['model_state_dict'])
criterion = nn.MSELoss()
opt = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
log_folder = os.path.join(os.getcwd(), "logs", model._get_name())
if not os.path.exists(log_folder):
os.makedirs(log_folder)
fitlog.set_log_dir(log_folder)
fitlog.add_hyper(args)
fitlog.add_hyper_in_file(__file__)
stopper = EarlyStopping(mode='lower', patience=args.patience)
for epoch in range(1, args.epochs + 1):
print("=====Epoch {}".format(epoch))
print("Training...")
train_loss = train(model, train_loader, criterion, opt, args.device)
fitlog.add_loss(train_loss.item(), name='Train MSE', step=epoch)
print('Evaluating...')
rmse, _, _, _ = validate(model, val_loader, args.device)
print("Validation rmse:{}".format(rmse))
fitlog.add_metric({'val': {'RMSE': rmse}}, step=epoch)
early_stop = stopper.step(rmse, model)
if early_stop:
break
print('EarlyStopping! Finish training!')
print('Testing...')
stopper.load_checkpoint(model)
train_rmse, train_MAE, train_r2, train_r = validate(model, train_loader, args.device)
val_rmse, val_MAE, val_r2, val_r = validate(model, val_loader, args.device)
test_rmse, test_MAE, test_r2, test_r = validate(model, test_loader, args.device)
        print('Train result: rmse:{} r2:{} r:{}'.format(train_rmse, train_r2, train_r))
        print('Val result: rmse:{} r2:{} r:{}'.format(val_rmse, val_r2, val_r))
        print('Test result: rmse:{} r2:{} r:{}'.format(test_rmse, test_r2, test_r))
fitlog.add_best_metric(
{'epoch': epoch - args.patience,
"train": {'RMSE': train_rmse, 'MAE': train_MAE, 'pearson': train_r, "R2": train_r2},
"valid": {'RMSE': stopper.best_score, 'MAE': val_MAE, 'pearson': val_r, 'R2': val_r2},
"test": {'RMSE': test_rmse, 'MAE': test_MAE, 'pearson': test_r, 'R2': test_r2}})
elif args.mode == 'test':
weight = "TGDRP_pre" if args.pretrain else "TGDRP"
model.load_state_dict(torch.load('./weights/{}.pth'.format(weight), map_location=args.device)['model_state_dict'])
test_rmse, test_MAE, test_r2, test_r = validate(model, test_loader, args.device)
print('Test RMSE: {}, MAE: {}, R2: {}, R: {}'.format(round(test_rmse.item(), 4), round(test_MAE, 4),
round(test_r2, 4), round(test_r, 4)))
if __name__ == "__main__":
main()
| 50.614173 | 127 | 0.626167 |
2e0fa094285a5b3d5267707b6c8f3c80d4708a20 | 6,618 | py | Python | python-2-apps/fn_crowdstrike_query-1.0.0/fn_crowdstrike_query/util/customize.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | ["MIT"] | null | null | null | python-2-apps/fn_crowdstrike_query-1.0.0/fn_crowdstrike_query/util/customize.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | ["MIT"] | 1 | 2022-03-06T00:10:13.000Z | 2022-03-06T00:10:13.000Z | python-2-apps/fn_crowdstrike_query/fn_crowdstrike_query/util/customize.py.OLD.py | JayDi11a/Geralds-IBM-SOAR-Integrations | 0e0eb18adbaf3a266e1dc5a316df7cd5a93f88d0 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_crowdstrike_query"""
from __future__ import print_function
from resilient_circuits.util import *
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Function inputs:
# artifact_type
# artifact_value
# incident_id
# Message Destinations:
# crowdstrike_query
# Functions:
# query_malware_user_connection
yield ImportDefinition(u"""
eyJ0YXNrX29yZGVyIjogW10sICJ3b3JrZmxvd3MiOiBbXSwgImFjdGlvbnMiOiBbXSwgImxheW91
dHMiOiBbXSwgImV4cG9ydF9mb3JtYXRfdmVyc2lvbiI6IDIsICJpZCI6IDI0LCAiaW5kdXN0cmll
cyI6IG51bGwsICJwaGFzZXMiOiBbXSwgImFjdGlvbl9vcmRlciI6IFtdLCAiZ2VvcyI6IG51bGws
ICJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMCwgInZlcnNpb24iOiAiMzAuMi44OCIsICJi
dWlsZF9udW1iZXIiOiA4OCwgIm1pbm9yIjogMn0sICJ0aW1lZnJhbWVzIjogbnVsbCwgIndvcmtz
cGFjZXMiOiBbXSwgImF1dG9tYXRpY190YXNrcyI6IFtdLCAiZnVuY3Rpb25zIjogW3siZGlzcGxh
eV9uYW1lIjogIlF1ZXJ5IE1hbHdhcmUgVXNlciBDb25uZWN0aW9uIiwgImRlc2NyaXB0aW9uIjog
eyJjb250ZW50IjogIlRoaXMgZnVuY3Rpb24gcGFzc2VzIGluIHRoZSBhcnRpZmFjdCAoZmlsZSBu
YW1lKSBmb3IgaXRzIHBvdGVudGlhbCB0aHJlYXQgYmVoYXZpb3IgYW5kIHRoZSBuYW1lIGFzc29j
aWF0ZWQgd2l0aCB0aGF0IHRocmVhdCBkZXRlY3Rpb24uIiwgImZvcm1hdCI6ICJ0ZXh0In0sICJj
cmVhdG9yIjogeyJkaXNwbGF5X25hbWUiOiAiR2VyYWxkIFRyb3RtYW4iLCAidHlwZSI6ICJ1c2Vy
IiwgImlkIjogNCwgIm5hbWUiOiAiZ2VyYWxkLnRyb3RtYW5AaWJtLmNvbSJ9LCAidmlld19pdGVt
cyI6IFt7InNob3dfaWYiOiBudWxsLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgInNob3df
bGlua19oZWFkZXIiOiBmYWxzZSwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJjb250ZW50Ijog
ImVhZDIxNGMyLTEzZmUtNDNmNi1hM2M3LTY3NmE4ODMzOGRiYiIsICJzdGVwX2xhYmVsIjogbnVs
bH0sIHsic2hvd19pZiI6IG51bGwsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAic2hvd19s
aW5rX2hlYWRlciI6IGZhbHNlLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImNvbnRlbnQiOiAi
M2EyZTM0N2ItNjcyZS00MjYzLTg3ODctYTNlOWViYTRhYzkxIiwgInN0ZXBfbGFiZWwiOiBudWxs
fSwgeyJzaG93X2lmIjogbnVsbCwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJzaG93X2xp
bmtfaGVhZGVyIjogZmFsc2UsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiY29udGVudCI6ICI5
YmE0OTg4Ny0wZGNmLTQwY2UtYTVlYS05YzBjNDNmODMxYmYiLCAic3RlcF9sYWJlbCI6IG51bGx9
XSwgImV4cG9ydF9rZXkiOiAicXVlcnlfbWFsd2FyZV91c2VyX2Nvbm5lY3Rpb24iLCAidXVpZCI6
ICI0Yjg2MDFlYi04Mzc0LTQzZGItOWQxNC1iZmU0YjM5ZjAzMjIiLCAibGFzdF9tb2RpZmllZF9i
eSI6IHsiZGlzcGxheV9uYW1lIjogIkdlcmFsZCBUcm90bWFuIiwgInR5cGUiOiAidXNlciIsICJp
ZCI6IDQsICJuYW1lIjogImdlcmFsZC50cm90bWFuQGlibS5jb20ifSwgInZlcnNpb24iOiAxLCAi
d29ya2Zsb3dzIjogW10sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTM2NTg4ODc5ODkxLCAiZGVz
dGluYXRpb25faGFuZGxlIjogImNyb3dkc3RyaWtlX3F1ZXJ5IiwgImlkIjogODIsICJuYW1lIjog
InF1ZXJ5X21hbHdhcmVfdXNlcl9jb25uZWN0aW9uIn1dLCAibm90aWZpY2F0aW9ucyI6IG51bGws
ICJyZWd1bGF0b3JzIjogbnVsbCwgImluY2lkZW50X3R5cGVzIjogW3siY3JlYXRlX2RhdGUiOiAx
NTM2NjE4NTE1OTc4LCAiZGVzY3JpcHRpb24iOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50
ZXJuYWwpIiwgImV4cG9ydF9rZXkiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwp
IiwgImlkIjogMCwgIm5hbWUiOiAiQ3VzdG9taXphdGlvbiBQYWNrYWdlcyAoaW50ZXJuYWwpIiwg
InVwZGF0ZV9kYXRlIjogMTUzNjYxODUxNTk3OCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4
LWFkMzktNGEwMDA0MDQ0YWEwIiwgImVuYWJsZWQiOiBmYWxzZSwgInN5c3RlbSI6IGZhbHNlLCAi
cGFyZW50X2lkIjogbnVsbCwgImhpZGRlbiI6IGZhbHNlfV0sICJzY3JpcHRzIjogW10sICJ0eXBl
cyI6IFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbeyJ1dWlkIjogIjVjYTFlMjA4LTk4YTgt
NGYyOS1iOWE2LTY1MmNjOTg1MjkxZCIsICJleHBvcnRfa2V5IjogImNyb3dkc3RyaWtlX3F1ZXJ5
IiwgIm5hbWUiOiAiQ3Jvd2RTdHJpa2UgUXVlcnkiLCAiZGVzdGluYXRpb25fdHlwZSI6IDAsICJw
cm9ncmFtbWF0aWNfbmFtZSI6ICJjcm93ZHN0cmlrZV9xdWVyeSIsICJleHBlY3RfYWNrIjogdHJ1
ZSwgInVzZXJzIjogWyJnZXJhbGQudHJvdG1hbkBpYm0uY29tIl19XSwgImluY2lkZW50X2FydGlm
YWN0X3R5cGVzIjogW10sICJyb2xlcyI6IFtdLCAiZmllbGRzIjogW3sib3BlcmF0aW9ucyI6IFtd
LCAicmVhZF9vbmx5IjogdHJ1ZSwgIm5hbWUiOiAiaW5jX3RyYWluaW5nIiwgInRlbXBsYXRlcyI6
IFtdLCAidHlwZV9pZCI6IDAsICJjaG9zZW4iOiBmYWxzZSwgInRleHQiOiAiU2ltdWxhdGlvbiIs
ICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiaW5jaWRl
bnQvaW5jX3RyYWluaW5nIiwgInRvb2x0aXAiOiAiV2hldGhlciB0aGUgaW5jaWRlbnQgaXMgYSBz
aW11bGF0aW9uIG9yIGEgcmVndWxhciBpbmNpZGVudC4gIFRoaXMgZmllbGQgaXMgcmVhZC1vbmx5
LiIsICJyaWNoX3RleHQiOiBmYWxzZSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAicHJlZml4Ijog
bnVsbCwgImludGVybmFsIjogZmFsc2UsICJ2YWx1ZXMiOiBbXSwgImJsYW5rX29wdGlvbiI6IGZh
bHNlLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImNoYW5nZWFibGUiOiB0cnVlLCAiaGlkZV9u
b3RpZmljYXRpb24iOiBmYWxzZSwgImlkIjogMzcsICJ1dWlkIjogImMzZjBlM2VkLTIxZTEtNGQ1
My1hZmZiLWZlNWNhMzMwOGNjYSJ9LCB7Im9wZXJhdGlvbnMiOiBbXSwgInR5cGVfaWQiOiAxMSwg
Im9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJpbmNpZGVudF9pZCIsICJibGFua19vcHRp
b24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMTE1
LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogImVhZDIxNGMyLTEzZmUtNDNmNi1hM2M3LTY3
NmE4ODMzOGRiYiIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgInRv
b2x0aXAiOiAiIiwgImludGVybmFsIjogZmFsc2UsICJyaWNoX3RleHQiOiBmYWxzZSwgInRlbXBs
YXRlcyI6IFtdLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2luY2lkZW50X2lkIiwgImhpZGVf
bm90aWZpY2F0aW9uIjogZmFsc2UsICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJpbmNpZGVu
dF9pZCIsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgInZhbHVlcyI6IFtdfSwg
eyJvcGVyYXRpb25zIjogW10sICJ0eXBlX2lkIjogMTEsICJvcGVyYXRpb25fcGVybXMiOiB7fSwg
InRleHQiOiAiYXJ0aWZhY3RfdHlwZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6
IG51bGwsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgImlkIjogMTUwLCAicmVhZF9vbmx5IjogZmFsc2Us
ICJ1dWlkIjogIjNhMmUzNDdiLTY3MmUtNDI2My04Nzg3LWEzZTllYmE0YWM5MSIsICJjaG9zZW4i
OiBmYWxzZSwgImlucHV0X3R5cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6
IGZhbHNlLCAicmljaF90ZXh0IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXki
OiAiX19mdW5jdGlvbi9hcnRpZmFjdF90eXBlIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us
ICJwbGFjZWhvbGRlciI6ICIiLCAibmFtZSI6ICJhcnRpZmFjdF90eXBlIiwgImRlZmF1bHRfY2hv
c2VuX2J5X3NlcnZlciI6IGZhbHNlLCAidmFsdWVzIjogW119LCB7Im9wZXJhdGlvbnMiOiBbXSwg
InR5cGVfaWQiOiAxMSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidGV4dCI6ICJhcnRpZmFjdF92
YWx1ZSIsICJibGFua19vcHRpb24iOiBmYWxzZSwgInByZWZpeCI6IG51bGwsICJjaGFuZ2VhYmxl
IjogdHJ1ZSwgImlkIjogMTQ5LCAicmVhZF9vbmx5IjogZmFsc2UsICJ1dWlkIjogIjliYTQ5ODg3
LTBkY2YtNDBjZS1hNWVhLTljMGM0M2Y4MzFiZiIsICJjaG9zZW4iOiBmYWxzZSwgImlucHV0X3R5
cGUiOiAidGV4dCIsICJ0b29sdGlwIjogIiIsICJpbnRlcm5hbCI6IGZhbHNlLCAicmljaF90ZXh0
IjogZmFsc2UsICJ0ZW1wbGF0ZXMiOiBbXSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9hcnRp
ZmFjdF92YWx1ZSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAicGxhY2Vob2xkZXIiOiAi
IiwgIm5hbWUiOiAiYXJ0aWZhY3RfdmFsdWUiLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjog
ZmFsc2UsICJ2YWx1ZXMiOiBbXX1dLCAib3ZlcnJpZGVzIjogW10sICJleHBvcnRfZGF0ZSI6IDE1
MzY1ODkwODA4ODh9
"""
)
| 62.433962 | 87 | 0.955878 |
8093ba3a64743151f52da01ac6dd3832d0c30355 | 10,174 | py | Python | docs/conf.py | WilliamMayor/jingerly | 225feb2e71b4256302209c815cccd54b694d52eb | [
"MIT"
] | null | null | null | docs/conf.py | WilliamMayor/jingerly | 225feb2e71b4256302209c815cccd54b694d52eb | [
"MIT"
] | null | null | null | docs/conf.py | WilliamMayor/jingerly | 225feb2e71b4256302209c815cccd54b694d52eb | [
"MIT"
] | 1 | 2020-02-23T15:07:47.000Z | 2020-02-23T15:07:47.000Z | # -*- coding: utf-8 -*-
#
# . documentation build configuration file, created by
# sphinx-quickstart on Sat Feb 14 21:29:17 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jingerly'
copyright = u'2015, William Mayor'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.3'
# The full version, including alpha/beta/rc tags.
release = '0.0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'sphinx.tex', u'. Documentation',
u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinx', u'. Documentation',
[u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinx', u'. Documentation',
u'Author', 'sphinx', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'.'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2015, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'.'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| 30.644578 | 80 | 0.716139 |
d4219fbd51a1b331ee29a550a7a35f570e296d0e | 471 | py | Python | cudaBLAS/pythonMatMultTest.py | CUDA-me-impressed/CuMat-Compiler | d5050f96a2712d4d135c1729484cd91db2bdd42e | [
"MIT"
] | 2 | 2020-10-18T10:29:34.000Z | 2021-01-05T15:46:34.000Z | cudaBLAS/pythonMatMultTest.py | CUDA-me-impressed/CuMat-Compiler | d5050f96a2712d4d135c1729484cd91db2bdd42e | [
"MIT"
] | 11 | 2020-10-08T18:41:20.000Z | 2021-03-19T14:49:19.000Z | cudaBLAS/pythonMatMultTest.py | CUDA-me-impressed/CuMat-Compiler | d5050f96a2712d4d135c1729484cd91db2bdd42e | [
"MIT"
] | null | null | null | import numpy as np
import timeit
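# Helper below renders a NumPy matrix as a CuMat-style literal: one pair of outer
# brackets with rows separated by '\' (nested '[' / ']' from array2string are stripped).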
def printAsCuMatMatrix(mat):
np.set_printoptions(threshold=9999999999)
file = "["
file = file + np.array2string(mat, separator=', ').replace('],', '\\').replace('[', '').replace(']','')
file = file + ']'
return file
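# Benchmark: time 10 runs of (x @ x) @ y on 200x200 float64 matrices and report the
# mean per-run time in milliseconds.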
dim = (200,200)
x = np.ones(dim, dtype=np.float64) * 2.1
y = np.ones(dim) * 2.65
time = timeit.timeit(
lambda: np.matmul(np.matmul(x,x),y),
number=10
)
print(f"{time * 1000 / 10} ms") | 22.428571 | 107 | 0.615711 |
0e77c6b78f329c8168b2504893ea04df09895b99 | 782 | py | Python | user/views.py | Emmanuel-otieno/Awward_clone | ce0fb841984cae619599b51600403d7a1d873fc8 | [
"Unlicense"
] | null | null | null | user/views.py | Emmanuel-otieno/Awward_clone | ce0fb841984cae619599b51600403d7a1d873fc8 | [
"Unlicense"
] | null | null | null | user/views.py | Emmanuel-otieno/Awward_clone | ce0fb841984cae619599b51600403d7a1d873fc8 | [
"Unlicense"
] | null | null | null | from django.shortcuts import render,redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from .forms import UserRegisterForm
# Create your views here.
def register(request):
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            messages.success(request, f'Your account has been created! You are now able to log in, {username}!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
return render(request, 'users/profile.html')
| 30.076923 | 115 | 0.671355 |
bbff12c368c3635418ef4eed40dfee7495e72e9a | 2,636 | py | Python | venv/Lib/site-packages/pkginfo/tests/test_index.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 54 | 2019-10-30T19:32:23.000Z | 2022-03-16T13:40:40.000Z | venv/Lib/site-packages/pkginfo/tests/test_index.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 301 | 2020-10-03T10:46:31.000Z | 2022-03-27T23:46:23.000Z | venv/Lib/site-packages/pkginfo/tests/test_index.py | EkremBayar/bayar | aad1a32044da671d0b4f11908416044753360b39 | [
"MIT"
] | 19 | 2019-12-14T05:21:22.000Z | 2021-06-29T14:33:59.000Z | import unittest
class IndexTests(unittest.TestCase):
def _getTargetClass(self):
from pkginfo.index import Index
return Index
def _makeOne(self):
return self._getTargetClass()()
def test_empty(self):
index = self._makeOne()
self.assertEqual(len(index), 0)
self.assertEqual(len(index.keys()), 0)
self.assertEqual(len(index.values()), 0)
self.assertEqual(len(index.items()), 0)
def _makeDummy(self):
from pkginfo.distribution import Distribution
class DummyDistribution(Distribution):
name = 'dummy'
version = '1.0'
return DummyDistribution()
def test___getitem___miss(self):
index = self._makeOne()
self.assertRaises(KeyError, index.__getitem__, 'nonesuch')
def test___setitem___value_not_dist(self):
class NotDistribution:
name = 'dummy'
version = '1.0'
dummy = NotDistribution()
index = self._makeOne()
self.assertRaises(ValueError, index.__setitem__, 'dummy-1.0', dummy)
def test___setitem___bad_key(self):
index = self._makeOne()
dummy = self._makeDummy()
self.assertRaises(ValueError, index.__setitem__, 'nonesuch', dummy)
def test___setitem___valid_key(self):
index = self._makeOne()
dummy = self._makeDummy()
index['dummy-1.0'] = dummy
self.assertTrue(index['dummy-1.0'] is dummy)
self.assertEqual(len(index), 1)
self.assertEqual(len(index.keys()), 1)
self.assertEqual(list(index.keys())[0], 'dummy-1.0')
self.assertEqual(len(index.values()), 1)
self.assertEqual(list(index.values())[0], dummy)
self.assertEqual(len(index.items()), 1)
self.assertEqual(list(index.items())[0], ('dummy-1.0', dummy))
def test_add_not_dist(self):
index = self._makeOne()
class NotDistribution:
name = 'dummy'
version = '1.0'
dummy = NotDistribution()
self.assertRaises(ValueError, index.add, dummy)
def test_add_valid_dist(self):
index = self._makeOne()
dummy = self._makeDummy()
index.add(dummy)
self.assertTrue(index['dummy-1.0'] is dummy)
self.assertEqual(len(index), 1)
self.assertEqual(len(index.keys()), 1)
self.assertEqual(list(index.keys())[0], 'dummy-1.0')
self.assertEqual(len(index.values()), 1)
self.assertEqual(list(index.values())[0], dummy)
self.assertEqual(len(index.items()), 1)
self.assertEqual(list(index.items())[0], ('dummy-1.0', dummy))
| 34.233766 | 76 | 0.614568 |
ea90319590a2ee38fc301b13217a6a409333eff4 | 1,819 | py | Python | prophy/tests/test_float.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 14 | 2015-02-19T22:00:37.000Z | 2020-11-30T03:03:55.000Z | prophy/tests/test_float.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 31 | 2015-06-22T11:11:10.000Z | 2021-05-12T06:35:47.000Z | prophy/tests/test_float.py | florczakraf/prophy | a42a6151a77b31afa05300fc2e1f52cc15a298cf | [
"MIT"
] | 16 | 2015-06-12T06:48:06.000Z | 2019-11-26T22:48:13.000Z | import prophy
import pytest
def Float():
class Float(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("value", prophy.r32)]
return Float
def Double():
class Double(prophy.with_metaclass(prophy.struct_generator, prophy.struct)):
_descriptor = [("value", prophy.r64)]
return Double
@pytest.mark.parametrize("FloatTypeFactory", [Float, Double])
def test_float(FloatTypeFactory):
FloatType = FloatTypeFactory()
x = FloatType()
assert x.value == 0.0
x.value = 1.455
assert x.value == 1.455
with pytest.raises(Exception):
x.value = b"45.486"
y = FloatType()
y.value = 4.1
y.copy_from(x)
assert y.value == 1.455
@pytest.mark.parametrize("FloatTypeFactory, one, minus_one, too_long, too_short", [
(Float,
b"\x3f\x80\x00\x00",
b"\xbf\x80\x00\x00",
b"\xff\xff\xff\xff\xff",
b"\xff\xff\xff"),
(Double,
b"\x3f\xf0\x00\x00\x00\x00\x00\x00",
b"\xbf\xf0\x00\x00\x00\x00\x00\x00",
b"\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
b"\xff\xff\xff\xff\xff")
])
def test_float_codec(FloatTypeFactory, one, minus_one, too_long, too_short):
x = FloatTypeFactory()()
x.value = 8
assert str(x) == "value: 8\n"
x.decode(one, ">")
assert x.value == 1.0
x.decode(minus_one, ">")
assert x.value == -1.0
x.value = 1.0
assert x.encode(">") == one
x.value = -1.0
assert x.encode(">") == minus_one
with pytest.raises(prophy.ProphyError) as e:
x.decode(too_long, ">")
assert "not all bytes of {} read".format(FloatTypeFactory.__name__) in str(e.value)
with pytest.raises(prophy.ProphyError) as e:
x.decode(too_short, ">")
assert "too few bytes to decode integer" in str(e.value)
| 25.263889 | 87 | 0.619021 |
3699bce78aeda766266e83dd1ef7aebc36ff7596 | 282 | py | Python | basic-service/tanya/solve.py | tanyav2/hackasat-qualifier-2021 | 595338e375f3c7f53b0f8b1cc886eb62462c750e | [
"MIT"
] | null | null | null | basic-service/tanya/solve.py | tanyav2/hackasat-qualifier-2021 | 595338e375f3c7f53b0f8b1cc886eb62462c750e | [
"MIT"
] | null | null | null | basic-service/tanya/solve.py | tanyav2/hackasat-qualifier-2021 | 595338e375f3c7f53b0f8b1cc886eb62462c750e | [
"MIT"
] | null | null | null | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 12345))
data = s.recv(22)
# split string into two variables
ns = data.decode("utf-8").split(" ")
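# the challenge presumably arrives as "<n1> + <n2>", so tokens 0 and 2 are the operands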
n1 = int(ns[0])
n2 = int(ns[2])
sum = n1 + n2
str_sum = str(sum)
s.send(str_sum.encode())
| 18.8 | 53 | 0.673759 |
dc293028476940544df3d74d1cb040e4eb5ed61b | 37,005 | bzl | Python | internal/rollup/rollup_bundle.bzl | tarekbecker/rules_nodejs | 8da02819ecf966f2a7acc4ef2c2f2f0f2d8ab4a7 | [
"Apache-2.0"
] | null | null | null | internal/rollup/rollup_bundle.bzl | tarekbecker/rules_nodejs | 8da02819ecf966f2a7acc4ef2c2f2f0f2d8ab4a7 | [
"Apache-2.0"
] | null | null | null | internal/rollup/rollup_bundle.bzl | tarekbecker/rules_nodejs | 8da02819ecf966f2a7acc4ef2c2f2f0f2d8ab4a7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rollup bundling
The versions of Rollup and terser are controlled by the Bazel toolchain.
You do not need to install them into your project.
"""
load("@build_bazel_rules_nodejs//internal/common:node_module_info.bzl", "NodeModuleSources", "collect_node_modules_aspect")
load("//internal/common:collect_es6_sources.bzl", _collect_es2015_sources = "collect_es6_sources")
load("//internal/common:expand_into_runfiles.bzl", "expand_path_into_runfiles")
load("//internal/common:module_mappings.bzl", "get_module_mappings")
_ROLLUP_MODULE_MAPPINGS_ATTR = "rollup_module_mappings"
def _rollup_module_mappings_aspect_impl(target, ctx):
mappings = get_module_mappings(target.label, ctx.rule.attr)
return struct(rollup_module_mappings = mappings)
rollup_module_mappings_aspect = aspect(
_rollup_module_mappings_aspect_impl,
attr_aspects = ["deps"],
)
def _trim_package_node_modules(package_name):
# trim a package name down to its path prior to a node_modules
# segment. 'foo/node_modules/bar' would become 'foo' and
# 'node_modules/bar' would become ''
segments = []
for n in package_name.split("/"):
if n == "node_modules":
break
segments += [n]
return "/".join(segments)
# This function is similar but slightly different than _compute_node_modules_root
# in /internal/node/node.bzl. TODO(gregmagolan): consolidate these functions
def _compute_node_modules_root(ctx):
"""Computes the node_modules root from the node_modules and deps attributes.
Args:
ctx: the skylark execution context
Returns:
The node_modules root as a string
"""
node_modules_root = None
if ctx.attr.node_modules:
if NodeModuleSources in ctx.attr.node_modules:
node_modules_root = "/".join(["external", ctx.attr.node_modules[NodeModuleSources].workspace, "node_modules"])
elif ctx.files.node_modules:
# ctx.files.node_modules is not an empty list
node_modules_root = "/".join([f for f in [
ctx.attr.node_modules.label.workspace_root,
_trim_package_node_modules(ctx.attr.node_modules.label.package),
"node_modules",
] if f])
for d in ctx.attr.deps:
if NodeModuleSources in d:
possible_root = "/".join(["external", d[NodeModuleSources].workspace, "node_modules"])
if not node_modules_root:
node_modules_root = possible_root
elif node_modules_root != possible_root:
fail("All npm dependencies need to come from a single workspace. Found '%s' and '%s'." % (node_modules_root, possible_root))
if not node_modules_root:
# there are no fine grained deps and the node_modules attribute is an empty filegroup
# but we still need a node_modules_root even if its empty
node_modules_root = "/".join([f for f in [
ctx.attr.node_modules.label.workspace_root,
ctx.attr.node_modules.label.package,
"node_modules",
] if f])
return node_modules_root
# Expand entry_point into runfiles and strip the file extension
def _entry_point_path(ctx):
return "/".join([
expand_path_into_runfiles(ctx, ctx.file.entry_point.dirname),
ctx.file.entry_point.basename,
])[:-(len(ctx.file.entry_point.extension) + 1)]
def write_rollup_config(ctx, plugins = [], root_dir = None, filename = "_%s.rollup.conf.js", output_format = "iife", additional_entry_points = []):
"""Generate a rollup config file.
This is also used by the ng_rollup_bundle and ng_package rules in @angular/bazel.
Args:
ctx: Bazel rule execution context
plugins: extra plugins (defaults to [])
See the ng_rollup_bundle in @angular/bazel for example of usage.
root_dir: root directory for module resolution (defaults to None)
filename: output filename pattern (defaults to `_%s.rollup.conf.js`)
output_format: passed to rollup output.format option, e.g. "umd"
additional_entry_points: additional entry points for code splitting
Returns:
The rollup config file. See https://rollupjs.org/guide/en#configuration-files
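    For example, this file creates the UMD variant of the config with
    write_rollup_config(ctx, filename = "_%s_umd.rollup.conf.js", output_format = "umd").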
"""
config = ctx.actions.declare_file(filename % ctx.label.name)
# build_file_path includes the BUILD.bazel file, transform here to only include the dirname
build_file_dirname = "/".join(ctx.build_file_path.split("/")[:-1])
entry_points = [_entry_point_path(ctx)] + additional_entry_points
mappings = dict()
all_deps = ctx.attr.deps + ctx.attr.srcs
for dep in all_deps:
if hasattr(dep, _ROLLUP_MODULE_MAPPINGS_ATTR):
for k, v in getattr(dep, _ROLLUP_MODULE_MAPPINGS_ATTR).items():
if k in mappings and mappings[k] != v:
fail(("duplicate module mapping at %s: %s maps to both %s and %s" %
(dep.label, k, mappings[k], v)), "deps")
mappings[k] = v
if not root_dir:
# This must be .es6 to match collect_es6_sources.bzl
root_dir = "/".join([ctx.bin_dir.path, build_file_dirname, ctx.label.name + ".es6"])
node_modules_root = _compute_node_modules_root(ctx)
is_default_node_modules = False
if node_modules_root == "node_modules" and ctx.attr.node_modules.label.package == "" and ctx.attr.node_modules.label.name == "node_modules_none":
is_default_node_modules = True
ctx.actions.expand_template(
output = config,
template = ctx.file._rollup_config_tmpl,
substitutions = {
"TMPL_additional_plugins": ",\n".join(plugins),
"TMPL_banner_file": "\"%s\"" % ctx.file.license_banner.path if ctx.file.license_banner else "undefined",
"TMPL_global_name": ctx.attr.global_name if ctx.attr.global_name else ctx.label.name,
"TMPL_inputs": ",".join(["\"%s\"" % e for e in entry_points]),
"TMPL_is_default_node_modules": "true" if is_default_node_modules else "false",
"TMPL_module_mappings": str(mappings),
"TMPL_named_exports": str(ctx.attr.named_exports),
"TMPL_node_modules_root": node_modules_root,
"TMPL_output_format": output_format,
"TMPL_rootDir": root_dir,
"TMPL_stamp_data": "\"%s\"" % ctx.version_file.path if ctx.version_file else "undefined",
"TMPL_target": str(ctx.label),
"TMPL_workspace_name": ctx.workspace_name,
},
)
return config
def run_rollup(ctx, sources, config, output):
"""Creates an Action that can run rollup on set of sources.
This is also used by ng_package and ng_rollup_bundle rules in @angular/bazel.
Args:
ctx: Bazel rule execution context
sources: JS sources to rollup
config: rollup config file
output: output file
Returns:
the sourcemap output file
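    For example, this file produces the ES2015 bundle with
    run_rollup(ctx, _collect_es2015_sources(ctx), rollup_config, ctx.outputs.build_es2015).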
"""
map_output = ctx.actions.declare_file(output.basename + ".map", sibling = output)
_run_rollup(ctx, sources, config, output, map_output)
return map_output
def _filter_js_inputs(all_inputs):
# Note: make sure that "all_inputs" is not a depset.
# Iterating over a depset is deprecated!
return [
f
for f in all_inputs
# We also need to include ".map" files as these can be read by
# the "rollup-plugin-sourcemaps" plugin.
if f.path.endswith(".js") or f.path.endswith(".json") or f.path.endswith(".map")
]
def _run_rollup(ctx, sources, config, output, map_output = None):
args = ctx.actions.args()
args.add_all(["--config", config.path])
if map_output:
args.add_all(["--output.file", output.path])
args.add_all(["--output.sourcemap", "--output.sourcemapFile", map_output.path])
else:
args.add_all(["--output.dir", output.path])
args.add_all(["--output.sourcemap"])
# We will produce errors as needed. Anything else is spammy: a well-behaved
# bazel rule prints nothing on success.
args.add("--silent")
if ctx.attr.globals:
args.add("--external")
args.add_joined(ctx.attr.globals.keys(), join_with = ",")
args.add("--globals")
args.add_joined(["%s:%s" % g for g in ctx.attr.globals.items()], join_with = ",")
direct_inputs = [config]
direct_inputs += _filter_js_inputs(ctx.files.node_modules)
# Also include files from npm fine grained deps as inputs.
# These deps are identified by the NodeModuleSources provider.
for d in ctx.attr.deps:
if NodeModuleSources in d:
# Note: we can't avoid calling .to_list() on sources
direct_inputs += _filter_js_inputs(d[NodeModuleSources].sources.to_list())
if ctx.file.license_banner:
direct_inputs += [ctx.file.license_banner]
if ctx.version_file:
direct_inputs += [ctx.version_file]
outputs = [output]
if map_output:
outputs += [map_output]
ctx.actions.run(
progress_message = "Bundling JavaScript %s [rollup]" % output.short_path,
executable = ctx.executable._rollup,
inputs = depset(direct_inputs, transitive = [sources]),
outputs = outputs,
arguments = [args],
)
def _run_tsc(ctx, input, output):
args = ctx.actions.args()
# No types needed since we are just downleveling.
# `--types` proceeded by another config argument means an empty types array
# for the command line parser.
# See https://github.com/Microsoft/TypeScript/issues/18581#issuecomment-330700612
args.add("--types")
args.add("--skipLibCheck")
args.add_all(["--target", "es5"])
args.add_all(["--lib", "es2015,dom"])
args.add("--allowJS")
args.add(input.path)
args.add_all(["--outFile", output.path])
ctx.actions.run(
progress_message = "Downleveling JavaScript to ES5 %s [typescript]" % output.short_path,
executable = ctx.executable._tsc,
inputs = [input],
outputs = [output],
arguments = [args],
)
def _run_tsc_on_directory(ctx, input_dir, output_dir):
config = ctx.actions.declare_file("_%s.code-split.tsconfig.json" % ctx.label.name)
args = ctx.actions.args()
args.add_all(["--project", config.path])
args.add_all(["--input", input_dir.path])
args.add_all(["--output", output_dir.path])
ctx.actions.run(
progress_message = "Downleveling JavaScript to ES5 %s [typescript]" % output_dir.short_path,
executable = ctx.executable._tsc_directory,
inputs = [input_dir],
outputs = [output_dir, config],
arguments = [args],
)
def run_uglify(**kwargs):
print("WARNING: run_uglify has been renamed to run_terser. Please update callsites")
run_terser(**kwargs)
def run_terser(ctx, input, output, debug = False, comments = True, config_name = None, in_source_map = None):
"""Runs terser on an input file.
This is also used by https://github.com/angular/angular.
Args:
ctx: Bazel rule execution context
input: input file
output: output file
debug: if True then output is beautified (defaults to False)
comments: if True then copyright comments are preserved in output file (defaults to True)
config_name: allows callers to control the name of the generated terser configuration,
which will be `_[config_name].terser.json` in the package where the target is declared
in_source_map: sourcemap file for the input file, passed to the "--source-map content="
          option of terser.
Returns:
The sourcemap file
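    For example, this file produces the minified ES5 bundle with
    run_terser(ctx, ctx.outputs.build_es5, ctx.outputs.build_es5_min).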
"""
map_output = ctx.actions.declare_file(output.basename + ".map", sibling = output)
_run_terser(ctx, input, output, map_output, debug, comments, config_name, in_source_map)
return map_output
def _run_terser(ctx, input, output, map_output, debug = False, comments = True, config_name = None, in_source_map = None):
inputs = [input]
outputs = [output]
args = ctx.actions.args()
if map_output:
# Running terser on an individual file
if not config_name:
config_name = ctx.label.name
if debug:
config_name += ".debug"
config = ctx.actions.declare_file("_%s.terser.json" % config_name)
args.add_all(["--config-file", config.path])
outputs += [map_output, config]
args.add(input.path)
args.add_all(["--output", output.path])
# Source mapping options are comma-packed into one argv
# see https://github.com/terser-js/terser#command-line-usage
source_map_opts = ["includeSources", "base=" + ctx.bin_dir.path]
if in_source_map:
source_map_opts.append("content=" + in_source_map.path)
inputs.append(in_source_map)
# This option doesn't work in the config file, only on the CLI
args.add_all(["--source-map", ",".join(source_map_opts)])
if comments:
args.add("--comments")
if debug:
args.add("--debug")
args.add("--beautify")
ctx.actions.run(
progress_message = "Optimizing JavaScript %s [terser]" % output.short_path,
executable = ctx.executable._terser_wrapped,
inputs = inputs,
outputs = outputs,
arguments = [args],
)
def run_sourcemapexplorer(ctx, js, map, output):
"""Runs source-map-explorer to produce an HTML visualization of the sourcemap.
Args:
ctx: bazel rule execution context
js: Javascript bundle
map: sourcemap from the bundle back to original sources
output: file where the HTML report is written
"""
# We must run in a shell in order to redirect stdout.
# TODO(alexeagle): file a feature request on ctx.actions.run so that stdout
# could be natively redirected to produce the output file
ctx.actions.run_shell(
inputs = [js, map],
tools = [ctx.executable._source_map_explorer],
outputs = [output],
command = "$1 --html $2 $3 > $4",
arguments = [
ctx.executable._source_map_explorer.path,
js.path,
map.path,
output.path,
],
)
def _generate_toplevel_entry(ctx, bundles_folder, output):
"""Generates a native ESmodule that imports the entry point
"""
main_entry_point_basename = _entry_point_path(ctx).split("/")[-1] + ".js"
ctx.actions.write(output, """import('./%s/%s');""" % (bundles_folder, main_entry_point_basename))
def _generate_code_split_entry(ctx, bundles_folder, output):
"""Generates a SystemJS boilerplate/entry point file.
See doc for additional_entry_points for more information
on purpose and usage of this generated file.
The SystemJS packages map outputted to the file is generated
from the entry_point and additional_entry_point attributes and
is targetted as a specific bundle variant specified by `folder`.
For example, a rollup_bundle in may be configured like so:
```
rollup_bundle(
name = "bundle",
additional_entry_points = [
"src/hello-world/hello-world.module.ngfactory",
"src/todos/todos.module.ngfactory",
],
entry_point = "src/main.prod",
deps = ["//src"],
)
```
In this case, the main_entry_point_dirname will evaluate to
`src/` and this will be stripped from the entry points for
the map. If folder is `bundle_chunks`, the generated SystemJS
boilerplate/entry point file will look like:
```
(function(global) {
System.config({
packages: {
'': {map: {
"./main.prod": "bundle_chunks/main.prod",
"./hello-world/hello-world.module.ngfactory": "bundle_chunks/hello-world.module.ngfactory",
"./todos/todos.module.ngfactory": "bundle_chunks/todos.module.ngfactory"},
defaultExtension: 'js'},
}
});
System.import('main.prod').catch(function(err) {
console.error(err);
});
})(this);
```
Args:
ctx: bazel rule execution context
bundles_folder: the folder name with the bundled chunks to map to
output: the file to generate
"""
entry_point_path = _entry_point_path(ctx)
main_entry_point_basename = entry_point_path.split("/")[-1] + ".js"
main_entry_point_dirname = "/".join(entry_point_path.split("/")[:-1]) + "/"
entry_points = {}
for e in [entry_point_path] + ctx.attr.additional_entry_points:
entry_point = e[len(main_entry_point_dirname):]
entry_points["./" + entry_point] = bundles_folder + "/" + entry_point.split("/")[-1]
ctx.actions.expand_template(
output = output,
template = ctx.file._system_config_tmpl,
substitutions = {
"TMPL_entry_points": str(entry_points),
"TMPL_main_entry_point": main_entry_point_basename,
},
)
def _rollup_bundle(ctx):
if len(ctx.attr.entry_point.files.to_list()) != 1:
fail("labels in entry_point must contain exactly one file")
if ctx.attr.additional_entry_points:
# Generate code split bundles if additional entry points have been specified.
# See doc for additional_entry_points for more information.
# Note: "_chunks" is needed on the output folders since ctx.label.name + ".es2015" is already
# a folder that contains the re-rooted es2015 sources
rollup_config = write_rollup_config(ctx, output_format = "es", additional_entry_points = ctx.attr.additional_entry_points)
code_split_es2015_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks_es2015")
_run_rollup(ctx, _collect_es2015_sources(ctx), rollup_config, code_split_es2015_output_dir)
code_split_es2015_min_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks_min_es2015")
_run_terser(ctx, code_split_es2015_output_dir, code_split_es2015_min_output_dir, None)
code_split_es2015_min_debug_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks_min_debug_es2015")
_run_terser(ctx, code_split_es2015_output_dir, code_split_es2015_min_debug_output_dir, None, debug = True)
code_split_es5_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks")
_run_tsc_on_directory(ctx, code_split_es2015_output_dir, code_split_es5_output_dir)
code_split_es5_min_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks_min")
_run_terser(ctx, code_split_es5_output_dir, code_split_es5_min_output_dir, None)
code_split_es5_min_debug_output_dir = ctx.actions.declare_directory(ctx.label.name + "_chunks_min_debug")
_run_terser(ctx, code_split_es5_output_dir, code_split_es5_min_debug_output_dir, None, debug = True)
# Generate the SystemJS boilerplate/entry point files
_generate_toplevel_entry(ctx, ctx.label.name + "_chunks_es2015", ctx.outputs.build_es2015)
_generate_toplevel_entry(ctx, ctx.label.name + "_chunks_min_es2015", ctx.outputs.build_es2015_min)
_generate_toplevel_entry(ctx, ctx.label.name + "_chunks_min_debug_es2015", ctx.outputs.build_es2015_min_debug)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_es5)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks_min", ctx.outputs.build_es5_min)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks_min_debug", ctx.outputs.build_es5_min_debug)
# There is no UMD/CJS bundle when code-splitting but we still need to satisfy the output
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_umd)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_umd_min)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_cjs)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_es5_umd)
_generate_code_split_entry(ctx, ctx.label.name + "_chunks", ctx.outputs.build_es5_umd_min)
# There is no source map explorer output when code-splitting but we still need to satisfy the output
ctx.actions.expand_template(
output = ctx.outputs.explore_html,
template = ctx.file._no_explore_html,
substitutions = {},
)
files = [
ctx.outputs.build_es2015,
ctx.outputs.build_es2015_min,
ctx.outputs.build_es2015_min_debug,
ctx.outputs.build_es5,
ctx.outputs.build_es5_min,
ctx.outputs.build_es5_min_debug,
code_split_es2015_output_dir,
code_split_es2015_min_output_dir,
code_split_es2015_min_debug_output_dir,
code_split_es5_output_dir,
code_split_es5_min_output_dir,
code_split_es5_min_debug_output_dir,
]
output_group = OutputGroupInfo(
es2015 = depset([ctx.outputs.build_es2015, code_split_es2015_output_dir]),
es2015_min = depset([ctx.outputs.build_es2015_min, code_split_es2015_min_output_dir]),
es2015_min_debug = depset([ctx.outputs.build_es2015_min_debug, code_split_es2015_min_debug_output_dir]),
es5 = depset([ctx.outputs.build_es5, code_split_es5_output_dir]),
es5_min = depset([ctx.outputs.build_es5_min, code_split_es5_min_output_dir]),
es5_min_debug = depset([ctx.outputs.build_es5_min_debug, code_split_es5_min_debug_output_dir]),
)
else:
# Generate the bundles
rollup_config = write_rollup_config(ctx)
es2015_map = run_rollup(ctx, _collect_es2015_sources(ctx), rollup_config, ctx.outputs.build_es2015)
es2015_min_map = run_terser(ctx, ctx.outputs.build_es2015, ctx.outputs.build_es2015_min, config_name = ctx.label.name + "es2015_min", in_source_map = es2015_map)
es2015_min_debug_map = run_terser(ctx, ctx.outputs.build_es2015, ctx.outputs.build_es2015_min_debug, debug = True, config_name = ctx.label.name + "es2015_min_debug", in_source_map = es2015_map)
_run_tsc(ctx, ctx.outputs.build_es2015, ctx.outputs.build_es5)
es5_min_map = run_terser(ctx, ctx.outputs.build_es5, ctx.outputs.build_es5_min)
es5_min_debug_map = run_terser(ctx, ctx.outputs.build_es5, ctx.outputs.build_es5_min_debug, debug = True)
cjs_rollup_config = write_rollup_config(ctx, filename = "_%s_cjs.rollup.conf.js", output_format = "cjs")
cjs_map = run_rollup(ctx, _collect_es2015_sources(ctx), cjs_rollup_config, ctx.outputs.build_cjs)
umd_rollup_config = write_rollup_config(ctx, filename = "_%s_umd.rollup.conf.js", output_format = "umd")
umd_map = run_rollup(ctx, _collect_es2015_sources(ctx), umd_rollup_config, ctx.outputs.build_umd)
umd_min_map = run_terser(ctx, ctx.outputs.build_umd, ctx.outputs.build_umd_min, config_name = ctx.label.name + "umd_min", in_source_map = umd_map)
_run_tsc(ctx, ctx.outputs.build_umd, ctx.outputs.build_es5_umd)
es5_umd_min_map = run_terser(ctx, ctx.outputs.build_es5_umd, ctx.outputs.build_es5_umd_min, config_name = ctx.label.name + "es5umd_min")
run_sourcemapexplorer(ctx, ctx.outputs.build_es5_min, es5_min_map, ctx.outputs.explore_html)
files = [ctx.outputs.build_es5_min, es5_min_map]
output_group = OutputGroupInfo(
cjs = depset([ctx.outputs.build_cjs, cjs_map]),
es2015 = depset([ctx.outputs.build_es2015, es2015_map]),
es2015_min = depset([ctx.outputs.build_es2015_min, es2015_min_map]),
es2015_min_debug = depset([ctx.outputs.build_es2015_min_debug, es2015_min_debug_map]),
es5 = depset([ctx.outputs.build_es5]),
es5_min = depset([ctx.outputs.build_es5_min, es5_min_map]),
es5_min_debug = depset([ctx.outputs.build_es5_min_debug, es5_min_debug_map]),
es5_umd = depset([ctx.outputs.build_es5_umd]),
es5_umd_min = depset([ctx.outputs.build_es5_umd_min, es5_umd_min_map]),
umd = depset([ctx.outputs.build_umd, umd_map]),
umd_min = depset([ctx.outputs.build_umd_min, umd_min_map]),
)
return [
DefaultInfo(
files = depset(files),
# NB: we don't include any runfiles here since they would always be built
# regardless if they are requested or not
),
output_group,
]
# Expose our list of aspects so derivative rules can override the deps attribute and
# add their own additional aspects.
# If users are in a different repo and load the aspect themselves, they will create
# different Provider symbols (e.g. NodeModuleInfo) and we won't find them.
# So users must use these symbols that are load'ed in rules_nodejs.
ROLLUP_DEPS_ASPECTS = [rollup_module_mappings_aspect, collect_node_modules_aspect]
ROLLUP_ATTRS = {
"srcs": attr.label_list(
doc = """JavaScript source files from the workspace.
These can use ES2015 syntax and ES Modules (import/export)""",
allow_files = [".js"],
),
"additional_entry_points": attr.string_list(
doc = """Additional entry points of the application for code splitting, passed as the input to rollup.
These should be a path relative to the workspace root.
When additional_entry_points are specified, rollup_bundle
will split the bundle in multiple entry points and chunks.
There will be a main entry point chunk as well as entry point
chunks for each additional_entry_point. The file names
of these entry points will correspond to the file names
specified in entry_point and additional_entry_points.
There will also be one or more common chunks that are shared
between entry points named chunk-<HASH>.js. The number
of common chunks is variable depending on the code being
bundled.
Entry points and chunks will be outputted to folders:
- <label-name>_chunks_es2015 // es2015
- <label-name>_chunks // es5
- <label-name>_chunks_min // es5 minified
- <label-name>_chunks_min_debug // es5 minified debug
The following files will be outputted that contain the
SystemJS boilerplate to map the entry points to their file
names and load the main entry point:
flavors:
- <label-name>.es2015.js // es2015 with EcmaScript modules
- <label-name>.js // es5 syntax with CJS modules
- <label-name>.min.js // es5 minified
- <label-name>.min_debug.js // es5 minified debug
NOTE: additional_entry_points MUST be in the same folder or deeper than
the main entry_point for the SystemJS boilerplate/entry point to
be valid. For example, if the main entry_point is
`src/main` then all additional_entry_points must be under
`src/**` such as `src/bar` or `src/foo/bar`. Alternate
additional_entry_points configurations are valid but the
SystemJS boilerplate/entry point files will not be usable and
it is up to the user in these cases to handle the SystemJS
boilerplate manually.
It is sufficient to load one of these SystemJS boilerplate/entry point
files as a script in your HTML to load your application""",
),
"entry_point": attr.label(
doc = """The starting point of the application, passed as the `--input` flag to rollup.
If the entry JavaScript file belongs to the same package (as the BUILD file),
you can simply reference it by its relative name to the package directory:
```
rollup_bundle(
name = "bundle",
entry_point = ":main.js",
)
```
You can specify the entry point as a typescript file so long as you also include
the ts_library target in deps:
```
ts_library(
name = "main",
srcs = ["main.ts"],
)
rollup_bundle(
name = "bundle",
            deps = [":main"],
entry_point = ":main.ts",
)
```
The rule will use the corresponding `.js` output of the ts_library rule as the entry point.
        If the entry point target is a rule, it should produce a single JavaScript entry file that will be passed to rollup as the entry point.
For example:
```
filegroup(
name = "entry_file",
srcs = ["main.js"],
)
rollup_bundle(
name = "bundle",
entry_point = ":entry_file",
)
```
""",
mandatory = True,
allow_single_file = True,
),
"global_name": attr.string(
doc = """A name given to this package when referenced as a global variable.
This name appears in the bundle module incantation at the beginning of the file,
and governs the global symbol added to the global context (e.g. `window`) as a side-
effect of loading the UMD/IIFE JS bundle.
Rollup doc: "The variable name, representing your iife/umd bundle, by which other scripts on the same page can access it."
This is passed to the `output.name` setting in Rollup.""",
),
"globals": attr.string_dict(
doc = """A dict of symbols that reference external scripts.
The keys are variable names that appear in the program,
and the values are the symbol to reference at runtime in a global context (UMD bundles).
For example, a program referencing @angular/core should use ng.core
as the global reference, so Angular users should include the mapping
`"@angular/core":"ng.core"` in the globals.""",
default = {},
),
"license_banner": attr.label(
doc = """A .txt file passed to the `banner` config option of rollup.
The contents of the file will be copied to the top of the resulting bundles.
Note that you can replace a version placeholder in the license file, by using
the special version `0.0.0-PLACEHOLDER`. See the section on stamping in the README.""",
allow_single_file = [".txt"],
),
"named_exports": attr.string_list_dict(
doc = """A dict of symbols informing rollup of objects exported by
modules that do not conform to commonjs, amd, or umd formats.
""",
default = {},
),
"node_modules": attr.label(
doc = """Dependencies from npm that provide some modules that must be
resolved by rollup.
This attribute is DEPRECATED. As of version 0.13.0 the recommended approach
to npm dependencies is to use fine grained npm dependencies which are setup
with the `yarn_install` or `npm_install` rules. For example, in a rollup_bundle
target that used the `node_modules` attribute,
```
rollup_bundle(
name = "bundle",
...
node_modules = "//:node_modules",
)
```
which specifies all files within the `//:node_modules` filegroup
to be inputs to the `bundle`. Using fine grained npm dependencies,
`bundle` is defined with only the npm dependencies that are
needed:
```
rollup_bundle(
name = "bundle",
...
deps = [
"@npm//foo",
"@npm//bar",
...
],
)
```
In this case, only the `foo` and `bar` npm packages and their
transitive deps are includes as inputs to the `bundle` target
which reduces the time required to setup the runfiles for this
target (see https://github.com/bazelbuild/bazel/issues/5153).
The @npm external repository and the fine grained npm package
targets are setup using the `yarn_install` or `npm_install` rule
in your WORKSPACE file:
yarn_install(
name = "npm",
package_json = "//:package.json",
yarn_lock = "//:yarn.lock",
)
""",
default = Label("//:node_modules_none"),
),
"deps": attr.label_list(
doc = """Other rules that produce JavaScript outputs, such as `ts_library`.""",
aspects = ROLLUP_DEPS_ASPECTS,
),
"_no_explore_html": attr.label(
default = Label("@build_bazel_rules_nodejs//internal/rollup:no_explore.html"),
allow_single_file = True,
),
"_rollup": attr.label(
executable = True,
cfg = "host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:rollup"),
),
"_rollup_config_tmpl": attr.label(
default = Label("@build_bazel_rules_nodejs//internal/rollup:rollup.config.js"),
allow_single_file = True,
),
"_source_map_explorer": attr.label(
executable = True,
cfg = "host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:source-map-explorer"),
),
"_system_config_tmpl": attr.label(
default = Label("@build_bazel_rules_nodejs//internal/rollup:system.config.js"),
allow_single_file = True,
),
"_terser_wrapped": attr.label(
executable = True,
cfg = "host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:terser-wrapped"),
),
"_tsc": attr.label(
executable = True,
cfg = "host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:tsc"),
),
"_tsc_directory": attr.label(
executable = True,
cfg = "host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:tsc-directory"),
),
}
ROLLUP_OUTPUTS = {
"build_cjs": "%{name}.cjs.js",
"build_es2015": "%{name}.es2015.js",
"build_es2015_min": "%{name}.min.es2015.js",
"build_es2015_min_debug": "%{name}.min_debug.es2015.js",
"build_es5": "%{name}.js",
"build_es5_min": "%{name}.min.js",
"build_es5_min_debug": "%{name}.min_debug.js",
"build_es5_umd": "%{name}.es5umd.js",
"build_es5_umd_min": "%{name}.min.es5umd.js",
"build_umd": "%{name}.umd.js",
"build_umd_min": "%{name}.min.umd.js",
"explore_html": "%{name}.explore.html",
}
rollup_bundle = rule(
implementation = _rollup_bundle,
attrs = ROLLUP_ATTRS,
outputs = ROLLUP_OUTPUTS,
)
"""Produces several bundled JavaScript files using Rollup and terser.
Load it with
`load("@build_bazel_rules_nodejs//:defs.bzl", "rollup_bundle")`
It performs this work in several separate processes:
1. Call rollup on the original sources
2. Downlevel the resulting code to es5 syntax for older browsers
3. Minify the bundle with terser, possibly with pretty output for human debugging.
The default output of a `rollup_bundle` rule is the non-debug-minified es5 bundle.
However you can request one of the other outputs with a dot-suffix on the target's name.
For example, if your `rollup_bundle` is named `my_rollup_bundle`, you can use one of these labels:
To request the ES2015 syntax (e.g. `class` keyword) without downleveling or minification, use the `:my_rollup_bundle.es2015.js` label.
To request the ES5 downleveled bundle without minification, use the `:my_rollup_bundle.js` label
To request the debug-minified es5 bundle, use the `:my_rollup_bundle.min_debug.js` label.
To request a UMD-bundle, use the `:my_rollup_bundle.umd.js` label.
To request a CommonJS bundle, use the `:my_rollup_bundle.cjs.js` label.
You can also request an analysis from source-map-explorer by building the `:my_rollup_bundle.explore.html` label.
However this is currently broken for `rollup_bundle` ES5 mode because we use tsc for downleveling and
it doesn't compose the resulting sourcemaps with an input sourcemap.
See https://github.com/bazelbuild/rules_nodejs/issues/175
For debugging, note that the `rollup.config.js` and `terser.config.json` files can be found in the bazel-bin folder next to the resulting bundle.
An example usage can be found in https://github.com/bazelbuild/rules_nodejs/tree/master/internal/e2e/rollup
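A minimal BUILD file usage might look like this (the target and dep names here are
illustrative, not taken from the original documentation):
```
rollup_bundle(
    name = "my_rollup_bundle",
    entry_point = ":main.js",
    deps = ["//src:lib"],
)
```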
"""
# Adding the above docstring as `doc` attribute
# causes a build error but ONLY on Ubuntu 14.04 on BazelCI.
# ```
# File "internal/npm_package/npm_package.bzl", line 221, in <module>
# outputs = NPM_PACKAGE_OUTPUTS,
# TypeError: rule() got an unexpected keyword argument 'doc'
# ```
# This error does not occur on any other platform on BazelCI including Ubuntu 16.04.
# TODO(gregmagolan): Figure out why and/or file a bug to Bazel
# See https://github.com/bazelbuild/buildtools/issues/471#issuecomment-485283200
| 43.382181 | 201 | 0.674368 |
08d93d9115e0536bd183b34532c584fad2d0a901 | 2,478 | py | Python | src/robot/model/message.py | bradyarthur/RobotFramework | f45747dfec1095359379ba0088cecd955a83e576 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/model/message.py | bradyarthur/RobotFramework | f45747dfec1095359379ba0088cecd955a83e576 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-01-21T03:06:37.000Z | 2021-01-21T03:06:37.000Z | src/robot/model/message.py | bradyarthur/RobotFramework | f45747dfec1095359379ba0088cecd955a83e576 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import html_escape, py3to2
from .body import BodyItem
from .itemlist import ItemList
@py3to2
class Message(BodyItem):
"""A message created during the test execution.
Can be a log message triggered by a keyword, or a warning or an error
that occurred during parsing or test execution.
"""
type = BodyItem.MESSAGE_TYPE
repr_args = ('message', 'level')
__slots__ = ['message', 'level', 'html', 'timestamp']
def __init__(self, message='', level='INFO', html=False, timestamp=None, parent=None):
#: The message content as a string.
self.message = message
#: Severity of the message. Either ``TRACE``, ``DEBUG``, ``INFO``,
        #: ``WARN``, ``ERROR``, ``FAIL`` or ``SKIP``. The last two are only used
#: with keyword failure messages.
self.level = level
#: ``True`` if the content is in HTML, ``False`` otherwise.
self.html = html
#: Timestamp in format ``%Y%m%d %H:%M:%S.%f``.
self.timestamp = timestamp
#: The object this message was triggered by.
self.parent = parent
@property
def html_message(self):
"""Returns the message content as HTML."""
return self.message if self.html else html_escape(self.message)
@property
def id(self):
if not self.parent:
return 'm1'
return '%s-m%d' % (self.parent.id, self.parent.messages.index(self) + 1)
def visit(self, visitor):
""":mod:`Visitor interface <robot.model.visitor>` entry-point."""
visitor.visit_message(self)
def __str__(self):
return self.message
class Messages(ItemList):
__slots__ = []
def __init__(self, message_class=Message, parent=None, messages=None):
ItemList.__init__(self, message_class, {'parent': parent}, messages)
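# Illustrative only (not part of the original module): Message('<b>done</b>', level='WARN').html_message
# returns the HTML-escaped text because ``html`` defaults to False, while
# Messages(messages=[Message('done')]) gives a list-like container of Message items.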
| 34.901408 | 90 | 0.66021 |
6e56812ff4d0924178dc5c8e2476bf37e6411018 | 20,136 | py | Python | tests.py | wannaphong/TextRecognitionDataGenerator | bb23f065c1cafef8a58851a2d196417cdca19b49 | [
"MIT"
] | 8 | 2019-06-01T14:59:12.000Z | 2021-06-14T04:27:45.000Z | tests.py | wannaphong/TextRecognitionDataGenerator | bb23f065c1cafef8a58851a2d196417cdca19b49 | [
"MIT"
] | 10 | 2020-01-28T22:45:39.000Z | 2022-02-10T00:22:28.000Z | tests.py | wannaphong/TextRecognitionDataGenerator | bb23f065c1cafef8a58851a2d196417cdca19b49 | [
"MIT"
] | 4 | 2019-07-25T09:40:52.000Z | 2020-03-18T14:17:23.000Z | import os
import sys
import unittest
import subprocess
import hashlib
import string
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), './TextRecognitionDataGenerator')))
try:
os.mkdir('tests/out')
except:
pass
from TextRecognitionDataGenerator.data_generator import FakeTextDataGenerator
from TextRecognitionDataGenerator import background_generator
from TextRecognitionDataGenerator.string_generator import (
create_strings_from_file,
create_strings_from_dict,
create_strings_from_wikipedia,
create_strings_randomly
)
def md5(filename):
hash_md5 = hashlib.md5()
with open(filename, "rb") as f:
hash_md5.update(f.read())
h = hash_md5.hexdigest()
return h
def empty_directory(path):
for f in os.listdir(path):
os.remove(os.path.join(path, f))
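# Each generation test below calls FakeTextDataGenerator.generate with one positional
# argument varied (skew angle, blur, distorsion, alignment, orientation, space width, ...)
# and compares the md5 of the produced image against a stored expected result.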
class DataGenerator(unittest.TestCase):
def test_create_string_from_wikipedia(self):
"""
Test that the function returns different output if called twice.
(And that it doesn't throw of course)
"""
strings = create_strings_from_wikipedia(20, 2, 'en')
self.assertTrue(
len(strings) == 2 and
strings[0] != strings[1] and
len(strings[0].split(' ')) >= 20 and
len(strings[1].split(' ')) >= 20
)
def test_create_string_from_file(self):
strings = create_strings_from_file('tests/test.txt', 6)
self.assertTrue(
len(strings) == 6 and
strings[0] != strings[1] and
strings[0] == strings[3]
)
def test_create_strings_from_dict(self):
strings = create_strings_from_dict(3, False, 2, ['TEST\n', 'TEST\n', 'TEST\n', 'TEST\n'])
self.assertTrue(
len(strings) == 2 and
len(strings[0].split(' ')) == 3
)
def test_generate_data_with_format(self):
FakeTextDataGenerator.generate(
0,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_0.jpg') == md5('tests/expected_results/TEST TEST TEST_0.jpg')
)
os.remove('tests/out/TEST TEST TEST_0.jpg')
def test_generate_data_with_extension(self):
FakeTextDataGenerator.generate(
1,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
32,
'png',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_1.png') == md5('tests/expected_results/TEST TEST TEST_1.png')
)
os.remove('tests/out/TEST TEST TEST_1.png')
def test_generate_data_with_skew_angle(self):
FakeTextDataGenerator.generate(
2,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
15,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_2.jpg') == md5('tests/expected_results/TEST TEST TEST_2.jpg')
)
os.remove('tests/out/TEST TEST TEST_2.jpg')
def test_generate_data_with_blur(self):
FakeTextDataGenerator.generate(
3,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
3,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_3.jpg') == md5('tests/expected_results/TEST TEST TEST_3.jpg')
)
os.remove('tests/out/TEST TEST TEST_3.jpg')
def test_generate_data_with_sine_distorsion(self):
FakeTextDataGenerator.generate(
4,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
3,
False,
1,
1,
2,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_4.jpg') == md5('tests/expected_results/TEST TEST TEST_4.jpg')
)
os.remove('tests/out/TEST TEST TEST_4.jpg')
def test_generate_data_with_cosine_distorsion(self):
FakeTextDataGenerator.generate(
5,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
3,
False,
1,
2,
2,
False,
0,
-1,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_5.jpg') == md5('tests/expected_results/TEST TEST TEST_5.jpg')
)
os.remove('tests/out/TEST TEST TEST_5.jpg')
def test_generate_data_with_left_alignment(self):
FakeTextDataGenerator.generate(
6,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
600,
0,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_6.jpg') == md5('tests/expected_results/TEST TEST TEST_6.jpg')
)
os.remove('tests/out/TEST TEST TEST_6.jpg')
def test_generate_data_with_center_alignment(self):
FakeTextDataGenerator.generate(
7,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
800,
1,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_7.jpg') == md5('tests/expected_results/TEST TEST TEST_7.jpg')
)
os.remove('tests/out/TEST TEST TEST_7.jpg')
def test_generate_data_with_right_alignment(self):
FakeTextDataGenerator.generate(
8,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
1000,
2,
'#010101',
0,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_8.jpg') == md5('tests/expected_results/TEST TEST TEST_8.jpg')
)
os.remove('tests/out/TEST TEST TEST_8.jpg')
def test_raise_if_handwritten_and_vertical(self):
try:
FakeTextDataGenerator.generate(
9,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
True,
0,
1000,
2,
'#010101',
1,
1,
(5,5,5,5),
0
)
raise Exception("Vertical handwritten did not throw")
except ValueError:
pass
def test_generate_vertical_text(self):
FakeTextDataGenerator.generate(
10,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
32,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
1,
1,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_10.jpg') == md5('tests/expected_results/TEST TEST TEST_10.jpg')
)
os.remove('tests/out/TEST TEST TEST_10.jpg')
def test_generate_horizontal_text_with_variable_space(self):
FakeTextDataGenerator.generate(
11,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
32,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
4,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_11.jpg') == md5('tests/expected_results/TEST TEST TEST_11.jpg')
)
os.remove('tests/out/TEST TEST TEST_11.jpg')
def test_generate_vertical_text_with_variable_space(self):
FakeTextDataGenerator.generate(
12,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
32,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
1,
2,
(5,5,5,5),
0
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_12.jpg') == md5('tests/expected_results/TEST TEST TEST_12.jpg')
)
os.remove('tests/out/TEST TEST TEST_12.jpg')
def test_generate_text_with_unknown_orientation(self):
try:
FakeTextDataGenerator.generate(
12,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
32,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
100,
2,
(5,5,5,5),
0
)
raise Exception("Unknown orientation did not throw")
except ValueError:
pass
def test_generate_data_with_fit(self):
FakeTextDataGenerator.generate(
13,
'TEST TEST TEST',
'tests/font.ttf',
'tests/out/',
64,
'jpg',
0,
False,
0,
False,
1,
0,
0,
False,
0,
-1,
0,
'#010101',
0,
1,
(0,0,0,0),
1
)
self.assertTrue(
md5('tests/out/TEST TEST TEST_13.jpg') == md5('tests/expected_results/TEST TEST TEST_13.jpg')
)
os.remove('tests/out/TEST TEST TEST_13.jpg')
def test_generate_string_with_letters(self):
s = create_strings_randomly(1, False, 1, True, False, False, 'en')[0]
self.assertTrue(
all([l in string.ascii_letters for l in s])
)
def test_generate_string_with_numbers(self):
s = create_strings_randomly(1, False, 1, False, True, False, 'en')[0]
self.assertTrue(
all([l in '0123456789' for l in s])
)
def test_generate_string_with_symbols(self):
s = create_strings_randomly(1, False, 1, False, False, True, 'en')[0]
self.assertTrue(
all([l in '!"#$%&\'()*+,-./:;?@[\\]^_`{|}~' for l in s])
)
def test_generate_chinese_string(self):
s = create_strings_randomly(1, False, 1, True, False, False, 'cn')[0]
cn_chars = [chr(i) for i in range(19968, 40908)]
self.assertTrue(
all([l in cn_chars for l in s])
)
def test_generate_data_with_white_background(self):
background_generator.plain_white(64, 128).convert('RGB').save('tests/out/white_background.jpg')
self.assertTrue(
md5('tests/out/white_background.jpg') == md5('tests/expected_results/white_background.jpg')
)
os.remove('tests/out/white_background.jpg')
def test_generate_data_with_gaussian_background(self):
background_generator.gaussian_noise(64, 128).convert('RGB').save('tests/out/gaussian_background.jpg')
self.assertTrue(
md5('tests/out/gaussian_background.jpg') == md5('tests/expected_results/gaussian_background.jpg')
)
os.remove('tests/out/gaussian_background.jpg')
def test_generate_data_with_quasicrystal_background(self):
bkgd = background_generator.quasicrystal(64, 128)
self.assertTrue(
len(bkgd.histogram()) > 20 and bkgd.size == (128, 64)
)
class CommandLineInterface(unittest.TestCase):
def test_output_dir(self):
args = ['python3', 'run.py', '-c', '1', '--output_dir', '../tests/out_2/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out_2/')) == 1)
empty_directory('tests/out_2/')
def test_language_english(self):
args = ['python3', 'run.py', '-l', 'en', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_language_french(self):
args = ['python3', 'run.py', '-l', 'fr', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_language_spanish(self):
args = ['python3', 'run.py', '-l', 'es', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_language_german(self):
args = ['python3', 'run.py', '-l', 'de', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_language_chinese(self):
args = ['python3', 'run.py', '-l', 'cn', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_count_parameter(self):
args = ['python3', 'run.py', '-c', '10', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 10)
empty_directory('tests/out/')
def test_random_sequences_letter_only(self):
args = ['python3', 'run.py', '-rs', '-let', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(all([c in string.ascii_letters for f in os.listdir('tests/out/') for c in f.split('_')[0]]))
empty_directory('tests/out/')
def test_random_sequences_number_only(self):
args = ['python3', 'run.py', '-rs', '-num', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(all([c in '0123456789' for f in os.listdir('tests/out/') for c in f.split('_')[0]]))
empty_directory('tests/out/')
def test_random_sequences_symbols_only(self):
args = ['python3', 'run.py', '-rs', '-sym', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
with open('tests/out/labels.txt', 'r') as f:
self.assertTrue(all([c in "!\"#$%&'()*+,-./:;?@[\\]^_`{|}~" for c in f.readline().split(' ')[1][:-1]]))
empty_directory('tests/out/')
def test_handwritten(self):
args = ['python3', 'run.py', '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_personalfont(self):
args = ['python3', 'run.py', '--font', 'fonts/latin/Aller_Bd.ttf' , '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 1)
empty_directory('tests/out/')
def test_personalfont_unlocated(self):
args = ['python3', 'run.py', '--font', 'fonts/latin/unlocatedFont.ttf' , '-c', '1', '--output_dir', '../tests/out/']
subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
self.assertTrue(len(os.listdir('tests/out/')) == 0)
empty_directory('tests/out/')
# def test_word_count(self):
# args = ['python3', 'run.py', '-c', '1', '-w', '5']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
#
# def test_extension_jpg(self):
# args = ['python3', 'run.py', '-c', '1', '-e', 'jpg']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
#
# def test_extension_png(self):
# args = ['python3', 'run.py', '-c', '1', '-e', 'png']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
#
# def test_name_format_0(self):
# args = ['python3', 'run.py', '-c', '1', '-na', '0']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
#
# def test_name_format_1(self):
# args = ['python3', 'run.py', '-c', '1', '-na', '1']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
#
# def test_name_format_2(self):
# args = ['python3', 'run.py', '-c', '1', '-na', '2']
# subprocess.Popen(args, cwd="TextRecognitionDataGenerator/").wait()
# self.assertTrue(False)
# empty_directory('tests/out/')
if __name__ == '__main__':
unittest.main()
| 28.083682 | 124 | 0.480979 |
0fb85f11d1b565eb1d9774c265013ce60ad26d59 | 19,230 | py | Python | tests/scenario_tests_async/test_events_shared_channels.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 1 | 2021-05-02T16:06:44.000Z | 2021-05-02T16:06:44.000Z | tests/scenario_tests_async/test_events_shared_channels.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | 1 | 2021-02-23T21:05:57.000Z | 2021-02-23T21:05:57.000Z | tests/scenario_tests_async/test_events_shared_channels.py | korymath/bolt-python | 67e0286d756ba92510315d044303f43b03380b52 | [
"MIT"
] | null | null | null | import asyncio
import json
from random import random
from time import time
import pytest
from slack_sdk.signature import SignatureVerifier
from slack_sdk.web.async_client import AsyncWebClient
from slack_bolt.app.async_app import AsyncApp
from slack_bolt.authorization import AuthorizeResult
from slack_bolt.context.say.async_say import AsyncSay
from slack_bolt.request.async_request import AsyncBoltRequest
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
valid_token = "xoxb-valid"
async def authorize(enterprise_id, team_id, client: AsyncWebClient):
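    # Simulates a shared-channel (Slack Connect) setup: events come from the source
    # workspace (E_SOURCE/T_SOURCE in the payloads below), while the app is installed
    # on E_INSTALLED/T_INSTALLED, so authorization resolves against the installed
    # workspace's bot token.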
assert enterprise_id == "E_INSTALLED"
assert team_id == "T_INSTALLED"
auth_test = await client.auth_test(token=valid_token)
return AuthorizeResult.from_auth_test_response(
auth_test_response=auth_test, bot_token=valid_token,
)
class TestAsyncEventsSharedChannels:
signing_secret = "secret"
valid_token = "xoxb-valid"
mock_api_server_base_url = "http://localhost:8888"
signature_verifier = SignatureVerifier(signing_secret)
web_client = AsyncWebClient(token=None, base_url=mock_api_server_base_url)
@pytest.fixture
def event_loop(self):
old_os_env = remove_os_env_temporarily()
try:
setup_mock_web_api_server(self)
loop = asyncio.get_event_loop()
yield loop
loop.close()
cleanup_mock_web_api_server(self)
finally:
restore_os_env(old_os_env)
def generate_signature(self, body: str, timestamp: str):
return self.signature_verifier.generate_signature(
body=body, timestamp=timestamp,
)
def build_headers(self, timestamp: str, body: str):
return {
"content-type": ["application/json"],
"x-slack-signature": [self.generate_signature(body, timestamp)],
"x-slack-request-timestamp": [timestamp],
}
def build_valid_app_mention_request(self) -> AsyncBoltRequest:
timestamp, body = str(int(time())), json.dumps(app_mention_body)
return AsyncBoltRequest(body=body, headers=self.build_headers(timestamp, body))
@pytest.mark.asyncio
async def test_mock_server_is_running(self):
resp = await self.web_client.api_test(token=valid_token)
        assert resp is not None
@pytest.mark.asyncio
async def test_app_mention(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("app_mention")(whats_up)
request = self.build_valid_app_mention_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
await asyncio.sleep(1) # wait a bit after auto ack()
assert self.mock_received_requests["/chat.postMessage"] == 1
@pytest.mark.asyncio
async def test_process_before_response(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
process_before_response=True,
)
app.event("app_mention")(whats_up)
request = self.build_valid_app_mention_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
# no sleep here
assert self.mock_received_requests["/chat.postMessage"] == 1
@pytest.mark.asyncio
async def test_middleware_skip(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("app_mention", middleware=[skip_middleware])(whats_up)
request = self.build_valid_app_mention_request()
response = await app.async_dispatch(request)
assert response.status == 404
assert self.mock_received_requests["/auth.test"] == 1
@pytest.mark.asyncio
async def test_simultaneous_requests(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("app_mention")(random_sleeper)
request = self.build_valid_app_mention_request()
times = 10
tasks = []
for i in range(times):
tasks.append(asyncio.ensure_future(app.async_dispatch(request)))
await asyncio.sleep(5)
# Verifies all the tasks have been completed with 200 OK
assert sum([t.result().status for t in tasks if t.done()]) == 200 * times
assert self.mock_received_requests["/auth.test"] == times
assert self.mock_received_requests["/chat.postMessage"] == times
def build_valid_reaction_added_request(self) -> AsyncBoltRequest:
timestamp, body = str(int(time())), json.dumps(reaction_added_body)
return AsyncBoltRequest(body=body, headers=self.build_headers(timestamp, body))
@pytest.mark.asyncio
async def test_reaction_added(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("reaction_added")(whats_up)
request = self.build_valid_reaction_added_request()
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
await asyncio.sleep(1) # wait a bit after auto ack()
assert self.mock_received_requests["/chat.postMessage"] == 1
@pytest.mark.asyncio
async def test_stable_auto_ack(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("reaction_added")(always_failing)
for _ in range(10):
request = self.build_valid_reaction_added_request()
response = await app.async_dispatch(request)
assert response.status == 200
@pytest.mark.asyncio
async def test_self_events(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("reaction_added")(whats_up)
self_event = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "reaction_added",
"user": "W23456789", # bot_user_id
"item": {
"type": "message",
"channel": "C111",
"ts": "1599529504.000400",
},
"reaction": "heart_eyes",
"item_user": "W111",
"event_ts": "1599616881.000800",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
timestamp, body = str(int(time())), json.dumps(self_event)
request = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
await asyncio.sleep(1) # wait a bit after auto ack()
# The listener should not be executed
assert self.mock_received_requests.get("/chat.postMessage") is None
@pytest.mark.asyncio
async def test_self_joined_left_events(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("reaction_added")(whats_up)
join_event_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "member_joined_channel",
"user": "W23456789", # bot_user_id
"channel": "C111",
"channel_type": "C",
"team": "T_INSTALLED",
"inviter": "U222",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
left_event_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "member_left_channel",
"user": "W23456789", # bot_user_id
"channel": "C111",
"channel_type": "C",
"team": "T_INSTALLED",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
@app.event("member_joined_channel")
async def handle_member_joined_channel(say):
await say("What's up?")
@app.event("member_left_channel")
async def handle_member_left_channel(say):
await say("What's up?")
timestamp, body = str(int(time())), json.dumps(join_event_body)
request = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
timestamp, body = str(int(time())), json.dumps(left_event_body)
request = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
await asyncio.sleep(1) # wait a bit after auto ack()
# The listeners should be executed
assert self.mock_received_requests.get("/chat.postMessage") == 2
@pytest.mark.asyncio
async def test_joined_left_events(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app.event("reaction_added")(whats_up)
join_event_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "member_joined_channel",
"user": "W111", # other user
"channel": "C111",
"channel_type": "C",
"team": "T_INSTALLED",
"inviter": "U222",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
left_event_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "member_left_channel",
"user": "W111", # other user
"channel": "C111",
"channel_type": "C",
"team": "T_INSTALLED",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
@app.event("member_joined_channel")
async def handle_member_joined_channel(say):
await say("What's up?")
@app.event("member_left_channel")
async def handle_member_left_channel(say):
await say("What's up?")
timestamp, body = str(int(time())), json.dumps(join_event_body)
request = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
assert self.mock_received_requests["/auth.test"] == 1
timestamp, body = str(int(time())), json.dumps(left_event_body)
request = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
await asyncio.sleep(1) # wait a bit after auto ack()
# The listeners should be executed
assert self.mock_received_requests.get("/chat.postMessage") == 2
@pytest.mark.asyncio
async def test_uninstallation_and_revokes(self):
app = AsyncApp(
client=self.web_client,
signing_secret=self.signing_secret,
authorize=authorize,
)
app._client = AsyncWebClient(
token="uninstalled-revoked", base_url=self.mock_api_server_base_url
)
@app.event("app_uninstalled")
async def handler1(say: AsyncSay):
await say(channel="C111", text="What's up?")
@app.event("tokens_revoked")
async def handler2(say: AsyncSay):
await say(channel="C111", text="What's up?")
app_uninstalled_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {"type": "app_uninstalled"},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
timestamp, body = str(int(time())), json.dumps(app_uninstalled_body)
request: AsyncBoltRequest = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
tokens_revoked_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "tokens_revoked",
"tokens": {"oauth": ["UXXXXXXXX"], "bot": ["UXXXXXXXX"]},
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
timestamp, body = str(int(time())), json.dumps(tokens_revoked_body)
request: AsyncBoltRequest = AsyncBoltRequest(
body=body, headers=self.build_headers(timestamp, body)
)
response = await app.async_dispatch(request)
assert response.status == 200
# AsyncApp doesn't call auth.test when booting
assert self.mock_received_requests.get("/auth.test") is None
await asyncio.sleep(1) # wait a bit after auto ack()
assert self.mock_received_requests["/chat.postMessage"] == 2
app_mention_body = {
"token": "verification_token",
"team_id": "T_INSTALLED",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"client_msg_id": "9cbd4c5b-7ddf-4ede-b479-ad21fca66d63",
"type": "app_mention",
"text": "<@W111> Hi there!",
"user": "W222",
"ts": "1595926230.009600",
"team": "T_INSTALLED",
"channel": "C111",
"event_ts": "1595926230.009600",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1595926230,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
reaction_added_body = {
"token": "verification_token",
"team_id": "T_SOURCE",
"enterprise_id": "E_SOURCE",
"api_app_id": "A111",
"event": {
"type": "reaction_added",
"user": "W111",
"item": {"type": "message", "channel": "C111", "ts": "1599529504.000400"},
"reaction": "heart_eyes",
"item_user": "W111",
"event_ts": "1599616881.000800",
},
"type": "event_callback",
"event_id": "Ev111",
"event_time": 1599616881,
"authorizations": [
{
"enterprise_id": "E_INSTALLED",
"team_id": "T_INSTALLED",
"user_id": "W111",
"is_bot": True,
"is_enterprise_install": False,
}
],
}
async def random_sleeper(body, say, payload, event):
assert body == app_mention_body
assert body["event"] == payload
assert payload == event
seconds = random() + 2 # 2-3 seconds
await asyncio.sleep(seconds)
await say(f"Sending this message after sleeping for {seconds} seconds")
async def whats_up(body, say, payload, event):
assert body["event"] == payload
assert payload == event
await say("What's up?")
async def skip_middleware(req, resp, next):
# return next()
pass
async def always_failing():
raise Exception("Something wrong!")
| 34.035398 | 87 | 0.561258 |
09032824deadde17dc95c755a9a0d23a22540272 | 623 | py | Python | lib/sedna/__init__.py | wangyuan249/sedna | 304059ef46e87a637eff22a92f1b0894216fa3ea | [
"Apache-2.0"
] | 1 | 2021-01-29T11:12:54.000Z | 2021-01-29T11:12:54.000Z | lib/sedna/__init__.py | kevinshan/sedna | 9411a2cd0ef5e86ed76a910d60685e37d4404b65 | [
"Apache-2.0"
] | null | null | null | lib/sedna/__init__.py | kevinshan/sedna | 9411a2cd0ef5e86ed76a910d60685e37d4404b65 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .__version__ import __version__
| 38.9375 | 74 | 0.76886 |
8f97a64707f7e163039bd21801880aef0f192d11 | 6,387 | py | Python | telecarla_scenario_runner/script/plot_results.py | hofbi/telecarla | 020704a3b7087bc426f5ff97655c7e676c8b01bf | [
"MIT"
] | 26 | 2020-06-09T18:28:07.000Z | 2022-03-19T01:27:40.000Z | telecarla_scenario_runner/script/plot_results.py | hofbi/telecarla | 020704a3b7087bc426f5ff97655c7e676c8b01bf | [
"MIT"
] | 15 | 2020-06-21T21:04:44.000Z | 2022-02-20T17:24:58.000Z | telecarla_scenario_runner/script/plot_results.py | hofbi/telecarla | 020704a3b7087bc426f5ff97655c7e676c8b01bf | [
"MIT"
] | 7 | 2020-06-21T11:55:53.000Z | 2021-12-18T09:16:06.000Z | """
Plot results of one or multiple scenario runner evaluations
"""
import argparse
import os
import re
import statistics
import sys
import xml.etree.cElementTree as ET
import matplotlib.pyplot as plt
import numpy as np
class ScenarioResult:
"""
Collection of multiple results for the same scenario
"""
def __init__(self, name):
self._name = name
self._collisions = []
self._durations = []
def __lt__(self, other):
return self.name < other.name
def add_result(self, collision, duration):
self._collisions.append(collision)
self._durations.append(duration)
@property
def collision_rate(self):
return np.sum(self.collisions) / len(self.collisions)
@property
def mean_duration(self):
return statistics.mean(self.durations)
@property
def std_dev_duration(self):
return statistics.stdev(self.durations)
@property
def collisions(self):
return self._collisions
@property
def durations(self):
return self._durations
@property
def name(self):
return self._name
def main():
"""main"""
parser = argparse.ArgumentParser(
description="Plot the results from the scenario evaluation",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--eval_dir",
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])), "..", "output"
),
help="Path to the scenario runner results",
)
parser.add_argument(
"--out_dir",
type=str,
default=os.path.join(
os.path.dirname(os.path.abspath(sys.argv[0])), "..", "output"
),
help="Path to the output directory",
)
parser.add_argument(
"--yerr", action="store_true", help="Show the standard deviation"
)
parser.add_argument("--show", action="store_true", help="Show the plot")
args = parser.parse_args()
scenario_result_files = get_scenario_result_file_paths(args.eval_dir)
scenario_results = get_scenario_results(scenario_result_files)
plot_results(scenario_results, args.yerr)
plt.savefig(os.path.join(args.out_dir, os.path.basename(args.eval_dir)))
if args.show:
plt.show()
def plot_results(scenario_results, show_yerr):
"""
    Plot mean scenario durations with collision rates overlaid as a second bar.
    :param scenario_results: dict mapping scenario name to ScenarioResult
    :param show_yerr: if True, draw the standard deviation of the durations as error bars
    :return:
"""
plt.figure("Scenario Runner Results")
sorted_results = sorted(scenario_results.values())
durations = [result.mean_duration for result in sorted_results]
failure_rates = [result.collision_rate for result in sorted_results]
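    # Scale each collision rate by the mean duration so the collision bar can be
    # drawn as a proportional overlay on top of the duration bar.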
failures = [a * b for a, b in zip(durations, failure_rates)]
indices = range(1, len(scenario_results) + 1)
average = sum(durations) / len(durations)
if show_yerr:
duration_errors = [result.std_dev_duration for result in sorted_results]
plt.bar(
indices,
durations,
yerr=duration_errors,
align="center",
label="Mean Duration (∅%.0fs)" % average,
)
else:
plt.bar(
indices, durations, align="center", label="Mean Duration (∅%.0fs)" % average
)
average = sum(failure_rates) / len(failure_rates) * 100
rects = plt.bar(
indices,
failures,
align="center",
label="Collision Rate (∅{0:.0f}%)".format(average),
)
auto_label(rects, failure_rates)
plt.xlabel("Scenario")
plt.ylabel("Mean Scenario Duration [s]")
plt.legend()
def auto_label(rects, values):
"""
    Annotate each bar with its collision rate as a percentage.
    :param rects: bar containers returned by plt.bar
    :param values: collision rates (fractions) used for the percentage labels
    :return:
"""
for index, rect in enumerate(rects):
plt.annotate(
"{0:.2f}%".format(100 * float(values[index])),
xy=(rect.get_x() + rect.get_width() / 2, rect.get_height()),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha="center",
va="bottom",
)
def get_scenario_results(scenario_result_files):
"""
    Parse the scenario runner XML result files into ScenarioResult objects.
    :param scenario_result_files: paths to the XML result files
    :return: dict mapping scenario name to ScenarioResult
"""
scenario_results = {}
for scenario_result_file in scenario_result_files:
fix_cdata(scenario_result_file)
root = ET.parse(scenario_result_file).getroot()
test_suite = root[0]
test_name = test_suite.attrib["name"]
if test_name not in scenario_results:
scenario_results[test_name] = ScenarioResult(test_name)
scenario_results[test_name].add_result(
has_collision(test_suite), get_duration(test_suite)
)
return scenario_results
def fix_cdata(scenario_result_file):
"""
Currently the scenario runner produces invalid XML CDATA,
which is fixed by this function to have a valid XML for the parser
:param scenario_result_file:
:return:
"""
with open(scenario_result_file, "r") as file:
file_data = file.read()
file_data = file_data.replace(r"\[CDATA\[", r"[CDATA[")
file_data = file_data.replace(r"\]\]", "]]")
with open(scenario_result_file, "w") as file:
file.write(file_data)
def has_collision(test_suite):
"""
    Check whether a test run ended in a collision.
    :param test_suite: <testsuite> element parsed from a scenario runner result file
    :return: True if the run recorded a failure (collision), False otherwise
"""
return test_suite[0].find("failure") is not None
def get_duration(test_suite):
"""
    Get the duration of a single run in seconds.
    :param test_suite: <testsuite> element parsed from a scenario runner result file
    :return: duration as a float
"""
duration_case = test_suite[1]
if len(list(duration_case)) > 0:
text = duration_case[0].text
else:
text = duration_case.text
durations = re.findall(r"[-+]?\d*\.\d+|\d+", text)
return float(durations[0])
def get_scenario_result_file_paths(eval_dir):
"""
    Get all scenario result file paths located in the given directory.
    :param eval_dir: directory searched recursively for .xml result files
    :return: list of absolute file paths
"""
scenario_results = []
for root, dirs, files in os.walk(eval_dir):
for scenario_file in files:
if scenario_file.endswith(".xml"):
scenario_results.append(
os.path.abspath(os.path.join(root, scenario_file))
)
return scenario_results
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| 26.502075 | 88 | 0.627994 |
23e8320d0330d1ec8f7c296ccce6c6ea052ae6e2 | 12,809 | py | Python | ief_core/tests/old_tests/test_rnn.py | zeshanmh/ief | 1b7dbd340ecb8ccf40d22de989e3bc3d92135a45 | [
"MIT"
] | 5 | 2021-04-11T04:49:24.000Z | 2022-03-28T18:43:45.000Z | ief_core/tests/old_tests/test_rnn.py | clinicalml/ief | 97bcaad85ec820fbe062a86c6c500a308904f029 | [
"MIT"
] | 1 | 2021-12-13T06:33:16.000Z | 2021-12-16T02:04:14.000Z | ief_core/tests/old_tests/test_rnn.py | clinicalml/ief | 97bcaad85ec820fbe062a86c6c500a308904f029 | [
"MIT"
] | 1 | 2022-02-01T03:10:16.000Z | 2022-02-01T03:10:16.000Z | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import sys
import optuna
import os
from lifelines.utils import concordance_index
from sklearn.metrics import r2_score
from torch.utils.data import DataLoader, TensorDataset
from torchcontrib.optim import SWA
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from argparse import ArgumentParser
from distutils.util import strtobool
sys.path.append('../')
sys.path.append('../../data/ml_mmrf')
sys.path.append('../../data/')
from ml_mmrf_v1.data import load_mmrf
from synthetic.synthetic_data import load_synthetic_data_trt, load_synthetic_data_noisy
from models.rnn import GRU
from main_trainer import *
from semi_synthetic.ss_data import *
def test_gru_load():
checkpoint_path = '../tbp_logs/rnn_pkpd_semi_synthetic_subsample_best/version_0/checkpoints/epoch=969.ckpt'
checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
hparams = checkpoint['hyper_parameters']
gru = GRU(**hparams); gru.setup(1)
gru.load_state_dict(checkpoint['state_dict'])
assert 'dim_data' in gru.hparams
assert 'dim_treat' in gru.hparams
assert 'dim_base' in gru.hparams
assert gru.hparams['mtype'] == 'pkpd_gru'
valid_loader = gru.val_dataloader()
(nelbo, nll, kl, _), _ = gru.forward(*valid_loader.dataset.tensors, anneal = 1.)
print(nelbo)
def test_gru():
seed_everything(0)
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='gru', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='semisup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from FOMM and base trainer
parser = GRU.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = 100
dict_args = vars(args)
# initialize FOMM w/ args and train
model = GRU(**dict_args)
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False)
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
valid_loader = model.val_dataloader()
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
    assert abs(nelbo.item() - 146.43) < 1e-1
def run_gru_ss():
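    # Each config tuple is (seed, nsamples_syn, model type, dim_hidden, C, reg_all,
    # reg_type, lr); it is unpacked in that order inside the loop below.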
model_configs = [
# (0, 1000, 'gru', 514, 0.198, False, 'l2', .0023),
# (0, 1500, 'gru', 578, 0.0445, False, 'l2', .000655),
# (0, 2000, 'gru', 677, 0.00585, True, 'l1', .000599),
# (0, 10000, 'gru', 676, 0.002312, True, 'l1', 0.001280),
# (0, 1000, 'pkpd_gru', 290, 0.09916, False, 'l2', .002916),
# (0, 1500, 'pkpd_gru', 502, 0.02635, False, 'l2', .001307),
# (0, 2000, 'pkpd_gru', 298, 0.031917, False, 'l2', .006825)
(0, 1000, 'gru', 250, 0.01, False, 'l1', 1e-3),
(0, 1500, 'gru', 500, 0.01, False, 'l1', 1e-3),
(0, 2000, 'gru', 250, 0.01, False, 'l1', 1e-3),
(0, 10000, 'gru', 500, 0.01, False, 'l1', 1e-3),
(0, 1000, 'pkpd_gru', 500, 0.01, True, 'l2', 1e-3),
(0, 1500, 'pkpd_gru', 500, 0.01, False, 'l2', 1e-3),
(0, 2000, 'pkpd_gru', 500, 0.01, False, 'l1', 1e-3)
]
fname = './gru_ss_results_take2.txt'
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='gru', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=1000, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='semi_synthetic', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=2000, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--seed', default=1, type=int)
parser.add_argument('--ss_missing', type=strtobool, default=True, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=True, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from FOMM and base trainer
parser = GRU.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
# parse args and convert to dict
args = parser.parse_args()
fi = open(fname, 'w')
for k,model_config in enumerate(model_configs):
seed, nsamples_syn, mtype, dim_hidden, C, reg_all, reg_type, lr = model_config
seed_everything(seed)
args.lr = lr
args.max_epochs = 1000
args.nsamples_syn = nsamples_syn
args.mtype = mtype
args.dim_hidden = dim_hidden
args.alpha1_type = 'linear'
args.add_stochastic = False
args.C = C; args.reg_all = reg_all; args.reg_type = reg_type
dict_args = vars(args)
trial = optuna.trial.FixedTrial({'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_hidden': args.dim_hidden})
# initialize FOMM w/ args and train
model = GRU(trial, **dict_args)
in_sample_dist = model.hparams.ss_in_sample_dist; add_missing = model.hparams.ss_missing
print(f'[RUNNING] model config {k+1}: N = {args.nsamples_syn}, mtype = {args.mtype}, C = {args.C}, reg_all = {args.reg_all}, reg_type = {args.reg_type}, in_sample_dist = {in_sample_dist}, add_missing = {add_missing}')
fi.write(f'[RUNNING] model config {k+1}: N = {args.nsamples_syn}, mtype = {args.mtype}, C = {args.C}, reg_all = {args.reg_all}, reg_type = {args.reg_type}, in_sample_dist = {in_sample_dist}, add_missing = {add_missing}\n')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, checkpoint_callback=False, gpus=[2], check_val_every_n_epoch=10)
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
if torch.cuda.is_available():
device = torch.device('cuda:2')
else:
device = torch.device('cpu')
ddata = load_ss_data(model.hparams['nsamples_syn'], gen_fly=True, eval_mult=200, in_sample_dist=in_sample_dist, add_missing=add_missing)
print(f'eval set size: {ddata["valid"][0]["X"].shape}')
nelbos = []
for i in range(1,5):
_, valid_loader = load_ss_helper(ddata, tvt='valid', bs=model.hparams['bs'], device=device, valid_fold=i)
batch_nelbos = []
for i_batch, valid_batch_loader in enumerate(valid_loader):
(nelbo, nll, kl, _), _ = model.forward(*valid_batch_loader, anneal = 1.)
nelbo, nll, kl = nelbo.item(), nll.item(), kl.item()
batch_nelbos.append(nelbo)
# (nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(np.mean(batch_nelbos))
print(f'[COMPLETE] model config {k+1}: mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}')
fi.write(f'[COMPLETE] model config {k+1}: mean nelbo: {np.mean(nelbos)}, std nelbo: {np.std(nelbos)}\n\n')
print()
def test_gru_pkpd():
seed_everything(0)
configs = [
(1000, 'pkpd_gru_att', 500, 0.01, True, 'l2')
# (1000, 'gru', 250, 0.01, True, 'l2')
]
parser = ArgumentParser()
parser.add_argument('--model_name', type=str, default='gru', help='fomm, ssm, or gru')
parser.add_argument('--lr', type=float, default=1e-3, help='learning rate')
parser.add_argument('--anneal', type=float, default=1., help='annealing rate')
parser.add_argument('--fname', type=str, help='name of save file')
parser.add_argument('--imp_sampling', type=bool, default=False, help='importance sampling to estimate marginal likelihood')
parser.add_argument('--nsamples', default=1, type=int)
parser.add_argument('--nsamples_syn', default=100, type=int, help='number of training samples for synthetic data')
parser.add_argument('--optimizer_name', type=str, default='adam')
parser.add_argument('--dataset', default='mm', type=str)
parser.add_argument('--eval_type', type=str, default='nelbo')
parser.add_argument('--loss_type', type=str, default='unsup')
parser.add_argument('--bs', default=600, type=int, help='batch size')
parser.add_argument('--fold', default=1, type=int)
parser.add_argument('--optuna', type=strtobool, default=True, help='whether to use optuna to optimize hyperparams')
parser.add_argument('--ss_missing', type=strtobool, default=False, help='whether to add missing data in semi synthetic setup or not')
parser.add_argument('--ss_in_sample_dist', type=strtobool, default=False, help='whether to use mm training patients to generate validation/test set in semi synthetic data')
parser.add_argument('--att_mask', type=strtobool, default=False, help='set to True for SSMAtt and FOMMAtt')
# THIS LINE IS KEY TO PULL THE MODEL NAME
temp_args, _ = parser.parse_known_args()
# add rest of args from GRU and base trainer
parser = GRU.add_model_specific_args(parser)
parser = Trainer.add_argparse_args(parser)
for k,config in enumerate(configs):
print(f'running config: {config}')
max_epochs, mtype, dh, C, reg_all, reg_type = config
# parse args and convert to dict
args = parser.parse_args()
args.max_epochs = max_epochs
args.mtype = mtype
args.dim_hidden = dh
args.reg_type = reg_type
args.C = C
args.reg_all = reg_all
args.alpha1_type = 'linear'
args.add_stochastic = False
dict_args = vars(args)
# initialize FOMM w/ args and train
trial = optuna.trial.FixedTrial({'lr': args.lr, 'C': args.C, 'reg_all': args.reg_all, 'reg_type': args.reg_type, 'dim_hidden': args.dim_hidden})
model = GRU(trial, **dict_args)
# early_stop_callback = EarlyStopping(
# monitor='val_loss',
# min_delta=0.00,
# patience=10,
# verbose=False,
# mode='min'
# )
checkpoint_callback = ModelCheckpoint(filepath='./checkpoints/gru_att1{epoch:05d}-{val_loss:.2f}')
trainer = Trainer.from_argparse_args(args, deterministic=True, logger=False, gpus=[2], \
early_stop_callback=False, checkpoint_callback=checkpoint_callback)
trainer.fit(model)
# evaluate on validation set; this should match what we were getting with the old codebase (after 100 epochs)
valid_loader = model.val_dataloader()
nelbos = []
for i in range(50):
(nelbo, nll, kl, _), _ = model.forward(*valid_loader.dataset.tensors, anneal = 1.)
nelbos.append(nelbo.item())
print(f'final nll for {config} (config {k+1}): mean: {np.mean(nelbos)}, std: {np.std(nelbos)}')
# assert (nelbo.item() - 183.759) < 1e-1
if __name__ == '__main__':
# run_gru_ss()
test_gru_pkpd() | 51.649194 | 230 | 0.666406 |
723203e05697d79668f0deb0825cc7523aa5ba48 | 7,223 | py | Python | tests/test_TDict.py | tadashi-aikawa/dict-mixin | 6418694aa2f7a8dc2c1990818a917e9b22ec1d16 | [
"MIT"
] | 3 | 2018-03-09T05:01:34.000Z | 2020-10-29T01:29:28.000Z | tests/test_TDict.py | tadashi-aikawa/dict-mixin | 6418694aa2f7a8dc2c1990818a917e9b22ec1d16 | [
"MIT"
] | 13 | 2017-11-07T08:28:51.000Z | 2020-07-11T09:20:47.000Z | tests/test_TDict.py | tadashi-aikawa/dict-mixin | 6418694aa2f7a8dc2c1990818a917e9b22ec1d16 | [
"MIT"
] | 1 | 2017-03-11T19:19:33.000Z | 2017-03-11T19:19:33.000Z | # coding: utf-8
from owlmixin import OwlMixin, TOption
from owlmixin.owlcollections import TDict, TList
# For python 3.5.0-3.5.1
try:
from typing import Text
except ImportError:
pass
class Address(OwlMixin):
name: str
class Spot(OwlMixin):
names: TList[str]
address: TOption[Address]
class TestGet:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).get("a").get().to_dict() == {
"names": ["spot1"],
"address": {
"name": "address1"
}
}
def test_not_found(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).get("c").is_none()
class TestMap:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
# Sort for test
assert sorted(Spot.from_dicts_by_key(d).map(lambda k, v: v.names), key=len) == [["spot1"], ["spot21", "spot22"]]
class TestMapValues:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
# Sort for test
assert Spot.from_dicts_by_key(d).map_values(lambda v: len(v.names)).to_dict() == {"a": 1, "b": 2}
class TestMapValues2:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
# Sort for test
assert Spot.from_dicts_by_key(d).map_values2(lambda k, v: f"len({k}.name) -> {len(v.names)}").to_dict() == {
"a": "len(a.name) -> 1",
"b": "len(b.name) -> 2"
}
class TestFilter:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).filter(lambda k, v: v.address.get()).to_dicts() == [{
"names": ["spot1"],
"address": {
"name": "address1"
}
}]
class TestReject:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).reject(lambda k, v: v.address.get()).to_dicts() == [{
"names": ["spot21", "spot22"]
}]
class TestSum:
def test_normal(self):
assert TDict({"a": 1, "b": 2, "c": 3}).sum() == 6
class TestSumBy:
def test_normal(self):
d = {"aaa": {"names": ["spot1"], "address": {"name": "address1"}}, "bb": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).sum_by(lambda k, v: len(k) * len(v.names)) == 7
class TestSize:
def test_normal(self):
d = {"a": {"names": ["spot1"], "address": {"name": "address1"}}, "b": {"names": ["spot21", "spot22"]}}
assert Spot.from_dicts_by_key(d).size() == 2
class TestFind:
def test_normal(self):
d = {
"a": {
"names": ["spot1"],
"address": {
"name": "address1"
}
},
"b": {
"names": ["spot21", "spot22"]
},
"c": {
"names": ["spot31", "spot32", "spot33"]
}
}
assert Spot.from_dicts_by_key(d).find(lambda k, v: len(v.names) == 2).get().to_dict(ignore_none=True) == {
"names": ["spot21", "spot22"]
}
def test_not_found(self):
d = {
"a": {
"names": ["spot1"],
"address": {
"name": "address1"
}
},
"b": {
"names": ["spot21", "spot22"]
},
"c": {
"names": ["spot31", "spot32"]
}
}
assert Spot.from_dicts_by_key(d).find(lambda k, v: v.names == 3).is_none()
class TestToList:
def test_normal(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
# Sort for test
assert sorted(Spot.from_dicts_by_key(d).to_list().to_dicts(ignore_none=True), key=lambda x: x["names"][0]) == [{
"names": ["spot1"]
}, {
"names": ["spot21", "spot22"]
}, {
"names": ["spot31", "spot32"]
}]
class TestAll:
def test_true(self):
d = {"a": {"names": ["spot1"]}, "bb": {"names": ["spot21", "spot22"]}, "cc": {"names": ["spot31", "spot32"]}}
assert Spot.from_dicts_by_key(d).all(lambda k, v: len(k) == len(v.names)) is True
def test_false(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
assert Spot.from_dicts_by_key(d).all(lambda k, v: len(k) == len(v.names)) is False
class TestAny:
def test_true(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
assert Spot.from_dicts_by_key(d).any(lambda k, v: len(k) == len(v.names)) is True
def test_false(self):
d = {
"aaa": {
"names": ["spot1"]
},
"bbb": {
"names": ["spot21", "spot22"]
},
"ccc": {
"names": ["spot31", "spot32"]
}
}
assert Spot.from_dicts_by_key(d).any(lambda k, v: len(k) == len(v.names)) is False
class TestAssign:
def test_normal(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
d2 = {"c": {"names": ["spot3"]}, "d": {"names": ["spot4"]}}
spots_by_key: TDict[Spot] = Spot.from_dicts_by_key(d)
actual: TDict[Spot] = spots_by_key.assign(d2)
assert {
"a": {
"names": ["spot1"]
},
"b": {
"names": ["spot21", "spot22"]
},
"c": {
"names": ["spot3"]
},
"d": {
"names": ["spot4"]
}
} == actual.to_dict()
actual['a'] = None
assert actual['a'] is None
assert d['a'] is not None
assert spots_by_key['a'] is not None
class TestPickBy:
def test_normal(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
actual: TDict[Spot] = Spot.from_dicts_by_key(d).pick_by(lambda k, v: len(v.names) > 1 and k in ["a", "b"])
assert {"b": {"names": ["spot21", "spot22"]}} == actual.to_dict()
class TestOmitBy:
def test_normal(self):
d = {"a": {"names": ["spot1"]}, "b": {"names": ["spot21", "spot22"]}, "c": {"names": ["spot31", "spot32"]}}
actual: TDict[Spot] = Spot.from_dicts_by_key(d).omit_by(lambda k, v: len(v.names) > 1 and k in ["a", "b"])
assert {"a": {"names": ["spot1"]}, "c": {"names": ["spot31", "spot32"]}} == actual.to_dict()
| 30.348739 | 120 | 0.47418 |
180b33ab8cca22ba43ba48d81418db32aceeff8d | 5,899 | py | Python | bot.py | theerfan/PublicQABot | d18b351e8ad502ddc60f9f2a870be9fe8601b71b | [
"MIT"
] | 4 | 2019-07-04T20:36:59.000Z | 2022-03-03T09:00:55.000Z | bot.py | theerfan/PublicQABot | d18b351e8ad502ddc60f9f2a870be9fe8601b71b | [
"MIT"
] | null | null | null | bot.py | theerfan/PublicQABot | d18b351e8ad502ddc60f9f2a870be9fe8601b71b | [
"MIT"
] | null | null | null | import telegram
from telegram.ext import Updater, Dispatcher, MessageHandler, MessageQueue, CommandHandler, Filters
TOKEN = "REDACTED"
bot = telegram.Bot(TOKEN)
updater = Updater(token=TOKEN)
dispatcher = updater.dispatcher
jq = updater.job_queue
list_of_active_users = dict()
FAIL_TEXT = "متاسفانه مشکلی پیش اومد، لطفا دوباره پیامتون رو ارسال کنید."
WELCOME_TEXT = "سلام. به بات پرسش و پاسخ خوش آمدید."
WAIT_TEXT = "پیام شما دریافت شد؛ لطفا شکیبا باشید تا مسئولین جواب بدند."
RESPONDED_TEXT = "جوابتون رو اینجا دادیم"
# must not start with an @
RESPONDER_ID = "REDACTED"
# must start with an @
CHANNEL_ID = "REDACTED"
DEV_ID = "REDACTED"
ASKED_TEXT = "پرسیده اند که"
ANSWERED_TEXT = "و جواب این است که"
HIDDEN_STATE = "مخفی"
SHOWING_STATE = "نمایان"
YOUR_STATE = "وضعیت فعلی شما: "
def start(bot, update):
message = update.message
chat_id = message.chat_id
user = message.from_user.username
add_to_users(user, chat_id)
bot.send_message(chat_id=chat_id, text=WELCOME_TEXT)
def receive(bot, update):
    user = update.message.from_user
username = user.username
message = update.message
add_to_users(username, message.chat_id)
if username == RESPONDER_ID:
receive_from_ta(bot, update)
else:
receive_from_users(bot, update, user, message)
def add_to_users(user, chat_id):
if user not in list_of_active_users.keys():
list_of_active_users.update({user: {'id': chat_id, 'visible': True}})
def format_tas_outgoing_string(text, answer_text, caption=None):
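    # `text` (or `caption` for media messages) is expected in the format built by
    # receive_from_users/forward_media: line 1 = asker's full name, line 2 = "@username",
    # remaining lines = the question itself.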
half = txt = ""
checkText = text
if not text:
text = caption
splitMessage = text.split('\n')
asker = {"name": splitMessage[0], "handle": splitMessage[1][1:]}
for i in range(2, len(splitMessage)):
half += str(splitMessage[i])
if checkText and caption:
half += "\n" + caption
if list_of_active_users[asker["handle"]]["visible"]:
txt = asker["name"]
txt += " " + ASKED_TEXT + ":\n" + half + "\n\n" + ANSWERED_TEXT + ":\n" + answer_text
return txt, asker
def receive_from_ta(bot, update):
sent_in_channel = None
answer = update.message
message = answer.reply_to_message
caption = message.caption
answer_text = answer.text
txt, asker = format_tas_outgoing_string(message.text, answer_text, caption)
sent_in_channel = returnSentMedia(bot, CHANNEL_ID, message, txt)
sent_link = sent_in_channel.link
bot.send_message(chat_id=list_of_active_users[asker["handle"]]["id"],
text='<a href="' + sent_link + '">' + RESPONDED_TEXT + '</a>',
parse_mode=telegram.ParseMode.HTML)
def not_sent_error(bot, message, ex):
bot.send_message(chat_id=message.chat_id, text=FAIL_TEXT)
    bot.send_message(chat_id=list_of_active_users[DEV_ID]["id"], text=str(ex) + "\n" + str(message.chat_id))
def receive_from_users(bot, update, user, message):
try:
if is_a_registered_member(user):
txt = user.full_name + "\n" + "@" + user.username + "\n" + message.text
bot.send_message(chat_id=list_of_active_users[RESPONDER_ID]["id"], text=txt)
bot.send_message(chat_id=message.chat_id, text=WAIT_TEXT)
except Exception as ex:
not_sent_error(bot, message, ex)
def returnSentMedia(bot, ta_id, message, txt):
sent_in_channel = None
if message.video:
sent_in_channel = bot.send_video(chat_id=ta_id , video=message.video.file_id, caption=txt)
if message.photo:
photos = message.photo
lastPhoto = len(photos) - 1
sent_in_channel = bot.send_photo(chat_id=ta_id, photo=message.photo[lastPhoto].file_id, caption=txt)
    if message.audio:
        sent_in_channel = bot.send_audio(chat_id=ta_id, audio=message.audio.file_id, caption=txt)
    if message.document:
        sent_in_channel = bot.send_document(chat_id=ta_id, document=message.document.file_id, caption=txt)
if not sent_in_channel:
sent_in_channel = bot.send_message(chat_id=ta_id, text=txt)
return sent_in_channel
def forward_media(bot, update):
    ta_id = list_of_active_users[RESPONDER_ID]["id"]
message = update.message
user = message.from_user
username = user.username
chat_id = message.chat_id
add_to_users(username, chat_id)
txt = ""
if username != RESPONDER_ID:
try:
if message.caption:
txt += message.caption
if message.text and txt != "":
txt += "\n" + message.text
txt = user.full_name + "\n" + "@" + username + "\n" + txt
returnSentMedia(bot, ta_id, message, txt)
bot.send_message(chat_id=message.chat_id, text=WAIT_TEXT)
except Exception as ex:
not_sent_error(bot, message, ex)
def is_a_registered_member(user):
'''
Will be changed if in a future event we only want the opinion of the participants,
As of April 3rd, 2019 it's being used for an "Asrane" event, so there's no use to it.
'''
return True
def toggle_name_visibility(bot, update):
message = update.message
user = message.from_user
username = user.username
chat_id = message.chat_id
if username in list_of_active_users.keys():
user_in_list = list_of_active_users[username]
user_in_list["visible"] = not user_in_list["visible"]
if user_in_list["visible"]:
bot.send_message(chat_id=chat_id, text=YOUR_STATE + SHOWING_STATE)
else:
bot.send_message(chat_id=chat_id, text=YOUR_STATE + HIDDEN_STATE)
dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(CommandHandler('toggle', toggle_name_visibility))
dispatcher.add_handler(MessageHandler(Filters.text, receive))
dispatcher.add_handler(MessageHandler(Filters.audio | Filters.video | Filters.photo | Filters.document, forward_media))
updater.start_polling() | 37.100629 | 119 | 0.68537 |
a498960ba6c4d99d2c29abba40d960990e60d4a6 | 842 | py | Python | molecool/io/xyz.py | rtb1c13/molecool | b296f7c3afea4bad32e4b20000a0ec1e82c7c3ce | [
"BSD-3-Clause"
] | null | null | null | molecool/io/xyz.py | rtb1c13/molecool | b296f7c3afea4bad32e4b20000a0ec1e82c7c3ce | [
"BSD-3-Clause"
] | 1 | 2020-05-08T15:52:06.000Z | 2020-05-08T15:52:55.000Z | molecool/io/xyz.py | rtb1c13/molecool | b296f7c3afea4bad32e4b20000a0ec1e82c7c3ce | [
"BSD-3-Clause"
] | null | null | null | """
Functions to manipulate xyz files
"""
import numpy as np
def open_xyz(file_location):
# Open an xyz file and return symbols and coordinates.
xyz_file = np.genfromtxt(fname=file_location, skip_header=2, dtype='unicode')
symbols = xyz_file[:,0]
coords = (xyz_file[:,1:])
    coords = coords.astype(float)  # use the builtin float; the np.float alias is deprecated/removed in newer NumPy
return symbols, coords
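# Example usage of the two helpers in this module (a sketch; 'water.xyz' is a placeholder path):
#   symbols, coords = open_xyz('water.xyz')
#   write_xyz('water_copy.xyz', symbols, coords)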
def write_xyz(file_location, symbols, coordinates):
# Write an xyz file given a file location, symbols, and coordinates.
num_atoms = len(symbols)
with open(file_location, 'w+') as f:
f.write('{}\n'.format(num_atoms))
f.write('XYZ file\n')
for i in range(num_atoms):
f.write('{}\t{}\t{}\t{}\n'.format(symbols[i],
coordinates[i,0], coordinates[i,1], coordinates[i,2]))
| 30.071429 | 100 | 0.602138 |
0640362b1b5f8b321827749bf43fc6efba3566f0 | 9,237 | py | Python | gewittergefahr/scripts/run_echo_top_tracking.py | dopplerchase/GewitterGefahr | 4415b08dd64f37eba5b1b9e8cc5aa9af24f96593 | [
"MIT"
] | 26 | 2018-10-04T01:07:35.000Z | 2022-01-29T08:49:32.000Z | gewittergefahr/scripts/run_echo_top_tracking.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 4 | 2017-12-25T02:01:08.000Z | 2018-12-19T01:54:21.000Z | gewittergefahr/scripts/run_echo_top_tracking.py | liuximarcus/GewitterGefahr | d819874d616f98a25187bfd3091073a2e6d5279e | [
"MIT"
] | 11 | 2017-12-10T23:05:29.000Z | 2022-01-29T08:49:33.000Z | """Tracks storms based on echo top."""
import argparse
from gewittergefahr.gg_io import myrorss_io
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import echo_top_tracking
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
NATIVE_ECHO_TOP_FIELD_NAMES = [
radar_utils.ECHO_TOP_18DBZ_NAME, radar_utils.ECHO_TOP_50DBZ_NAME
]
RADAR_DIR_ARG_NAME = 'input_radar_dir_name'
TARRED_RADAR_DIR_ARG_NAME = 'input_radar_dir_name_tarred'
ECHO_CLASSIFN_DIR_ARG_NAME = 'input_echo_classifn_dir_name'
FIRST_SPC_DATE_ARG_NAME = 'first_spc_date_string'
LAST_SPC_DATE_ARG_NAME = 'last_spc_date_string'
ECHO_TOP_FIELD_ARG_NAME = 'echo_top_field_name'
MIN_ECHO_TOP_ARG_NAME = 'min_echo_top_km'
MIN_SIZE_ARG_NAME = 'min_size_pixels'
MIN_INTERMAX_DISTANCE_ARG_NAME = 'min_intermax_distance_metres'
MAX_VELOCITY_DIFF_ARG_NAME = 'max_velocity_diff_m_s01'
MAX_LINK_DISTANCE_ARG_NAME = 'max_link_distance_m_s01'
OUTPUT_DIR_ARG_NAME = 'output_tracking_dir_name'
RADAR_DIR_HELP_STRING = (
'Name of top-level radar directory. Files therein will be found by '
'`echo_top_tracking._find_input_radar_files`.')
TARRED_RADAR_DIR_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] Name of top-level directory with'
' tarred MYRORSS files. These files will be untarred before tracking (into'
' `{3:s}`) and the untarred files will be deleted after tracking.'
).format(
ECHO_TOP_FIELD_ARG_NAME, NATIVE_ECHO_TOP_FIELD_NAMES[0],
NATIVE_ECHO_TOP_FIELD_NAMES[1], RADAR_DIR_ARG_NAME
)
ECHO_CLASSIFN_DIR_HELP_STRING = (
'Name of top-level directory with echo-classification files. Files therein'
' will be found by `echo_classification.find_classification_file` and read '
'by `echo_classification.read_classifications`. Tracking will be performed'
' only on convective pixels. If you do not want to use a convective mask, '
'leave this argument alone.')
SPC_DATE_HELP_STRING = (
'SPC date (format "yyyymmdd"). Tracking will be performed for all SPC '
'dates in the period `{0:s}`...`{1:s}`.'
).format(FIRST_SPC_DATE_ARG_NAME, LAST_SPC_DATE_ARG_NAME)
ECHO_TOP_FIELD_HELP_STRING = (
'Name of echo-top field to use for tracking. Must be accepted by '
'`echo_top_tracking._check_radar_field`.')
MIN_ECHO_TOP_HELP_STRING = (
'Minimum echo top. Smaller values are not considered storms.')
MIN_SIZE_HELP_STRING = 'Minimum storm-object size.'
MIN_INTERMAX_DISTANCE_HELP_STRING = (
'Minimum distance between any pair of local maxima at the same time. See '
'`echo_top_tracking._remove_redundant_local_maxima` for details.')
MAX_VELOCITY_DIFF_HELP_STRING = (
'Used to connect local maxima (storm objects) between times. See '
'`echo_top_tracking._link_local_maxima_in_time` for details.')
MAX_LINK_DISTANCE_HELP_STRING = (
'Used to connect local maxima (storm objects) between times. See '
'`echo_top_tracking._link_local_maxima_in_time` for details.')
OUTPUT_DIR_HELP_STRING = (
'Name of top-level output directory. Output files will be written by '
'`storm_tracking_io.write_processed_file`, to locations therein determined '
'by `storm_tracking_io.find_processed_file`.')
TARRED_RADAR_DIR_NAME_DEFAULT = '/condo/swatcommon/common/myrorss'
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + RADAR_DIR_ARG_NAME, type=str, required=True,
help=RADAR_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + TARRED_RADAR_DIR_ARG_NAME, type=str, required=False,
default=TARRED_RADAR_DIR_NAME_DEFAULT, help=TARRED_RADAR_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + ECHO_CLASSIFN_DIR_ARG_NAME, type=str, required=False, default='',
help=ECHO_CLASSIFN_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + FIRST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAST_SPC_DATE_ARG_NAME, type=str, required=True,
help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + ECHO_TOP_FIELD_ARG_NAME, type=str, required=False,
default=radar_utils.ECHO_TOP_40DBZ_NAME, help=ECHO_TOP_FIELD_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MIN_ECHO_TOP_ARG_NAME, type=float, required=False, default=4.,
help=MIN_ECHO_TOP_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MIN_SIZE_ARG_NAME, type=int, required=False,
default=echo_top_tracking.DEFAULT_MIN_SIZE_PIXELS,
help=MIN_SIZE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MIN_INTERMAX_DISTANCE_ARG_NAME, type=float, required=False,
default=echo_top_tracking.DEFAULT_MIN_INTERMAX_DISTANCE_METRES,
help=MIN_INTERMAX_DISTANCE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_VELOCITY_DIFF_ARG_NAME, type=float, required=False,
default=echo_top_tracking.DEFAULT_MAX_VELOCITY_DIFF_M_S01,
help=MAX_VELOCITY_DIFF_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + MAX_LINK_DISTANCE_ARG_NAME, type=float, required=False,
default=echo_top_tracking.DEFAULT_MAX_LINK_DISTANCE_M_S01,
help=MAX_LINK_DISTANCE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_DIR_ARG_NAME, type=str, required=True,
help=OUTPUT_DIR_HELP_STRING)
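# Example invocation (illustrative only; the directory paths are placeholders, the flag
# names come from the argument definitions above, and SPC dates use the "yyyymmdd" format):
#   python run_echo_top_tracking.py \
#       --input_radar_dir_name=/data/radar \
#       --first_spc_date_string=20110401 \
#       --last_spc_date_string=20110403 \
#       --output_tracking_dir_name=/data/tracking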
def _run(top_radar_dir_name, top_radar_dir_name_tarred,
top_echo_classifn_dir_name, first_spc_date_string,
last_spc_date_string, echo_top_field_name, min_echo_top_km,
min_size_pixels, min_intermax_distance_metres, max_velocity_diff_m_s01,
max_link_distance_m_s01, top_output_dir_name):
"""Tracks storms based on echo top.
This is effectively the main method.
:param top_radar_dir_name: See documentation at top of file.
:param top_radar_dir_name_tarred: Same.
:param top_echo_classifn_dir_name: Same.
:param first_spc_date_string: Same.
:param last_spc_date_string: Same.
:param echo_top_field_name: Same.
:param min_echo_top_km: Same.
:param min_size_pixels: Same.
:param min_intermax_distance_metres: Same.
:param max_velocity_diff_m_s01: Same.
:param max_link_distance_m_s01: Same.
:param top_output_dir_name: Same.
"""
if echo_top_field_name in NATIVE_ECHO_TOP_FIELD_NAMES:
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
for this_spc_date_string in spc_date_strings:
this_tar_file_name = '{0:s}/{1:s}/{2:s}.tar'.format(
top_radar_dir_name_tarred, this_spc_date_string[:4],
this_spc_date_string)
myrorss_io.unzip_1day_tar_file(
tar_file_name=this_tar_file_name,
field_names=[echo_top_field_name],
spc_date_string=this_spc_date_string,
top_target_directory_name=top_radar_dir_name)
print(SEPARATOR_STRING)
if top_echo_classifn_dir_name in ['', 'None']:
top_echo_classifn_dir_name = None
echo_top_tracking.run_tracking(
top_radar_dir_name=top_radar_dir_name,
top_output_dir_name=top_output_dir_name,
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string,
echo_top_field_name=echo_top_field_name,
top_echo_classifn_dir_name=top_echo_classifn_dir_name,
min_echo_top_km=min_echo_top_km,
min_intermax_distance_metres=min_intermax_distance_metres,
min_polygon_size_pixels=min_size_pixels,
max_velocity_diff_m_s01=max_velocity_diff_m_s01,
max_link_distance_m_s01=max_link_distance_m_s01,
min_track_duration_seconds=0)
print(SEPARATOR_STRING)
if echo_top_field_name in NATIVE_ECHO_TOP_FIELD_NAMES:
for this_spc_date_string in spc_date_strings:
myrorss_io.remove_unzipped_data_1day(
spc_date_string=this_spc_date_string,
top_directory_name=top_radar_dir_name,
field_names=[echo_top_field_name]
)
if __name__ == '__main__':
INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
_run(
top_radar_dir_name=getattr(INPUT_ARG_OBJECT, RADAR_DIR_ARG_NAME),
top_radar_dir_name_tarred=getattr(
INPUT_ARG_OBJECT, TARRED_RADAR_DIR_ARG_NAME),
top_echo_classifn_dir_name=getattr(
INPUT_ARG_OBJECT, ECHO_CLASSIFN_DIR_ARG_NAME),
first_spc_date_string=getattr(
INPUT_ARG_OBJECT, FIRST_SPC_DATE_ARG_NAME),
last_spc_date_string=getattr(INPUT_ARG_OBJECT, LAST_SPC_DATE_ARG_NAME),
echo_top_field_name=getattr(INPUT_ARG_OBJECT, ECHO_TOP_FIELD_ARG_NAME),
min_echo_top_km=getattr(INPUT_ARG_OBJECT, MIN_ECHO_TOP_ARG_NAME),
min_size_pixels=getattr(INPUT_ARG_OBJECT, MIN_SIZE_ARG_NAME),
min_intermax_distance_metres=getattr(
INPUT_ARG_OBJECT, MIN_INTERMAX_DISTANCE_ARG_NAME),
max_velocity_diff_m_s01=getattr(
INPUT_ARG_OBJECT, MAX_VELOCITY_DIFF_ARG_NAME),
max_link_distance_m_s01=getattr(
INPUT_ARG_OBJECT, MAX_LINK_DISTANCE_ARG_NAME),
top_output_dir_name=getattr(INPUT_ARG_OBJECT, OUTPUT_DIR_ARG_NAME)
)
| 40.69163 | 80 | 0.761936 |
78db2eb650151c9726e8651058d7cffe4555fdf0 | 4,792 | py | Python | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_hc.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_hc.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories_2to3/42139/KDDCup13Track2-master/cluster_hc.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | #!/usr/bin/env python
# Given weighted graph, perform hierarchical clustering
from common import *
import argparse
import csv
import numpy as np
import scipy as sp
import pickle as pickle
from pprint import pprint
import networkx as nx
from itertools import combinations, product
import heapq as hq
from cluster_common import *
# from collections import defaultdict
def getSimilarity_Average(G_sim, cl1, cl2):
edge_sum = sum([G_sim[v1][v2]['weight'] for v1, v2 in product(cl1, cl2) if G_sim.has_edge(v1, v2)])
return edge_sum / float(len(cl1) * len(cl2))
def getSimilarity_AvgPresent(G_sim, cl1, cl2):
edgeweights = [G_sim[v1][v2]['weight'] for v1, v2 in product(cl1, cl2) if G_sim.has_edge(v1, v2)]
return sum(edgeweights) / float(len(edgeweights))
def getSimilarity_Min(G_sim, cl1, cl2):
return min([G_sim[v1][v2]['weight'] for v1, v2 in product(cl1, cl2) if G_sim.has_edge(v1, v2)])
def hcluster(G_sim, threshold_sim, sim_func):
sim_funcs = {
'average': getSimilarity_Average,
'avgpresent': getSimilarity_AvgPresent,
'min': getSimilarity_Min
}
chosen_simfunc = sim_funcs[sim_func]
print_err("Finding connected components")
connected_components = nx.connected_component_subgraphs(G_sim)
all_clusters = []
print_err('Clustering', len(connected_components), 'components')
for component_i, cc in enumerate(connected_components):
print_err('Starting component', component_i+1, 'of', len(connected_components), '(V={:}, E={:})'.format(len(cc), cc.size()))
if len(cc) == 2:
cl = list(cc.nodes())
if cc.size(weight='weight') >= threshold_sim:
all_clusters.append(cl)
continue
elif len(cc) < 2:
continue
clusters = [[v] for v in cc]
removed = set()
adjclusters = [set() for i in range(len(cc))]
c_sim = nx.to_scipy_sparse_matrix(cc, weight='weight', format='coo')
pq = [(sim, r, c) for (sim, r, c) in zip(-c_sim.data, c_sim.row, c_sim.col) if r < c]
for _, r, c in pq:
adjclusters[r].add(c)
adjclusters[c].add(r)
hq.heapify(pq)
while pq:
similarity, c1, c2 = hq.heappop(pq)
similarity = -similarity
if c1 in removed or c2 in removed:
continue
if similarity < threshold_sim:
break
# print_err(clusters[c1])
# print_err(clusters[c2])
# print_err(c1, c2, similarity)
# for i, cl in enumerate(clusters):
# if i not in removed:
# print_err(i, cl)
# print_err("--")
clusters.append(clusters[c1] + clusters[c2])
removed.add(c1)
removed.add(c2)
toremove = set([c1, c2])
adjclusters.append((adjclusters[c1] | adjclusters[c2]) - toremove)
for nc in adjclusters[-1]:
if nc in removed:
continue
adjclusters[nc] -= toremove
adjclusters[nc].add(len(clusters)-1)
nsim = chosen_simfunc(G_sim, clusters[-1], clusters[nc])
if nsim >= threshold_sim:
hq.heappush(pq, (-nsim, len(clusters)-1, nc))
# else:
# print_err("Not merged:")
# print_err(len(clusters)-1, clusters[len(clusters)-1])
# print_err(nc, clusters[nc])
# print_err(len(clusters)-1, nc, nsim)
# print_err("----")
all_clusters.extend([cl for i, cl in enumerate(clusters) if i not in removed and len(cl) > 1])
return sorted(all_clusters, key=len, reverse=True)
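# Minimal usage sketch for hcluster (a toy graph; the weights and threshold are made up):
#   G = nx.Graph()
#   G.add_weighted_edges_from([(1, 2, 0.9), (2, 3, 0.85), (3, 4, 0.2)])
#   hcluster(G, 0.8, 'average')  # -> [[1, 2]]: after the first merge, node 3's average
#                                #    similarity to the cluster falls below the 0.8 threshold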
def main():
parser = argparse.ArgumentParser()
parser.add_argument('edgelist')
parser.add_argument('outfile', nargs='?')
parser.add_argument('-t', '--interconnectivity', default=0.83, type=float)
parser.add_argument('-d', '--density', default=0.83, type=float)
parser.add_argument('-m', '--min-edge', default=0.05, type=float)
parser.add_argument('-l', '--linkage', default='average')
parser.add_argument('-a', '--authorprefeat', default='generated/Author_prefeat.pickle')
args = parser.parse_args()
if args.outfile == None:
args.outfile = args.edgelist.replace('.prob','') + '.clusters'
threshold_min_weight = args.min_edge
threshold_interconnectivity = args.interconnectivity
threshold_density = args.density
print_err("Loading graph")
G_sim = nx.read_weighted_edgelist(enforce_min(skip_comments(open(args.edgelist, 'rb')), threshold_min_weight), nodetype=int, delimiter=',')
print_err('Loaded (V={:}, E={:})'.format(len(G_sim), G_sim.size()))
print_err("Clustering")
clusters = hcluster(G_sim, threshold_interconnectivity, args.linkage)
print_err("Writing clusters")
G_nsim = nx.read_weighted_edgelist(skip_comments(open(args.edgelist, 'rb')), nodetype=int, delimiter=',')
print_err("Loading pickled author pre-features")
authors = pickle.load(open(args.authorprefeat, 'rb'))
outputClusters(clusters, open(args.outfile, 'wb'), G_nsim, authors, threshold_density)
if __name__ == "__main__":
main() | 37.4375 | 141 | 0.679466 |
7790d490a481d17a059a7dfee406a3429d42670d | 2,273 | py | Python | rqalpha/mod/rqalpha_mod_sys_transaction_cost/__init__.py | meteor27/alpha_mod | 4f7f0edf8338451a69f177058ec80766d846769e | [
"Apache-2.0"
] | null | null | null | rqalpha/mod/rqalpha_mod_sys_transaction_cost/__init__.py | meteor27/alpha_mod | 4f7f0edf8338451a69f177058ec80766d846769e | [
"Apache-2.0"
] | 2 | 2021-01-25T09:49:55.000Z | 2021-01-25T09:50:37.000Z | rqalpha/mod/rqalpha_mod_sys_transaction_cost/__init__.py | meteor27/alpha_mod | 4f7f0edf8338451a69f177058ec80766d846769e | [
"Apache-2.0"
] | 2 | 2021-01-10T10:35:13.000Z | 2021-01-10T10:43:13.000Z | # -*- coding: utf-8 -*-
# Copyright 2019 Shenzhen Ricequant Technology Co., Ltd. ("Ricequant")
#
# This software may not be used except in compliance with the current license.
#
# * Non-commercial use (i.e. use by individuals for non-commercial purposes, or by universities, research institutes and other non-profit institutions for education, research and similar purposes):
#     Follow the Apache License 2.0 (the "Apache 2.0 License"); a copy of the Apache 2.0 License is available at http://www.apache.org/licenses/LICENSE-2.0.
#     Unless required by law or agreed to in writing, this software is distributed under the current license on an "AS IS" basis, without conditions of any kind attached.
#
# * Commercial use (i.e. use by individuals for any commercial purpose, or by legal persons or other organizations for any purpose):
#     Without Ricequant's authorization, no individual may use this software for any commercial purpose (including but not limited to providing, selling, renting, lending or transferring to third parties this software, derivatives of this software, or products or services that reference or borrow from this software's functionality or source code), and no legal person or other organization may use this software for any purpose; otherwise Ricequant may pursue liability for the corresponding intellectual-property infringement.
#     Subject to the above, use of this software must also comply with the Apache 2.0 License; where the Apache 2.0 License conflicts with this license, this license prevails.
#     For the detailed authorization process, please contact [email protected].
import click
from rqalpha import cli
__config__ = {
    # Minimum commission for China A-share (CN stock) trades
    "cn_stock_min_commission": 5,
    # Minimum commission for Hong Kong stock trades
    "hk_stock_min_commission": 50,
    # Commission multiplier, defaults to 1
    "commission_multiplier": 1,
    # Stamp-tax multiplier, defaults to 1
    "tax_multiplier": 1,
}
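# These defaults can be overridden from the command line through the options registered
# below, e.g. (illustrative): rqalpha run ... -cm 2 -tm 1.5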
cli_prefix = "mod__sys_transaction_cost__"
cli.commands['run'].params.append(
click.Option(
('-cm', '--commission-multiplier', cli_prefix + "commission_multiplier"),
type=click.FLOAT,
help="[sys_simulation] set commission multiplier"
)
)
cli.commands['run'].params.append(
click.Option(
('-cnsmc', '--cn-stock-min-commission', cli_prefix + 'cn_stock_min_commission'),
type=click.FLOAT,
help="[sys_simulation] set minimum commission in chinese stock trades."
)
)
cli.commands['run'].params.append(
click.Option(
('-hksmc', '--hk-stock-min-commission', cli_prefix + 'hk_stock_min_commission'),
type=click.FLOAT,
help="[sys_simulation] set minimum commission in Hong Kong stock trades."
)
)
# [deprecated]
cli.commands['run'].params.append(
click.Option(
('-smc', '--stock-min-commission', cli_prefix + 'cn_stock_min_commission'),
type=click.FLOAT,
help="[sys_simulation][deprecated] set minimum commission in chinese stock trades."
)
)
cli.commands['run'].params.append(
click.Option(
('-tm', '--tax-multiplier', cli_prefix + "tax_multiplier"),
type=click.FLOAT,
help="[sys_simulation] set tax multiplier"
)
)
def load_mod():
from .mod import TransactionCostMod
return TransactionCostMod()
| 28.4125 | 144 | 0.673999 |
2fe877dd963542ab459cce83de295fce0d921eab | 453 | py | Python | src/ucis/scdb/scdb_scope.py | furiosa-ai/pyucis | 233277abf5a86e1158ae2cc09d91152ca9f1e517 | [
"Apache-2.0"
] | 16 | 2020-03-25T21:31:49.000Z | 2022-01-18T22:34:05.000Z | src/ucis/scdb/scdb_scope.py | furiosa-ai/pyucis | 233277abf5a86e1158ae2cc09d91152ca9f1e517 | [
"Apache-2.0"
] | 4 | 2020-01-05T00:26:00.000Z | 2022-01-27T07:44:06.000Z | src/ucis/scdb/scdb_scope.py | furiosa-ai/pyucis | 233277abf5a86e1158ae2cc09d91152ca9f1e517 | [
"Apache-2.0"
] | 4 | 2019-12-23T06:23:11.000Z | 2022-01-09T07:41:32.000Z | '''
Created on Mar 25, 2020
@author: ballance
'''
from ucis.scope import Scope
from ucis.source_info import SourceInfo
class SCDBScope(Scope):
def __init__(self):
Scope.__init__(self)
pass
def createScope(self,
name:str,
srcinfo:SourceInfo,
weight:int,
source,
type,
flags)->Scope:
        return Scope.createScope(self, name, srcinfo, weight, source, type, flags)
| 18.875 | 75 | 0.593819 |
2e61fa89ecb45046b2800e940389c95f899ca6cc | 21,796 | py | Python | cisco_aci/tests/test_capacity.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | null | null | null | cisco_aci/tests/test_capacity.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | null | null | null | cisco_aci/tests/test_capacity.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2010-2018
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import os
import pytest
import logging
import simplejson as json
from requests import Session
from datadog_checks.cisco_aci.api import SessionWrapper, Api
from datadog_checks.cisco_aci.capacity import Capacity
from datadog_checks.cisco_aci import CiscoACICheck
from datadog_checks.utils.containers import hash_mutable
import conftest
from .common import FIXTURE_LIST_FILE_MAP
log = logging.getLogger('test_cisco_aci')
class ApiMock:
def __init__(self):
pass
def get_eqpt_capacity(self, eqpt):
return [
{},
{
'other': {}
},
{
'attributes': {}
},
{
'attributes': {"other": "other"}
},
{
'attributes': {"other": "other"},
"children": []
},
{
'attributes': {"dn": "/pod-3/node-4/"},
"children": []
},
{
'attributes': {
"dn": "/pod-1/node-2/"
}, "children": [
{"eqptcapacityL3TotalUsageCap5min": {"attributes": {
"v4TotalEpCapCum": "1",
"v6TotalEpCapCum": "2"
}}},
{"eqptcapacityL3TotalUsage5min": {"attributes": {
"v4TotalEpCum": "3",
"v6TotalEpCum": "4"
}}},
{"eqptcapacityVlanUsage5min": {"attributes": {
"totalCapCum": "5",
"totalCum": "6"
}}},
{"eqptcapacityPolUsage5min": {"attributes": {
"polUsageCapCum": "7",
"polUsageCum": "8"
}}},
{"eqptcapacityMcastUsage5min": {"attributes": {
"localEpCapCum": "9",
"localEpCum": "10"
}}},
{"other": ""},
]
}
]
def get_capacity_contexts(self, context):
return [
{},
{"other": {}},
{"ctxClassCnt": {"attributes": {}}},
{"ctxClassCnt": {"other": {}}},
{
"ctxClassCnt": {
"attributes": {
"other": "other",
}
}
}, {
"ctxClassCnt": {
"attributes": {
"dn": "/pod-3/node-4/",
}
}
}, {
"ctxClassCnt": {
"attributes": {
"count": "666",
"dn": "/pod-1/node-2/",
"other": "other"
}
}
}
]
def get_apic_capacity_limits(self):
return [
{},
{"other": {}},
{"fvcapRule": {}},
{"fvcapRule": {"other": {}}},
{"fvcapRule": {"attributes": {}}},
{"fvcapRule": {"attributes": {"constraint": "100"}}},
{
"fvcapRule": {
"attributes": {
"subj": "subj1",
}
}
}, {
"fvcapRule": {
"attributes": {
"subj": "fabricNode",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "1",
"subj": "vzBrCP",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "2.0",
"subj": "fvTenant",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "3",
"subj": "fvCEp",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "4",
"subj": "plannerAzureDomainTmpl",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "5",
"subj": "fvCtx",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "6",
"subj": "plannerAzureDomainTmpl",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "7",
"subj": "plannerAzureDomain",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "8",
"subj": "vnsGraphInst",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "9",
"subj": "fvBD",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "10",
"subj": "fvAEPg",
}
}
}, {
"fvcapRule": {
"attributes": {
"constraint": "11",
"subj": "plannerVmwareDomain",
}
}
}
]
def get_apic_capacity_metrics(self, capacity_metric, query=None):
return [
{},
{"other": {}},
{"moCount": {}},
{"moCount": {"other": {}}},
{"moCount": {"attributes": {}}},
{"moCount": {"attributes": {"count": "666"}}}
]
def test_get_eqpt_capacity(aggregator):
check = CiscoACICheck(conftest.CHECK_NAME, {}, {})
api = ApiMock()
capacity = Capacity(api, instance={"tags": ["user_tag:1", "utag:2"]}, check_tags=["check_tag:1", "ctag:2"],
gauge=check.gauge, log=check.log)
capacity._get_eqpt_capacity()
tags = ['fabric_pod_id:1', 'node_id:2', 'check_tag:1', 'ctag:2', 'user_tag:1', 'utag:2']
hn = 'pod-1-node-2'
aggregator.assert_metric('cisco_aci.capacity.leaf.policy_cam.utilized', value=8.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.vlan.limit', value=5.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.ipv6_endpoint.limit', value=2.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.policy_cam.limit', value=7.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.ipv4_endpoint.limit', value=1.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.ipv6_endpoint.utilized', value=4.0, tags=tags, hostname=hn,
count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.vlan.utilized', value=6.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.multicast.limit', value=9.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.multicast.utilized', value=10.0, tags=tags, hostname=hn, count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.ipv4_endpoint.utilized', value=3.0, tags=tags, hostname=hn,
count=1)
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_get_contexts(aggregator):
check = CiscoACICheck(conftest.CHECK_NAME, {}, {})
api = ApiMock()
capacity = Capacity(api, instance={"tags": ["user_tag:1", "utag:2"]}, check_tags=["check_tag:1", "ctag:2"],
gauge=check.gauge, log=check.log)
capacity._get_contexts()
tags = ['check_tag:1', 'ctag:2', 'user_tag:1', 'utag:2']
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.utilized', value=666.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2', count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.utilized', value=666.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2', count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2', count=1)
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.utilized', value=666.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.limit', value=800.0,
tags=['fabric_pod_id:1', 'node_id:2'] + tags, hostname='pod-1-node-2', count=1)
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_get_apic_capacity_limits(aggregator):
check = CiscoACICheck(conftest.CHECK_NAME, {}, {})
api = ApiMock()
capacity = Capacity(api, instance={"tags": ["user_tag:1", "utag:2"]}, check_tags=["check_tag:1", "ctag:2"],
gauge=check.gauge, log=check.log)
capacity._get_apic_capacity_limits()
tags = ['check_tag:1', 'ctag:2', 'user_tag:1', 'utag:2']
aggregator.assert_metric('cisco_aci.capacity.apic.tenant.limit', value=2.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.service_graph.limit', value=8.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.bridge_domain.limit', value=9.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.azure_domain.endpoint_group.limit', value=7.0, tags=tags,
hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.vmware_domain.endpoint_group.limit', value=11.0, tags=tags,
hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.fabric_node.limit', value=0.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.contract.limit', value=1.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.azure_domain.limit', value=4.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.azure_domain.limit', value=6.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint_group.limit', value=10.0, tags=tags, hostname='',
count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.private_network.limit', value=5.0, tags=tags, hostname='',
count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint.limit', value=3.0, tags=tags, hostname='', count=1)
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
def test_get_apic_capacity_metrics(aggregator):
check = CiscoACICheck(conftest.CHECK_NAME, {}, {})
api = ApiMock()
capacity = Capacity(api, instance={"tags": ["user_tag:1", "utag:2"]}, check_tags=["check_tag:1", "ctag:2"],
gauge=check.gauge, log=check.log)
capacity._get_apic_capacity_metrics()
tags = ['check_tag:1', 'ctag:2', 'user_tag:1', 'utag:2']
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint.utilized', value=666.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.bridge_domain.utilized', value=666.0, tags=tags, hostname='',
count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.tenant.utilized', value=666.0, tags=tags, hostname='', count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.private_network.utilized', value=666.0, tags=tags, hostname='',
count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint_group.utilized', value=666.0, tags=tags, hostname='',
count=1)
aggregator.assert_metric('cisco_aci.capacity.apic.fabric_node.utilized', value=6.0, tags=tags, hostname='', count=1)
# Assert coverage for this check on this instance
aggregator.assert_all_metrics_covered()
class FakeSess(SessionWrapper):
""" This mock:
    1. Takes the requested path and replaces all special characters with underscores
    2. Fetches the corresponding entry from FIXTURE_LIST_FILE_MAP
3. Returns the corresponding file content
"""
def make_request(self, path):
mock_path = path.replace('/', '_')
mock_path = mock_path.replace('?', '_')
mock_path = mock_path.replace('&', '_')
mock_path = mock_path.replace('=', '_')
mock_path = mock_path.replace(',', '_')
mock_path = mock_path.replace('-', '_')
mock_path = mock_path.replace('.', '_')
mock_path = mock_path.replace('"', '_')
mock_path = mock_path.replace('(', '_')
mock_path = mock_path.replace(')', '_')
mock_path = mock_path.replace('[', '_')
mock_path = mock_path.replace(']', '_')
mock_path = mock_path.replace('|', '_')
try:
mock_path = FIXTURE_LIST_FILE_MAP[mock_path]
mock_path = os.path.join(conftest.CAPACITY_FIXTURES_DIR, mock_path)
mock_path += '.txt'
log.info(os.listdir(conftest.CAPACITY_FIXTURES_DIR))
with open(mock_path, 'r') as f:
return json.loads(f.read())
except Exception:
return {"imdata": []}
@pytest.fixture
def session_mock():
session = Session()
setattr(session, 'send', conftest.mock_send)
fake_session_wrapper = FakeSess(conftest.ACI_URL, session, 'cookie')
return fake_session_wrapper
def test_capacity_end_to_end(aggregator, session_mock):
check = CiscoACICheck(conftest.CHECK_NAME, {}, {})
api = Api(conftest.ACI_URLS, conftest.USERNAME,
password=conftest.PASSWORD, log=check.log, sessions=[session_mock])
api._refresh_sessions = False
check._api_cache[hash_mutable(conftest.CONFIG_WITH_TAGS)] = api
check.check(conftest.CONFIG_WITH_TAGS)
tags = ['cisco', 'project:cisco_aci']
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.utilized', value=44.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.utilized', value=1.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.utilized', value=1.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.utilized', value=34.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint_group.utilized', value=205.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.private_network.utilized', value=85.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.bridge_domain.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.tenant.utilized', value=90.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.utilized', value=94.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.utilized', value=0.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.utilized', value=0.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.utilized', value=78.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint_group.limit', value=15000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.endpoint_group.limit', value=3500.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint.limit', value=180000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.endpoint.utilized', value=76.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.bridge_domain.utilized', value=154.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.vmware_domain.limit', value=5.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.private_network.limit', value=3000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.utilized', value=32.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.utilized', value=4.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.utilized', value=4.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.utilized', value=27.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.contract.limit', value=1000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.azure_domain.endpoint_group.limit', value=9000.0,
tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.fabric_node.limit', value=200.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.bridge_domain.limit', value=15000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.fabric_node.utilized', value=2.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.tenant.limit', value=3000.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.limit', value=800.0,
tags=['fabric_pod_id:1', 'node_id:101'] + tags, hostname='pod-1-node-101')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.limit', value=800.0,
tags=['fabric_pod_id:1', 'node_id:201'] + tags, hostname='pod-1-node-201')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.limit', value=800.0,
tags=['fabric_pod_id:1', 'node_id:202'] + tags, hostname='pod-1-node-202')
aggregator.assert_metric('cisco_aci.capacity.leaf.vrf.limit', value=800.0,
tags=['fabric_pod_id:1', 'node_id:102'] + tags, hostname='pod-1-node-102')
aggregator.assert_metric('cisco_aci.capacity.apic.vmware_domain.endpoint_group.limit', value=15000.0,
tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.azure_domain.limit', value=5.0, tags=tags, hostname='')
aggregator.assert_metric('cisco_aci.capacity.apic.service_graph.limit', value=600.0, tags=tags, hostname='')
| 49.762557 | 120 | 0.562901 |
99051d771d01044fe34af1853799adb6c3943988 | 3,051 | py | Python | matrixprofile/algorithms/mpx.py | KSaiRahul21/matrixprofile | d8250e30d90ed0453bb7c35bb34ab0c04ae7b334 | [
"Apache-2.0"
] | null | null | null | matrixprofile/algorithms/mpx.py | KSaiRahul21/matrixprofile | d8250e30d90ed0453bb7c35bb34ab0c04ae7b334 | [
"Apache-2.0"
] | null | null | null | matrixprofile/algorithms/mpx.py | KSaiRahul21/matrixprofile | d8250e30d90ed0453bb7c35bb34ab0c04ae7b334 | [
"Apache-2.0"
] | 1 | 2020-04-10T19:15:17.000Z | 2020-04-10T19:15:17.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import math
import numpy as np
from matrixprofile import core
from matrixprofile.algorithms.cympx import mpx_parallel as cympx_parallel
from matrixprofile.algorithms.cympx import mpx_ab_parallel as cympx_ab_parallel
def mpx(ts, w, query=None, cross_correlation=False, n_jobs=1):
"""
The MPX algorithm computes the matrix profile without using the FFT.
Parameters
----------
ts : array_like
The time series to compute the matrix profile for.
w : int
The window size.
query : array_like
Optionally a query series.
cross_correlation : bool, Default=False
        Determine whether cross_correlation distance should be returned. It defaults
to Euclidean Distance.
n_jobs : int, Default = 1
Number of cpu cores to use.
Returns
-------
dict : profile
A MatrixProfile data structure.
>>> {
>>> 'mp': The matrix profile,
>>> 'pi': The matrix profile 1NN indices,
>>> 'rmp': The right matrix profile,
>>> 'rpi': The right matrix profile 1NN indices,
>>> 'lmp': The left matrix profile,
>>> 'lpi': The left matrix profile 1NN indices,
>>> 'metric': The distance metric computed for the mp,
>>> 'w': The window size used to compute the matrix profile,
>>> 'ez': The exclusion zone used,
>>> 'join': Flag indicating if a similarity join was computed,
>>> 'sample_pct': Percentage of samples used in computing the MP,
>>> 'data': {
>>> 'ts': Time series data,
>>> 'query': Query data if supplied
>>> }
>>> 'class': "MatrixProfile"
>>> 'algorithm': "mpx"
>>> }
"""
ts = core.to_np_array(ts).astype('d')
n_jobs = core.valid_n_jobs(n_jobs)
is_join = False
if core.is_array_like(query):
query = core.to_np_array(query).astype('d')
is_join = True
mp, mpi, mpb, mpib = cympx_ab_parallel(ts, query, w,
int(cross_correlation), n_jobs)
else:
mp, mpi = cympx_parallel(ts, w, int(cross_correlation), n_jobs)
mp = np.asarray(mp)
mpi = np.asarray(mpi)
distance_metric = 'euclidean'
if cross_correlation:
distance_metric = 'cross_correlation'
return {
'mp': mp,
'pi': mpi,
'rmp': None,
'rpi': None,
'lmp': None,
'lpi': None,
'metric': distance_metric,
'w': w,
'ez': int(np.floor(w / 4)),
'join': is_join,
'sample_pct': 1,
'data': {
'ts': ts,
'query': query
},
'class': 'MatrixProfile',
'algorithm': 'mpx'
}
| 29.911765 | 79 | 0.579154 |
a626250de91365f58cfe051b4944c144f5e92339 | 871 | py | Python | graphics/density.py | tman540/probability-simulator | c3bc679d1f77fd751bc981734583bdc017290aef | [
"MIT"
] | null | null | null | graphics/density.py | tman540/probability-simulator | c3bc679d1f77fd751bc981734583bdc017290aef | [
"MIT"
] | null | null | null | graphics/density.py | tman540/probability-simulator | c3bc679d1f77fd751bc981734583bdc017290aef | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import matplotlib.style as style
# Set theme of graph
style.use('ggplot')
def heatmap(data):
# Define separate lists for x and y
x, y = [], []
# Separate the x and the y
for point in data:
x.append(point[0])
y.append(point[1])
# Define the color map
jet = plt.get_cmap('jet')
# Set the x and the y label
plt.xlabel("x", fontsize=5)
plt.ylabel("y", fontsize=5)
# Set the title and the window title of the graph
plt.title("Heatmap of thrown darts")
plt.gcf().canvas.set_window_title("Heatmap of thrown darts")
    # Define the 2D histogram using x and y as arguments.
    # The bins set the grid resolution; cmap is the color mapping
plt.hist2d(x, y, bins=(10, 10), cmap=jet)
# Display the graph
plt.show()
# todo: Return plot for saving command in shell
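# Example call (illustrative; the points are made-up (x, y) dart positions):
#   heatmap([(1, 2), (3, 4), (3, 5), (7, 1)])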
| 27.21875 | 64 | 0.647532 |
9c699c988522045cdf0f46825a21b7560d6deb81 | 1,774 | py | Python | src/m6e_grid_layout.py | colledkm/24-Tkinter | 4a900678902bf5d51d7df5e49cfb83262de742e6 | [
"MIT"
] | null | null | null | src/m6e_grid_layout.py | colledkm/24-Tkinter | 4a900678902bf5d51d7df5e49cfb83262de742e6 | [
"MIT"
] | null | null | null | src/m6e_grid_layout.py | colledkm/24-Tkinter | 4a900678902bf5d51d7df5e49cfb83262de742e6 | [
"MIT"
] | 97 | 2019-01-31T13:03:14.000Z | 2019-02-04T18:42:49.000Z | """
Example showing for tkinter and ttk:
  -- How to lay out objects using a grid of rows and columns.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and PUT_YOUR_NAME_HERE.
""" # TODO: 1. PUT YOUR NAME IN THE ABOVE LINE.
import tkinter
from tkinter import ttk
def main():
root = tkinter.Tk()
frame = ttk.Frame(root, padding=10)
frame.grid()
# -------------------------------------------------------------------------
# This example puts the widgets in a 3-column, 2-row grid
# with some of the grid-places empty. Here are the WIDGETS:
# -------------------------------------------------------------------------
label = ttk.Label(frame, text="Example of gridding\nrows and columns")
entry_box = ttk.Entry(frame)
button1 = ttk.Button(frame, text="Do you like\nyour button HERE?")
button1['command'] = (lambda:
print('Do you like green eggs and ham, Sam?'))
button2 = ttk.Button(frame, text="or HERE?")
button2['command'] = (lambda:
print("I DO like green eggs and ham, Sam I am!"))
# -------------------------------------------------------------------------
# Here is the use of GRID with rows and columns:
# -------------------------------------------------------------------------
label.grid(row=0, column=0)
entry_box.grid(row=0, column=1)
button1.grid(row=0, column=2)
button2.grid(row=1, column=1)
root.mainloop()
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main()
| 34.784314 | 79 | 0.468997 |
3357437eb4b1662ee121c3fe3c9ab5a2bb9e3cab | 6,552 | py | Python | tests/test_serialization.py | foobarbazmeow/marshmallow-recipe | 5bce8abd5db1abec4d60cfa2cd1428c6c5738566 | [
"MIT"
] | null | null | null | tests/test_serialization.py | foobarbazmeow/marshmallow-recipe | 5bce8abd5db1abec4d60cfa2cd1428c6c5738566 | [
"MIT"
] | null | null | null | tests/test_serialization.py | foobarbazmeow/marshmallow-recipe | 5bce8abd5db1abec4d60cfa2cd1428c6c5738566 | [
"MIT"
] | null | null | null | import dataclasses
import datetime
import decimal
import uuid
from typing import Any, cast
import marshmallow as m
import pytest
import marshmallow_recipe as mr
def test_simple_types() -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class SimpleTypesContainers:
str_field: str
optional_str_field: str | None
bool_field: bool
optional_bool_field: bool | None
decimal_field: decimal.Decimal
optional_decimal_field: decimal.Decimal | None
int_field: int
optional_int_field: int | None
float_field: float
optional_float_field: float | None
uuid_field: uuid.UUID
optional_uuid_field: uuid.UUID | None
datetime_field: datetime.datetime
optional_datetime_field: datetime.datetime | None
date_field: datetime.date
optional_date_field: datetime.date | None
dict_field: dict[str, Any]
optional_dict_field: dict[str, Any] | None
raw = dict(
str_field="42",
optional_str_field="42",
bool_field=True,
optional_bool_field=True,
decimal_field="42.00",
optional_decimal_field="42.00",
int_field=42,
optional_int_field=42,
float_field=42.0,
optional_float_field=42.0,
uuid_field="15f75b02-1c34-46a2-92a5-18363aadea05",
optional_uuid_field="15f75b02-1c34-46a2-92a5-18363aadea05",
datetime_field="2022-02-20T11:33:48.607289+00:00",
optional_datetime_field="2022-02-20T11:33:48.607289+00:00",
date_field="2022-02-20",
optional_date_field="2022-02-20",
dict_field=dict(key="value"),
optional_dict_field=dict(key="value"),
)
loaded = mr.load(SimpleTypesContainers, raw)
dumped = mr.dump(loaded)
assert loaded == SimpleTypesContainers(
str_field="42",
optional_str_field="42",
bool_field=True,
optional_bool_field=True,
decimal_field=decimal.Decimal("42.00"),
optional_decimal_field=decimal.Decimal("42.00"),
int_field=42,
optional_int_field=42,
float_field=42.0,
optional_float_field=42.0,
uuid_field=uuid.UUID("15f75b02-1c34-46a2-92a5-18363aadea05"),
optional_uuid_field=uuid.UUID("15f75b02-1c34-46a2-92a5-18363aadea05"),
datetime_field=datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, datetime.timezone.utc),
optional_datetime_field=datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, datetime.timezone.utc),
date_field=datetime.date(2022, 2, 20),
optional_date_field=datetime.date(2022, 2, 20),
dict_field=dict(key="value"),
optional_dict_field=dict(key="value"),
)
assert dumped == raw
assert mr.schema(SimpleTypesContainers) is mr.schema(SimpleTypesContainers)
def test_nested_dataclass() -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class BoolContainer:
bool_field: bool
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class Container:
bool_container_field: BoolContainer
raw = dict(bool_container_field=dict(bool_field=True))
loaded = mr.load(Container, raw)
dumped = mr.dump(loaded)
assert loaded == Container(bool_container_field=BoolContainer(bool_field=True))
assert dumped == raw
assert mr.schema(Container) is mr.schema(Container)
def test_custom_name() -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class BoolContainer:
bool_field: bool = dataclasses.field(metadata=mr.metadata(name="BoolField"))
raw = dict(BoolField=False)
loaded = mr.load(BoolContainer, raw)
dumped = mr.dump(loaded)
assert loaded == BoolContainer(bool_field=False)
assert dumped == raw
assert mr.schema(BoolContainer) is mr.schema(BoolContainer)
def test_unknown_field() -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class BoolContainer:
bool_field: bool
loaded = mr.load(BoolContainer, dict(bool_field=True, int_field=42))
dumped = mr.dump(loaded)
assert loaded == BoolContainer(bool_field=True)
assert dumped == dict(bool_field=True)
assert mr.schema(BoolContainer) is mr.schema(BoolContainer)
@pytest.mark.parametrize(
"raw, dt",
[
("2022-02-20T11:33:48.607289+00:00", datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, datetime.timezone.utc)),
("2022-02-20T11:33:48.607289", datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, datetime.timezone.utc)),
],
)
def test_datetime_field_load(raw: str, dt: datetime.datetime) -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class DateTimeContainer:
datetime_field: datetime.datetime
loaded = mr.load(DateTimeContainer, dict(datetime_field=raw))
assert loaded == DateTimeContainer(datetime_field=dt)
@pytest.mark.parametrize(
"dt, raw",
[
(datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, datetime.timezone.utc), "2022-02-20T11:33:48.607289+00:00"),
(datetime.datetime(2022, 2, 20, 11, 33, 48, 607289, None), "2022-02-20T11:33:48.607289+00:00"),
],
)
def test_datetime_field_dump(dt: datetime.datetime, raw: str) -> None:
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class DateTimeContainer:
datetime_field: datetime.datetime
dumped = mr.dump(DateTimeContainer(datetime_field=dt))
assert dumped == dict(datetime_field=raw)
@pytest.mark.skip("Bug in marshmallow")
def test_dump_invalid_int_value():
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class IntContainer:
int_field: int
with pytest.raises(m.ValidationError):
mr.dump(IntContainer(int_field=cast(int, "invalid")))
def test_dump_invalid_value():
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class UUIDContainer:
uuid_field: uuid.UUID
with pytest.raises(m.ValidationError) as exc_info:
mr.dump(UUIDContainer(uuid_field=cast(uuid.UUID, "invalid")))
assert exc_info.value.messages == {"uuid_field": ["Not a valid UUID."]}
def test_dump_many_invalid_value():
@dataclasses.dataclass(frozen=True, slots=True, kw_only=True)
class UUIDContainer:
uuid_field: uuid.UUID
with pytest.raises(m.ValidationError) as exc_info:
mr.dump_many([UUIDContainer(uuid_field=cast(uuid.UUID, "invalid"))])
assert exc_info.value.messages == {0: {"uuid_field": ["Not a valid UUID."]}}
| 33.773196 | 120 | 0.688797 |
8d4b883cfecb3e2c64a2b6e0276c301f23f699fe | 2,525 | py | Python | src/oci/os_management/models/change_scheduled_job_compartment_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/os_management/models/change_scheduled_job_compartment_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/os_management/models/change_scheduled_job_compartment_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ChangeScheduledJobCompartmentDetails(object):
"""
Compartment id for a scheduled job
"""
def __init__(self, **kwargs):
"""
Initializes a new ChangeScheduledJobCompartmentDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this ChangeScheduledJobCompartmentDetails.
:type compartment_id: str
"""
self.swagger_types = {
'compartment_id': 'str'
}
self.attribute_map = {
'compartment_id': 'compartmentId'
}
self._compartment_id = None
@property
def compartment_id(self):
"""
Gets the compartment_id of this ChangeScheduledJobCompartmentDetails.
The `OCID`__ of the
compartment into which the resource should be moved.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this ChangeScheduledJobCompartmentDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this ChangeScheduledJobCompartmentDetails.
The `OCID`__ of the
compartment into which the resource should be moved.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this ChangeScheduledJobCompartmentDetails.
:type: str
"""
self._compartment_id = compartment_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 32.792208 | 245 | 0.689505 |
9c4b5297fa20d93766f9d0b3cbb1674a32b9d5fa | 8,307 | py | Python | typeclasses/readable/readable_commands.py | sgsabbage/arxcode | ff1587721f9896062ffdcaf008dcd842baaec5d2 | [
"MIT"
] | 42 | 2018-08-12T00:55:24.000Z | 2021-12-24T15:16:08.000Z | typeclasses/readable/readable_commands.py | sgsabbage/arxcode | ff1587721f9896062ffdcaf008dcd842baaec5d2 | [
"MIT"
] | 312 | 2018-10-22T23:03:27.000Z | 2022-02-06T13:02:58.000Z | typeclasses/readable/readable_commands.py | sgsabbage/arxcode | ff1587721f9896062ffdcaf008dcd842baaec5d2 | [
"MIT"
] | 42 | 2018-08-12T00:41:48.000Z | 2022-01-27T14:03:16.000Z | from commands.base import ArxCommand
from evennia.utils.ansi import parse_ansi
from server.utils.arx_utils import sub_old_ansi
from typeclasses.readable.exceptions import ChapterNotFoundError, AddChapterError
from world.templates.exceptions import AlreadySignedError
from server.utils import arx_more
from world.templates.models import WrittenWork
from evennia.commands.cmdset import CmdSet
from evennia.utils.evtable import EvTable
class WriteCmdSet(CmdSet):
key = "WriteCmd"
priority = 0
duplicates = True
def at_cmdset_creation(self):
"""Init the cmdset"""
self.add(CmdWrite())
self.add(CmdRead())
class SignCmdSet(CmdSet):
key = "SignCmd"
priority = 0
duplicates = True
def at_cmdset_creation(self):
self.add(CmdSign())
class CmdSign(ArxCommand):
"""
Signs a document
Usage:
sign <chapter>
Places your signature on a document.
"""
key = "sign"
locks = "cmd:all()"
def func(self):
try:
caller = self.caller
obj = self.obj
chapter = obj.get_chapter(self.args)
chapter.add_signature(self.caller)
caller.msg("You sign your name on %s." % obj.name)
except (ChapterNotFoundError, AlreadySignedError) as err:
self.msg(err)
class CmdWrite(ArxCommand):
"""
Write a story that can be recorded on a scroll/book/letter.
Usage:
write[/body] <story>
write/title <title>
write/proof
write/language <language>
write/finish
write/listworks [<story ID>]
write/record <book>=<new book name>
write/add <book name>=<story ID>,<chapter number>
Writes stories that you can then add as chapters to a book object,
which can then be read with the 'read' command.
To write a story, use write/title to name the story, 'write' to add
the story's content, and then write/finish to create it. To set
the language of the story to be something other than the default,
use write/language to specify it.
To add stories to a book, first name the book with write/record,
then add chapters with write/add. For example, to rename 'a scroll'
to 'Furen's Book of Wisdom', use 'write/record Furen's Book of Wisdom'.
Once a book has chapters added to it, its name may no longer be
changed.
"""
key = "write"
locks = "cmd:all()"
story_switches = ("title", "proof", "language", "finish", "body")
book_switches = ("record", "add")
work_switches = ("listworks",)
@property
def draft(self):
if self.caller.ndb.story_draft is None:
self.caller.ndb.story_draft = WrittenWork()
return self.caller.ndb.story_draft
def display(self):
msg = f"|wTitle:|n {self.draft.colored_title}\n"
lang_string = ""
if self.draft.language:
lang_string = f" |w(Written in |c{self.draft.language.capitalize()}|w)|n"
msg += f"|wBody{lang_string}:|n\n{self.draft.body}"
return msg
def func(self):
"""Look for object in inventory that matches args to wear"""
try:
if not self.switches or self.check_switches(self.story_switches):
return self.do_story_switches()
if self.check_switches(self.book_switches):
return self.do_book_switches()
if self.check_switches(self.work_switches):
return self.do_work_switches()
raise self.error_class("Unrecognized syntax for write.")
except (self.error_class, AddChapterError) as err:
self.msg(err)
def do_story_switches(self):
if not self.args and not self.switches:
self.switches.append("proof")
if not self.switches or "body" in self.switches:
self.draft.body = self.args
if "title" in self.switches:
title = sub_old_ansi(self.args)
raw_title = parse_ansi(title, strip_ansi=True)
if WrittenWork.objects.filter(title__iexact=raw_title).exists():
raise self.error_class(
"Sorry, a written work already exists with that title. "
"Try adding a number, (eg: 'Part II')."
)
self.draft.colored_title = title
self.draft.title = raw_title
if "language" in self.switches:
lhs = self.lhs.lower()
if lhs and lhs not in self.caller.languages.known_languages:
self.msg("You cannot speak that language.")
return
self.draft.language = lhs
if "finish" in self.switches:
title = self.draft.title
colored_title = self.draft.colored_title
body = self.draft.body
lang = self.draft.language or ""
if not title:
raise self.error_class("Still needs a title set.")
if not body:
raise self.error_class("Still needs a body set.")
story = self.caller.authored_works.create(
title=title, body=body, language=lang, colored_title=colored_title
)
self.msg(
f"You have created '{story}' (#{story.id}). Use |cwrite/add|n "
f"to add it as a chapter to a book."
)
del self.caller.ndb.story_draft
return
# "proof" switch and others fall down to here, to display progress
self.msg(self.display(), options={"box": True})
def do_book_switches(self):
obj = self.search(self.lhs)
if not obj:
return
try:
is_named = obj.has_been_named
except AttributeError:
raise self.error_class(f"{obj} is not a book.")
if "record" in self.switches:
if is_named:
raise self.error_class(f"'{obj}' has already been named.")
obj.set_book_name(self.caller, self.rhs)
self.msg(f"You have set the book's name to {self.rhs}.")
return
if "add" in self.switches:
try:
work_id, chapter_num = int(self.rhslist[0]), int(self.rhslist[1])
except (ValueError, TypeError):
raise self.error_class(
"Enter the ID of one of your authored works "
"and the chapter number to add."
)
work = self.get_work(work_id)
obj.add_chapter(work, chapter_num)
obj.cmdset.delete_default()
obj.cmdset.add_default(SignCmdSet, permanent=True)
self.msg(f"You have added {work} as Chapter {chapter_num}.")
def get_work(self, work_id):
try:
return self.caller.authored_works.get(id=work_id)
except (WrittenWork.DoesNotExist, TypeError, ValueError):
raise self.error_class("You have not written a work by that ID.")
def do_work_switches(self):
"""List all the works written by the character"""
if self.args:
work = self.get_work(self.args)
self.msg(str(work.body))
return
table = EvTable("|wID|n", "|wTitle|n", width=78)
qs = self.caller.authored_works.all()
for work in qs:
table.add_row(work.id, work.pretty_title)
self.msg(str(table))
class CmdRead(ArxCommand):
"""
Reads a document
Usage:
read <book>=<chapter>
Reads a chapter from a document.
"""
key = "read"
locks = "cmd:all()"
def func(self):
try:
book = self.search(self.lhs)
try:
chapter = book.get_chapter(self.rhs)
except AttributeError:
raise ChapterNotFoundError(f"{book} is not a book.")
if (
chapter.written_work.language
and chapter.written_work.language.lower()
not in self.caller.languages.known_languages
):
raise ChapterNotFoundError(
"That chapter is written in a language you don't understand."
)
arx_more.msg(self.caller, chapter.get_chapter_text())
except (ChapterNotFoundError, self.error_class) as err:
self.msg(err)
| 35.050633 | 85 | 0.594077 |
0ad752ec8b72a023479c45e6834e623231e1a3b4 | 5,498 | py | Python | gssapi/_utils.py | atodorov/python-gssapi | 99ee548cd871451047612d2fd1cd0be88bb56bfd | [
"ISC"
] | null | null | null | gssapi/_utils.py | atodorov/python-gssapi | 99ee548cd871451047612d2fd1cd0be88bb56bfd | [
"ISC"
] | null | null | null | gssapi/_utils.py | atodorov/python-gssapi | 99ee548cd871451047612d2fd1cd0be88bb56bfd | [
"ISC"
] | null | null | null | import sys
import types
import six
import decorator as deco
from typing import Optional
from gssapi.raw.misc import GSSError
def import_gssapi_extension(name):
"""Import a GSSAPI extension module
This method imports a GSSAPI extension module based
on the name of the extension (not including the
'ext_' prefix). If the extension is not available,
the method retuns None.
Args:
name (str): the name of the extension
Returns:
module: Either the extension module or None
"""
try:
path = 'gssapi.raw.ext_{0}'.format(name)
__import__(path)
return sys.modules[path]
except ImportError:
return None
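# Illustrative usage sketch (the extension name is an example; which extensions
# are importable depends on how the underlying GSSAPI library was built):
#
#   rfc5588 = import_gssapi_extension('rfc5588')
#   if rfc5588 is None:
#       ...  # extension unavailable -- fall back or raise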
def flag_property(flag):
def setter(self, val):
if val:
self.flags.add(flag)
else:
self.flags.discard(flag)
def getter(self):
return flag in self.flags
return property(getter, setter)
def inquire_property(name: str, doc: Optional[str] = None):
"""Creates a property based on an inquire result
This method creates a property that calls the
:python:`_inquire` method, and return the value of the
requested information.
Args:
name (str): the name of the 'inquire' result information
Returns:
property: the created property
"""
def inquire_property(self):
if not self._started:
msg = (f"Cannot read {name} from a security context whose "
"establishment has not yet been started.")
raise AttributeError(msg)
return getattr(self._inquire(**{name: True}), name)
return property(inquire_property, doc=doc)
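# Illustrative consumer sketch (the class and attribute names are assumptions,
# not part of this module):
#
#   class SecurityContext:
#       lifetime = inquire_property('lifetime', 'remaining lifetime in seconds')
#       # reading ctx.lifetime then calls ctx._inquire(lifetime=True).lifetime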
# use UTF-8 as the default encoding, like Python 3
_ENCODING = 'UTF-8'
def _get_encoding():
"""Gets the current encoding used for strings.
This value is used to encode and decode string
values like names.
Returns:
str: the current encoding
"""
return _ENCODING
def set_encoding(enc):
"""Sets the current encoding used for strings
This value is used to encode and decode string
values like names.
Args:
enc: the encoding to use
"""
global _ENCODING
_ENCODING = enc
def _encode_dict(d):
"""Encodes any relevant strings in a dict"""
def enc(x):
if isinstance(x, six.text_type):
return x.encode(_ENCODING)
else:
return x
return dict((enc(k), enc(v)) for k, v in six.iteritems(d))
# in case of Python 3, just use exception chaining
@deco.decorator
def catch_and_return_token(func, self, *args, **kwargs):
"""Optionally defer exceptions and return a token instead
When `__DEFER_STEP_ERRORS__` is set on the implementing class
or instance, methods wrapped with this wrapper will
catch and save their :python:`GSSError` exceptions and
instead return the result token attached to the exception.
The exception can be later retrived through :python:`_last_err`
(and :python:`_last_tb` when Python 2 is in use).
"""
try:
return func(self, *args, **kwargs)
except GSSError as e:
if e.token is not None and self.__DEFER_STEP_ERRORS__:
self._last_err = e
# skip the "return func" line above in the traceback
if six.PY2:
self._last_tb = sys.exc_info()[2].tb_next.tb_next
else:
self._last_err.__traceback__ = e.__traceback__.tb_next
return e.token
else:
raise
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
"""Check and raise deferred errors before running the function
This method checks :python:`_last_err` before running the wrapped
function. If present and not None, the exception will be raised
with its original traceback.
"""
if self._last_err is not None:
try:
if six.PY2:
six.reraise(type(self._last_err), self._last_err,
self._last_tb)
else:
# NB(directxman12): not using six.reraise in Python 3 leads
# to cleaner tracebacks, and raise x is valid
# syntax in Python 3 (unlike raise x, y, z)
raise self._last_err
finally:
if six.PY2:
del self._last_tb # in case of cycles, break glass
self._last_err = None
else:
return func(self, *args, **kwargs)
@deco.decorator
def check_last_err(func, self, *args, **kwargs):
if self._last_err is not None:
try:
raise self._last_err
finally:
self._last_err = None
else:
return func(self, *args, **kwargs)
class CheckLastError(type):
"""Check for a deferred error on all methods
This metaclass applies the :python:`check_last_err` decorator
to all methods not prefixed by '_'.
Additionally, it enabled `__DEFER_STEP_ERRORS__` by default.
"""
def __new__(cls, name, parents, attrs):
attrs['__DEFER_STEP_ERRORS__'] = True
for attr_name in attrs:
attr = attrs[attr_name]
# wrap only methods
if not isinstance(attr, types.FunctionType):
continue
if attr_name[0] != '_':
attrs[attr_name] = check_last_err(attr)
return super(CheckLastError, cls).__new__(cls, name, parents, attrs)
| 26.819512 | 79 | 0.618225 |
896e57c47139d74b05a23fda7594338a940cf40a | 1,206 | py | Python | custom_usermodel/tests/test_models.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | 1 | 2020-10-25T18:16:47.000Z | 2020-10-25T18:16:47.000Z | custom_usermodel/tests/test_models.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | 1 | 2020-10-27T07:11:26.000Z | 2020-10-27T07:11:26.000Z | custom_usermodel/tests/test_models.py | uktrade/return-to-office | d4c53c734611413c9f8a7624e52dc35910c5ff57 | [
"MIT"
] | null | null | null | from custom_usermodel.models import User
def test_get_contact_email():
user = User(email="[email protected]", contact_email="[email protected]")
assert user.get_contact_email() == "[email protected]"
def test_get_contact_email_falls_back_email():
user = User(email="[email protected]")
assert not user.contact_email
assert user.get_contact_email() == "[email protected]"
def test_get_by_email_contact(db):
user1 = User.objects.create(email="[email protected]", contact_email="[email protected]")
User.objects.create(email="[email protected]", contact_email="[email protected]")
assert User.get_by_email("[email protected]") == user1
def test_get_by_email_non_contact(db):
user1 = User.objects.create(email="[email protected]", contact_email="[email protected]")
User.objects.create(email="[email protected]", contact_email="[email protected]")
assert User.get_by_email("[email protected]") == user1
def test_get_by_email_none(db):
User.objects.create(email="[email protected]", contact_email="[email protected]")
User.objects.create(email="[email protected]", contact_email="[email protected]")
assert User.get_by_email("[email protected]") is None
| 32.594595 | 91 | 0.743781 |
35b6016e18ca191a147eb20e1123bf01d321acef | 10,529 | py | Python | ch09/improved_spark_mllib_model.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | 1 | 2020-02-13T05:45:13.000Z | 2020-02-13T05:45:13.000Z | ch09/improved_spark_mllib_model.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | null | null | null | ch09/improved_spark_mllib_model.py | wikibook/agile-data-science | 7769fc2d6c810e9f1a64e45d3684e9260d99d983 | [
"MIT"
] | null | null | null | # !/usr/bin/env python
import sys, os, re
import json
import datetime, iso8601
from tabulate import tabulate
# The date and base path are passed in to main() from Airflow
def main(base_path):
APP_NAME = "train_spark_mllib_model.py"
    # Create the environment if a SparkSession does not already exist
try:
sc and spark
except NameError as e:
import findspark
findspark.init()
import pyspark
import pyspark.sql
sc = pyspark.SparkContext()
spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()
#
# {
# "ArrDelay":5.0,"CRSArrTime":"2015-12-31T03:20:00.000-08:00","CRSDepTime":"2015-12-31T03:05:00.000-08:00",
# "Carrier":"WN","DayOfMonth":31,"DayOfWeek":4,"DayOfYear":365,"DepDelay":14.0,"Dest":"SAN","Distance":368.0,
# "FlightDate":"2015-12-30T16:00:00.000-08:00","FlightNum":"6109","Origin":"TUS"
# }
#
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf
schema = StructType([
StructField("ArrDelay", DoubleType(), True), # "ArrDelay":5.0
StructField("CRSArrTime", TimestampType(), True), # "CRSArrTime":"2015-12-31T03:20:00.000-08:00"
StructField("CRSDepTime", TimestampType(), True), # "CRSDepTime":"2015-12-31T03:05:00.000-08:00"
StructField("Carrier", StringType(), True), # "Carrier":"WN"
StructField("DayOfMonth", IntegerType(), True), # "DayOfMonth":31
StructField("DayOfWeek", IntegerType(), True), # "DayOfWeek":4
StructField("DayOfYear", IntegerType(), True), # "DayOfYear":365
StructField("DepDelay", DoubleType(), True), # "DepDelay":14.0
StructField("Dest", StringType(), True), # "Dest":"SAN"
StructField("Distance", DoubleType(), True), # "Distance":368.0
StructField("FlightDate", DateType(), True), # "FlightDate":"2015-12-30T16:00:00.000-08:00"
StructField("FlightNum", StringType(), True), # "FlightNum":"6109"
StructField("Origin", StringType(), True), # "Origin":"TUS"
])
input_path = "{}/data/simple_flight_delay_features.json".format(
base_path
)
features = spark.read.json(input_path, schema=schema)
features.first()
#
    # Add a Route variable to replace FlightNum
#
from pyspark.sql.functions import lit, concat
features_with_route = features.withColumn(
'Route',
concat(
features.Origin,
lit('-'),
features.Dest
)
)
features_with_route.show(6)
#
    # Add the scheduled arrival/departure hours
#
from pyspark.sql.functions import hour
features_with_hour = features_with_route.withColumn(
"CRSDepHourOfDay",
hour(features.CRSDepTime)
)
features_with_hour = features_with_hour.withColumn(
"CRSArrHourOfDay",
hour(features.CRSArrTime)
)
features_with_hour.select("CRSDepTime", "CRSDepHourOfDay", "CRSArrTime", "CRSArrHourOfDay").show()
#
    # Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into on-time, slightly late, very late (0, 1, 2)
#
from pyspark.ml.feature import Bucketizer
    # Set up the bucketizer model
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
splits=splits,
inputCol="ArrDelay",
outputCol="ArrDelayBucket"
)
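    # With these splits the buckets are: 0 = more than 15 minutes early,
    # 1 = -15..0 (early/on time), 2 = 0..30 (slightly late), 3 = over 30
    # minutes (very late); Bucketizer intervals are left-closed, right-open.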
    # Save the model
arrival_bucketizer_path = "{}/models/arrival_bucketizer_2.0.bin".format(base_path)
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
    # Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_hour)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show()
#
    # Import the feature tools from pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
    # Turn the categorical fields into indexes
for column in ["Carrier", "Origin", "Dest", "Route"]:
string_indexer = StringIndexer(
inputCol=column,
outputCol=column + "_index"
)
string_indexer_model = string_indexer.fit(ml_bucketized_features)
ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
        # Save the pipeline model
string_indexer_output_path = "{}/models/string_indexer_model_3.0.{}.bin".format(
base_path,
column
)
string_indexer_model.write().overwrite().save(string_indexer_output_path)
    # Combine the continuous numeric fields with the indexes of the nominal fields into one feature vector
numeric_columns = [
"DepDelay", "Distance",
"DayOfMonth", "DayOfWeek",
"DayOfYear", "CRSDepHourOfDay",
"CRSArrHourOfDay"]
index_columns = ["Carrier_index", "Origin_index",
"Dest_index", "Route_index"]
vector_assembler = VectorAssembler(
inputCols=numeric_columns + index_columns,
outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
    # Save the numeric vector assembler
vector_assembler_path = "{}/models/numeric_vector_assembler_3.0.bin".format(base_path)
vector_assembler.write().overwrite().save(vector_assembler_path)
    # Drop the index columns
for column in index_columns:
final_vectorized_features = final_vectorized_features.drop(column)
    # Inspect the finalized features
final_vectorized_features.show()
#
    # Cross-validate, train and evaluate the classifier: 5 iterations over 4 metrics
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
print("\nRun {} out of {} of test/train splits in cross validation...".format(
i,
split_count,
)
)
        # Split the data into training and test sets
training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
        # Instantiate and fit a random forest classifier on all the data
from pyspark.ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
featuresCol="Features_vec",
labelCol="ArrDelayBucket",
predictionCol="Prediction",
maxBins=4657,
)
model = rfc.fit(training_data)
        # Save the new model over the old one
model_output_path = "{}/models/spark_random_forest_classifier.flight_delays.baseline.bin".format(
base_path
)
model.write().overwrite().save(model_output_path)
        # Evaluate the model against the test data
predictions = model.transform(test_data)
        # Evaluate the results of this test/train split for each metric
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
for metric_name in metric_names:
evaluator = MulticlassClassificationEvaluator(
labelCol="ArrDelayBucket",
predictionCol="Prediction",
metricName=metric_name
)
score = evaluator.evaluate(predictions)
scores[metric_name].append(score)
print("{} = {}".format(metric_name, score))
#
        # Collect the feature importances
#
feature_names = vector_assembler.getInputCols()
feature_importance_list = model.featureImportances
for feature_name, feature_importance in zip(feature_names, feature_importance_list):
feature_importances[feature_name].append(feature_importance)
#
    # Evaluate the average and standard deviation of each metric and print as a table
#
import numpy as np
score_averages = defaultdict(float)
    # Compute the table data
average_stds = [] # ha
for metric_name in metric_names:
metric_scores = scores[metric_name]
average_accuracy = sum(metric_scores) / len(metric_scores)
score_averages[metric_name] = average_accuracy
std_accuracy = np.std(metric_scores)
average_stds.append((metric_name, average_accuracy, std_accuracy))
    # Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
    # Persist the scores to a score log kept between runs
#
import pickle
    # Load the score log or initialize an empty one
try:
score_log_filename = "{}/models/score_log.pickle".format(base_path)
score_log = pickle.load(open(score_log_filename, "rb"))
if not isinstance(score_log, list):
score_log = []
except IOError:
score_log = []
    # Compute the existing score log entry
score_log_entry = {metric_name: score_averages[metric_name] for metric_name in metric_names}
    # Compute and display the change in score for each metric
try:
last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
run_delta = score_log_entry[metric_name] - last_log[metric_name]
experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
    # Append the existing average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
    # Analyze and report feature importance changes
#
    # Compute the average importance for each feature
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
average_importance = sum(value_list) / len(value_list)
feature_importance_entry[feature_name] = average_importance
    # Sort the feature importances in descending order and print them
import operator
sorted_feature_importances = sorted(
feature_importance_entry.items(),
key=operator.itemgetter(1),
reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
    # Compare this run's feature importances with the previous run's
#
    # Load the feature importance log or initialize an empty one
try:
feature_log_filename = "{}/models/feature_log.pickle".format(base_path)
feature_log = pickle.load(open(feature_log_filename, "rb"))
if not isinstance(feature_log, list):
feature_log = []
except IOError:
feature_log = []
    # Compute and display the change in importance for each feature
try:
last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
last_feature_log = defaultdict(float)
for feature_name, importance in feature_importance_entry.items():
last_feature_log[feature_name] = importance
    # Compute the delta values
feature_deltas = {}
for feature_name in feature_importances.keys():
run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
feature_deltas[feature_name] = run_delta
    # Sort the feature deltas so the features with the largest changes come first
import operator
sorted_feature_deltas = sorted(
feature_deltas.items(),
key=operator.itemgetter(1),
reverse=True
)
    # Display the sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
    # Append the existing average delta values to the log
feature_log.append(feature_importance_entry)
    # Persist the log for the next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
if __name__ == "__main__":
main(sys.argv[1])
| 30.607558 | 113 | 0.690759 |
37fb5576a209175df8ff268d4c7d651d74f53d74 | 9,977 | py | Python | FSM Equivalence Checker/trace2dot.py | sizaif/DIKEUE | ed13e16e560003ae9561db6a39662f321b01ef60 | [
"Apache-2.0"
] | 3 | 2021-11-16T05:09:23.000Z | 2022-03-19T21:51:27.000Z | FSM Equivalence Checker/trace2dot.py | sizaif/DIKEUE | ed13e16e560003ae9561db6a39662f321b01ef60 | [
"Apache-2.0"
] | null | null | null | FSM Equivalence Checker/trace2dot.py | sizaif/DIKEUE | ed13e16e560003ae9561db6a39662f321b01ef60 | [
"Apache-2.0"
] | 2 | 2021-11-18T00:33:28.000Z | 2021-12-15T05:06:21.000Z | #!/usr/bin/env python
"""
simple script to visualize the trace output of smv / NuSMV
via Graphviz's dot-format. first, the trace is parsed and
then coded as dot-graph with states as nodes and input
(transitions) as arcs between them. even if the counterexample's
loop start- and end-state are the same, they are represented by
two different nodes as there can be differences in the completeness
of the state variables' representation.
this is only a simple hack to get quick and dirty trace graphs ;-)
"""
import os,sys,getopt
from collections import OrderedDict
digraph = ""
try:
import pydot
except:
print ("this module depends on pydot\nplease visit http://dkbza.org/ to obtain these bindings")
sys.exit(2)
# CHANGE HERE:
VIEW_CMD="gv -antialias" #used as: VIEW_CMD [file]
DOT_CMD="dot -Tps -o" #used as: DOT_CMD [outfile] [infile]
TEMPDIR="/tmp" #store dot-file and rendering if viewmode without output-file
# for internal purposes, change only, if you know, what you do
DEBUG=False
PROFILE=False
PSYCO=True
if PSYCO:
try:
import psyco
except (ImportError, ):
pass
else:
psyco.full()
if DEBUG: print ("psyco enabled")
def trace2dotlist(traces):
"""this function takes the trace output of (nu)smv as a string;
then, after cleaning the string of warnings and compiler messages,
decomposes the output into separate single traces which are translated
    to dot-graphs by _singletrace2dot. as a trace output can combine several
traces, this method returns a list of the dot-graphs"""
# beautify ;-)
lines = [line for line in traces if not (line.startswith("***") or
line.startswith("WARNING") or line == "\n")]
map(lambda x: x.lstrip(" "), lines)
#print ('lines = \n', lines)
# cut list at each "-- specification"
index=0
trace_list=[]
# trace_list = traces for multiple properties.
# each trace consists of sequence of states.
# each state consists of a list of variables and their values
for line in lines:
if (line.startswith("-- no counterexample found with bound")):
index = lines.index(line)
continue
elif line.startswith("-- specification"):
# TODO: need to commemnt out the following line
formulae = line.rstrip("is false\n").lstrip("-- specification")
#print ('formulae = ', formulae)
last = index
index = lines.index(line)
trace_list.append(lines[last: index])
trace_list.append(lines[index: len(lines)])
#sort out postive results. And filter out the empty trace.
trace_list = [trace for trace in trace_list if len(trace)>1 and not str(trace[0]).endswith("true")]
#print ('### trace_list = #### ', trace_list)
# Draw graph for each trace
graph=[]
for trace in trace_list:
graph.append(_singletrace2dot(trace,True))
return graph
def _singletrace2dot(trace,is_beautified=False):
"""translate a single trace into a corresponding dot-graph;
wheras the parsing assumes a correct trace given as
trace ::= state ( input state )*
"""
# if not is_beautified:
# lines = [line for line in trace if not (line.startswith("***") or
# line.startswith("WARNING") or line == "\n"
# or line.startswith("-- specification") or line.startswith("-- as demonstrated")
# or line.startswith("Trace Description: ") or line.startswith("Trace Type: "))]
# map(lambda x: x.lstrip(" "), lines)
# else:
# lines = trace
# strip the headers of each trace.
global digraph
lines = []
#print ('trace = ', trace)
for line in trace:
#print(line)
if( not (line.startswith("***") or
line.startswith("WARNING") or line == "\n"
or line.startswith("-- specification") or line.startswith("-- as demonstrated")
or line.startswith("Trace Description: ") or line.startswith("Trace Type: "))):
lines.append(line.lstrip(" "))
#print (lines)
#slice list at "->"
index=0
states=[]
for item in lines:
#print ('item = ', item)
if item.startswith("->"):
last=index
index=lines.index(item)
states.append(lines[last:index]) # the first state is empty
states.append(lines[index:len(lines)])
#print ('states', states)
lines=False #free space!
graph = pydot.Graph()
loop=False #flag to finally add an additional dotted edge for loop
assert states[1][0].startswith("-> State:") #starting with state!
digraph = 'Digraph G{\n'
digraph += 'rankdir=LR\n'
stateVariablesDict = OrderedDict()
counter = 0
for item in states[1:]: #first item is header
name= item[0].lstrip("-> ").rstrip(" <-\n")
if (name.startswith("State")):
state=name.lstrip("State: ")
node=pydot.Node(state)
props=name+'\\n' #to reach pydotfile: need double '\'
digraph = digraph + 'S' + str(counter) + '[shape=box,label=\"' + name + '\\n'
counter = counter + 1
#print (name)
for i in (item[1:]):
#props+=i.rstrip('\n')
#props+="\\n"
isNewValue = False
s = str(i).rstrip('\n')
variable = s[:s.rfind('=')].strip()
value = s[s.rfind('=')+1:].strip()
if(variable not in stateVariablesDict):
isNewValue = False
else:
(val, newValInd) = stateVariablesDict[variable]
if(str(val) != str(value)):
isNewValue = True
stateVariablesDict[variable] = (value, isNewValue)
#stateVariablesList = [[k, v] for k, v in stateVariablesDict.items()]
for var, (val, newValInd) in stateVariablesDict.items():
if(newValInd == True):
props += '*' + str(var) + ' = ' + str(val) + '\\n'
digraph = digraph + '*' + str(var) + ' = ' + str(val) + '\\n'
else:
props += str(var) + ' = ' + str(val) + '\\n'
digraph = digraph + str(var) + ' = ' + str(val) + '\\n'
node.set_label('"'+props+'"')
digraph = digraph + '\"]\n'
graph.add_node(node)
for var, (val, newValInd) in stateVariablesDict.items():
stateVariablesDict[var] = (val, False)
elif name.startswith("Input"):
assert state #already visited state
trans=name.lstrip("Input: ")
edge=pydot.Edge(state,trans)
hasLoop = [it for it in item[1:] if it.startswith("-- Loop starts here")]
#TODO: check trace-syntax, if this can happen only in the last line of a transition
# then list-compreh. can be avoided
if hasLoop:
loop=state #remember state at which loop starts
item.remove(hasLoop[0])
props=""
for i in (item[1:]):
props+=i.rstrip('\n')
props+="\\n"
edge.set_label(props)
graph.add_edge(edge)
else:
assert False #only states and transitions!
if loop:
edge=pydot.Edge(state,loop)
edge.set_style("dotted,bold")
edge.set_label(" LOOP")
graph.add_edge(edge)
for i in range(1, counter):
digraph = digraph + 'S' + str(i-1) + ' -> ' + 'S' + str(i) + '\n'
digraph = digraph + '\n}\n'
return graph
def usage():
print ("usage:")
print (str(os.path.basename(sys.argv[0]))+" [-h|--help] [-o|--output=<filename>] filename")
print ()
print (" -o : output to file (else to std.output)")
print (" --view : generate preview & open viewer")
def main():
global digraph
try:
opts, args = getopt.getopt(sys.argv[1:], "hvo:", ["view","help","output="])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
outputfilename = None
verbose = False
view=False
tempdir=None
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
if o in ("-o", "--output"):
outputfilename = a
if o == "--view":
view=True
if args.__len__():
filename=args[0]
trace = open(filename,'r').readlines()
#trace.close()
else:
trace=sys.stdin.readlines()
graph= trace2dotlist(trace)
if outputfilename:
outputfile=open(outputfilename,'w')
elif view:
import tempfile
tempdir=tempfile.mkdtemp(dir=TEMPDIR)
outputfilename=os.path.join(tempdir,"trace.dot")
outputfile=open(outputfilename,'w')
else:
outputfile=sys.stdout
for g in graph:
outputfile.write(g.to_string())
outputfile.close()
# Draw Digraph:
#print (digraph)
outputfilename = str(outputfilename) + '_digraph.dot'
outputfile = open(outputfilename, 'w')
outputfile.write(digraph)
outputfile.close()
if view:
if not tempdir: #for view & output
import tempfile
tempdir=tempfile.mkdtemp(dir=TEMPDIR)
visualgraphfile=os.path.join(tempdir,"trace.ps")
os.system("%s %s %s"%(DOT_CMD,visualgraphfile,outputfilename))
os.system("%s %s"%(VIEW_CMD,visualgraphfile))
#
if __name__=="__main__":
if DEBUG:
# for post-mortem debugging
import pydb,sys
sys.excepthook = pydb.exception_hook
elif PROFILE:
if PSYCO:
            raise Exception("cannot profile whilst using psyco!!!")
import hotshot
prof = hotshot.Profile("_hotshot",lineevents=1)
prof.runcall(main)
prof.close()
else:
main()
| 31.773885 | 103 | 0.573519 |
d50f7f0e1065f7b8f4ec000bc8954587328fe0ad | 4,141 | py | Python | pandasio/utils/tests/test_binary.py | BrianKopp/pandas-io | cbab1146289a6fdbbd2ff7e3aaa55ff64e228fb7 | [
"MIT"
] | 1 | 2019-05-11T22:09:35.000Z | 2019-05-11T22:09:35.000Z | pandasio/utils/tests/test_binary.py | BrianKopp/pandas-io | cbab1146289a6fdbbd2ff7e3aaa55ff64e228fb7 | [
"MIT"
] | null | null | null | pandasio/utils/tests/test_binary.py | BrianKopp/pandas-io | cbab1146289a6fdbbd2ff7e3aaa55ff64e228fb7 | [
"MIT"
] | null | null | null | from pandasio.utils.binary import determine_required_bytes_unsigned_integer, read_unsigned_int, \
determine_required_bytes_signed_integer
from pandasio.utils.exceptions import (
IntegerLargerThan64BitsException,
IntegerNotUnsignedException,
NotIntegerException
)
import unittest
class TestBinaryUtils(unittest.TestCase):
def test_determine_byte_requirements(self):
with self.assertRaises(IntegerNotUnsignedException):
determine_required_bytes_unsigned_integer(-1)
with self.assertRaises(NotIntegerException):
determine_required_bytes_unsigned_integer(None)
with self.assertRaises(NotIntegerException):
determine_required_bytes_unsigned_integer([])
self.assertEqual(1, determine_required_bytes_unsigned_integer(0))
self.assertEqual(1, determine_required_bytes_unsigned_integer(1))
self.assertEqual(1, determine_required_bytes_unsigned_integer(2))
self.assertEqual(1, determine_required_bytes_unsigned_integer(3))
self.assertEqual(1, determine_required_bytes_unsigned_integer(255))
self.assertEqual(2, determine_required_bytes_unsigned_integer(256))
self.assertEqual(2, determine_required_bytes_unsigned_integer(65535))
self.assertEqual(4, determine_required_bytes_unsigned_integer(65536))
self.assertEqual(4, determine_required_bytes_unsigned_integer(4294967295))
self.assertEqual(8, determine_required_bytes_unsigned_integer(4294967296))
self.assertEqual(8, determine_required_bytes_unsigned_integer(18446744073709551615))
with self.assertRaises(IntegerLargerThan64BitsException):
determine_required_bytes_unsigned_integer(18446744073709551616)
return
def test_signed_int_bytes(self):
with self.assertRaises(NotIntegerException):
determine_required_bytes_signed_integer(None)
with self.assertRaises(NotIntegerException):
determine_required_bytes_signed_integer([])
self.assertEqual(1, determine_required_bytes_signed_integer(0))
self.assertEqual(1, determine_required_bytes_signed_integer(1))
self.assertEqual(1, determine_required_bytes_signed_integer(2))
self.assertEqual(1, determine_required_bytes_signed_integer(3))
self.assertEqual(1, determine_required_bytes_signed_integer(-1))
self.assertEqual(1, determine_required_bytes_signed_integer(-2))
self.assertEqual(1, determine_required_bytes_signed_integer(-3))
self.assertEqual(1, determine_required_bytes_signed_integer(127))
self.assertEqual(1, determine_required_bytes_signed_integer(-128))
self.assertEqual(2, determine_required_bytes_signed_integer(128))
self.assertEqual(2, determine_required_bytes_signed_integer(-129))
self.assertEqual(2, determine_required_bytes_signed_integer(32767))
self.assertEqual(2, determine_required_bytes_signed_integer(-32768))
self.assertEqual(4, determine_required_bytes_signed_integer(32768))
self.assertEqual(4, determine_required_bytes_signed_integer(-32769))
self.assertEqual(4, determine_required_bytes_signed_integer(2147483647))
self.assertEqual(4, determine_required_bytes_signed_integer(-2147483648))
self.assertEqual(8, determine_required_bytes_signed_integer(2147483648))
self.assertEqual(8, determine_required_bytes_signed_integer(-2147483649))
with self.assertRaises(IntegerLargerThan64BitsException):
determine_required_bytes_signed_integer(9223372036854775808)
return
def test_read_unsigned_int(self):
self.assertEqual(0, read_unsigned_int(b'\x00'))
self.assertEqual(0, read_unsigned_int(b'\x00\x00'))
self.assertEqual(0, read_unsigned_int(b'\x00\x00\x00'))
self.assertEqual(0, read_unsigned_int(b'\x00\x00\x00\x00'))
self.assertEqual(1, read_unsigned_int(b'\x01\x00'))
self.assertEqual(255, read_unsigned_int(bytes([255])))
self.assertEqual(256, read_unsigned_int(b'\x00\x01'))
return
if __name__ == '__main__':
unittest.main()
| 51.123457 | 97 | 0.765274 |
f413e5221b44192deeca79bf84f7a89b86acbb30 | 5,336 | py | Python | syntropy_sdk/models/change_path_object_data_costs.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | 1 | 2020-12-17T17:30:12.000Z | 2020-12-17T17:30:12.000Z | syntropy_sdk/models/change_path_object_data_costs.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | syntropy_sdk/models/change_path_object_data_costs.py | SyntropyNet/syntropy-python-sdk | 27b7756b136f83886fd2a6e342fa4d4073779ff7 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
syntropy-controller
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ChangePathObjectDataCosts(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
"price": "float",
"latency": "float",
"jitter": "float",
"bandwidth": "float",
}
attribute_map = {
"price": "price",
"latency": "latency",
"jitter": "jitter",
"bandwidth": "bandwidth",
}
def __init__(
self, price=None, latency=None, jitter=None, bandwidth=None
): # noqa: E501
"""ChangePathObjectDataCosts - a model defined in Swagger""" # noqa: E501
self._price = None
self._latency = None
self._jitter = None
self._bandwidth = None
self.discriminator = None
if price is not None:
self.price = price
if latency is not None:
self.latency = latency
if jitter is not None:
self.jitter = jitter
if bandwidth is not None:
self.bandwidth = bandwidth
@property
def price(self):
"""Gets the price of this ChangePathObjectDataCosts. # noqa: E501
:return: The price of this ChangePathObjectDataCosts. # noqa: E501
:rtype: float
"""
return self._price
@price.setter
def price(self, price):
"""Sets the price of this ChangePathObjectDataCosts.
:param price: The price of this ChangePathObjectDataCosts. # noqa: E501
:type: float
"""
self._price = price
@property
def latency(self):
"""Gets the latency of this ChangePathObjectDataCosts. # noqa: E501
:return: The latency of this ChangePathObjectDataCosts. # noqa: E501
:rtype: float
"""
return self._latency
@latency.setter
def latency(self, latency):
"""Sets the latency of this ChangePathObjectDataCosts.
:param latency: The latency of this ChangePathObjectDataCosts. # noqa: E501
:type: float
"""
self._latency = latency
@property
def jitter(self):
"""Gets the jitter of this ChangePathObjectDataCosts. # noqa: E501
:return: The jitter of this ChangePathObjectDataCosts. # noqa: E501
:rtype: float
"""
return self._jitter
@jitter.setter
def jitter(self, jitter):
"""Sets the jitter of this ChangePathObjectDataCosts.
:param jitter: The jitter of this ChangePathObjectDataCosts. # noqa: E501
:type: float
"""
self._jitter = jitter
@property
def bandwidth(self):
"""Gets the bandwidth of this ChangePathObjectDataCosts. # noqa: E501
:return: The bandwidth of this ChangePathObjectDataCosts. # noqa: E501
:rtype: float
"""
return self._bandwidth
@bandwidth.setter
def bandwidth(self, bandwidth):
"""Sets the bandwidth of this ChangePathObjectDataCosts.
:param bandwidth: The bandwidth of this ChangePathObjectDataCosts. # noqa: E501
:type: float
"""
self._bandwidth = bandwidth
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value)
)
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict")
else item,
value.items(),
)
)
else:
result[attr] = value
if issubclass(ChangePathObjectDataCosts, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ChangePathObjectDataCosts):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.364103 | 119 | 0.571402 |
01347536c2c2020872bc115a011eab009dc8c69d | 751 | py | Python | test/unit/rules/parameters/test_configuration.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,134 | 2019-03-02T14:58:34.000Z | 2021-05-15T00:57:16.000Z | test/unit/rules/parameters/test_configuration.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 1,122 | 2019-03-03T04:27:15.000Z | 2021-05-14T20:51:16.000Z | test/unit/rules/parameters/test_configuration.py | tomislacker/cfn-python-lint | f209ddfef9bcc1a005adfebcfcc16220b18deddb | [
"MIT-0"
] | 297 | 2019-03-11T09:56:57.000Z | 2021-05-14T16:41:19.000Z | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.parameters.Configuration import Configuration # pylint: disable=E0401
class TestParameterConfiguration(BaseRuleTestCase):
"""Test template parameter configurations"""
def setUp(self):
"""Setup"""
super(TestParameterConfiguration, self).setUp()
self.collection.register(Configuration())
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/parameters/configuration.yaml', 7)
| 31.291667 | 97 | 0.719041 |
3991d30c11043707a184da175a9ffae8a84746f1 | 829 | py | Python | kedro-intro/src/kedro_intro/hooks.py | avallarino-ar/kedro_lab | e91bf61c21978f18222e4c7affef39e4707890b8 | [
"MIT"
] | 11 | 2022-02-06T18:01:29.000Z | 2022-02-23T15:51:48.000Z | kedro-intro/src/kedro_intro/hooks.py | avallarino-ar/kedro_lab | e91bf61c21978f18222e4c7affef39e4707890b8 | [
"MIT"
] | 6 | 2022-03-12T02:21:28.000Z | 2022-03-20T11:44:29.000Z | kedro-intro/src/kedro_intro/hooks.py | avallarino-ar/kedro_lab | e91bf61c21978f18222e4c7affef39e4707890b8 | [
"MIT"
] | 6 | 2021-09-24T16:12:02.000Z | 2021-12-12T18:31:14.000Z | """Project hooks."""
from typing import Any, Dict, Iterable, Optional
from kedro.config import ConfigLoader
from kedro.framework.hooks import hook_impl
from kedro.io import DataCatalog
from kedro.versioning import Journal
class ProjectHooks:
@hook_impl
def register_config_loader(
self, conf_paths: Iterable[str], env: str, extra_params: Dict[str, Any],
) -> ConfigLoader:
return ConfigLoader(conf_paths)
@hook_impl
def register_catalog(
self,
catalog: Optional[Dict[str, Dict[str, Any]]],
credentials: Dict[str, Dict[str, Any]],
load_versions: Dict[str, str],
save_version: str,
journal: Journal,
) -> DataCatalog:
return DataCatalog.from_config(
catalog, credentials, load_versions, save_version, journal
)
| 28.586207 | 80 | 0.674306 |
cf7b26098ec943c1cec36af08a970bcbd978abcc | 5,758 | py | Python | sdk/python/pulumi_azure/lb/backend_address_pool.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/lb/backend_address_pool.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/lb/backend_address_pool.py | kenny-wealth/pulumi-azure | e57e3a81f95bf622e7429c53f0bff93e33372aa1 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class BackendAddressPool(pulumi.CustomResource):
backend_ip_configurations: pulumi.Output[list]
"""
The Backend IP Configurations associated with this Backend Address Pool.
"""
load_balancing_rules: pulumi.Output[list]
"""
The Load Balancing Rules associated with this Backend Address Pool.
"""
loadbalancer_id: pulumi.Output[str]
"""
The ID of the Load Balancer in which to create the Backend Address Pool.
"""
location: pulumi.Output[str]
name: pulumi.Output[str]
"""
Specifies the name of the Backend Address Pool.
"""
resource_group_name: pulumi.Output[str]
"""
The name of the resource group in which to create the resource.
"""
def __init__(__self__, resource_name, opts=None, loadbalancer_id=None, location=None, name=None, resource_group_name=None, __props__=None, __name__=None, __opts__=None):
"""
Manages a Load Balancer Backend Address Pool.
> **NOTE:** When using this resource, the Load Balancer needs to have a FrontEnd IP Configuration Attached
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Backend Address Pool.
:param pulumi.Input[str] name: Specifies the name of the Backend Address Pool.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/lb_backend_address_pool.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if loadbalancer_id is None:
raise TypeError("Missing required property 'loadbalancer_id'")
__props__['loadbalancer_id'] = loadbalancer_id
__props__['location'] = location
__props__['name'] = name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['backend_ip_configurations'] = None
__props__['load_balancing_rules'] = None
super(BackendAddressPool, __self__).__init__(
'azure:lb/backendAddressPool:BackendAddressPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, backend_ip_configurations=None, load_balancing_rules=None, loadbalancer_id=None, location=None, name=None, resource_group_name=None):
"""
Get an existing BackendAddressPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] backend_ip_configurations: The Backend IP Configurations associated with this Backend Address Pool.
:param pulumi.Input[list] load_balancing_rules: The Load Balancing Rules associated with this Backend Address Pool.
:param pulumi.Input[str] loadbalancer_id: The ID of the Load Balancer in which to create the Backend Address Pool.
:param pulumi.Input[str] name: Specifies the name of the Backend Address Pool.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/r/lb_backend_address_pool.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["backend_ip_configurations"] = backend_ip_configurations
__props__["load_balancing_rules"] = load_balancing_rules
__props__["loadbalancer_id"] = loadbalancer_id
__props__["location"] = location
__props__["name"] = name
__props__["resource_group_name"] = resource_group_name
return BackendAddressPool(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 50.508772 | 175 | 0.697464 |
6865b9982e6a2ff3ea84fd9d651a105791078365 | 3,221 | py | Python | app/recipe/tests/test_tags_api.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | app/recipe/tests/test_tags_api.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Tag, Recipe
from recipe.serializers import TagSerializer
TAGS_URL = reverse('recipe:tag-list')
class PublicTagsApiTests(TestCase):
# Test the publicly available tags API
def setUp(self):
self.client = APIClient()
'''def test_login_required(self):
# test the login is required for retrieving tags
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)'''
class PrivateTagsApiTests(TestCase):
# Test the authorized user tags API
def setUp(self):
self.user = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_tags(self):
# Test retrieving tags
Tag.objects.create(user=self.user, name='Vegan')
Tag.objects.create(user=self.user, name='Dessert')
res = self.client.get(TAGS_URL)
tags = Tag.objects.all().order_by('-name')
serializer = TagSerializer(tags, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_tags_limited_to_user(self):
# Testing that tags returned are for the authnticated user
user2 = get_user_model().objects.create_user(
'[email protected]',
'password123'
)
Tag.objects.create(user=user2, name='Fruity')
tag = Tag.objects.create(user=self.user, name='Comfort Food')
res = self.client.get(TAGS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], tag.name)
def test_create_tag_successful(self):
# Test create a new tag
payload = {'name': 'Test tag'}
self.client.post(TAGS_URL, payload)
exists = Tag.objects.filter(
user=self.user,
name=payload['name']
).exists()
self.assertTrue(exists)
def test_create_tag_invalid(self):
# Test creating a new tag with invalid payload
payload = {'name': ''}
res = self.client.post(TAGS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_tags_assigned_to_recipe(self):
# Test filter tags by those assigned to recipes
tag1 = Tag.objects.create(user=self.user, name='Breakfast')
tag2 = Tag.objects.create(user=self.user, name='Lunch')
recipe = Recipe.objects.create(
title='Coriander eggs on toast',
time_minutes=10,
price=45.00,
user=self.user
)
recipe.tags.add(tag1)
res = self.client.get(TAGS_URL, {'assigned_only': 1})
serializer1 = TagSerializer(tag1)
serializer2 = TagSerializer(tag2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
| 32.535354 | 74 | 0.652903 |
8921100f6af7a1a68238269131cbcbe0121a025b | 29,247 | py | Python | ChemicalReactorNetwork/combustion_chamber_design.py | giovaniceotto/Noelle | 436a91a6e2a2baf5ede419e9633cdf3479213786 | [
"MIT"
] | 6 | 2020-08-31T12:29:03.000Z | 2022-01-10T01:35:24.000Z | ChemicalReactorNetwork/combustion_chamber_design.py | giovaniceotto/LMS | 436a91a6e2a2baf5ede419e9633cdf3479213786 | [
"MIT"
] | 2 | 2020-07-27T18:12:57.000Z | 2020-08-06T02:20:14.000Z | ChemicalReactorNetwork/combustion_chamber_design.py | giovaniceotto/LMS | 436a91a6e2a2baf5ede419e9633cdf3479213786 | [
"MIT"
] | 2 | 2021-06-11T01:44:46.000Z | 2021-06-14T05:01:41.000Z | import numpy as np
# np.set_printoptions(precision=2)
import scipy.integrate
import scipy.signal
from rocketpy import Function
import CoolProp.CoolProp as CoolProp
from CoolProp.CoolProp import PropsSI
import cantera as ct
print('Running Cantera version: ' + ct.__version__)
# Matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['figure.figsize'] = [10.0, 6.0]
mpl.rcParams['figure.dpi'] = 120
mpl.rcParams['savefig.dpi'] = 120
font = {'weight' : 'bold',
'size' : 17}
mpl.rc('font', **font)
plt.style.use(['science', 'grid'])
def create_solution_mechanism():
# Defining Reaction Mechanism
# Mechanism II - Marinov + Mevel
marinov_species = ct.Species.listFromFile('marinov_ethanol_mechanism.cti')
marinov_reactions = ct.Reaction.listFromFile('marinov_ethanol_mechanism.cti')
mevel_species = ct.Species.listFromFile('mevel_ethanol_mechanism.cti')
mevel_reactions = ct.Reaction.listFromFile('mevel_ethanol_mechanism.cti')
new_species = []
new_reactions = []
# Filter species
for specie in mevel_species:
# Include all nitrogen compounds except for N2
if 'N' in specie.composition and specie.composition != {'N':2}: new_species.append(specie)
new_species_names = {specie.name for specie in new_species}
# print('N based species: {0}'.format(', '.join(name for name in new_species_names)))
marinov_mevel_species = marinov_species + new_species
marinov_mevel_species_names = {specie.name.upper() for specie in marinov_mevel_species}
# Filter reactions, keeping only those that only involve the selected species
# print('\nReactions:')
for R in mevel_reactions:
if any(reactant in new_species_names for reactant in R.reactants) or any(product in new_species_names for product in R.products):
# for reactant in R.reactants:
# if reactant not in marinov_mevel_species_names:
# print('Missing reactant:', reactant, 'when analyzing reaction', R.equation)
# for product in R.products:
# if product not in marinov_mevel_species_names:
# print('Missing product:', product, 'when analyzing reaction', R.equation)
if all(reactant in marinov_mevel_species_names for reactant in R.reactants):
if all(product in marinov_mevel_species_names for product in R.products):
new_reactions.append(R)
# print('Accepted reaction:', R.equation)
# print('\n')
marinov_mevel_species = marinov_species + new_species
marinov_mevel_reactions = marinov_reactions + new_reactions
marinov_mevel_gas = ct.Solution(
thermo='IdealGas',
kinetics='GasKinetics',
species=marinov_mevel_species,
reactions=marinov_mevel_reactions
)
marinov_mevel_gas = ct.Solution('sandiego2016_plus_N_CK.cti')
print('Number of species:', marinov_mevel_gas.n_species)
print('Number of reactions:', marinov_mevel_gas.n_reactions)
return marinov_mevel_gas
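# Minimal usage sketch (assumes the mechanism files above are on Cantera's
# search path; the state values are placeholders, not design-point data):
#
#   gas = create_solution_mechanism()
#   gas.TP = 300.0, ct.one_atm
#   gas.set_equivalence_ratio(1.0, fuel='C2H5OH', oxidizer='N2O')
#   gas.equilibrate('HP')   # quick adiabatic-flame sanity check
#   print(gas.T)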
# Droplet Fed Variable Area Plug Flow Reactor Model
class NoelleReactor(object):
def __init__(self, gas, area, liquid, N_dot, q_dot_prime=0):
# Parameters of the ODE system and auxiliary data are stored in the
# ReactorOde object.
self.gas = gas
self.Tmin = 1
self.A = area
self.dA_dx = Function(lambda x: area.differentiate(x))
self.N_dot = N_dot
# Liquid information - always at boiling temperature
self.droplets_exist = True
self.liquid = liquid
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
## Density
self.rho_l = self.liquid.rhomass()
## Boiling temperature
self.T_l = self.liquid.T()
## Heat of vaporization
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 1)
h_v = self.liquid.hmass()
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
h_l = self.liquid.hmass()
self.h_fg = h_v - h_l
# Heat Loss due to Regenerative Cooling
self.q_dot_prime = 0.0
self.last_x = -100
def state_derivate_with_droplets(self, x, u):
""" ODE function u'= f(x, u).
Parameters
----------
x : float
Axial position in meters.
u : np.array
State variable. Variables are:
u[0] = D^2 (droplet diameter (SMD) squared)
u[1] = ml (liquid phase flow rate)
u[2] = mg (gas phase flow rate)
u[3] = v_d (droplet velocity)
u[4] = rho_g (gas phase density)
u[5] = T (gas phase temperature)
u[6:6+N] = Y_i (mass fraction of the i-th species, out of N species)
"""
# Get variables from state variable
self.droplets_exist = False
D2, ml, mg, v_d, rho_g, T = u[:6]
if D2 <= 0 or rho_g <= 0 or T <=0:
return 0*u
D = (D2**0.5)*1e-6
Y = u[6:]
A = self.A(x)
dA_dx = self.dA_dx(x)
rho_g = max(0.5, rho_g)
v_g = mg/(rho_g*A)
rho_l = self.rho_l
# Update gas state
self.gas.set_unnormalized_mass_fractions(Y)
self.gas.TP = T, rho_g*ct.gas_constant*T/(self.gas.mean_molecular_weight)
if self.gas.P > 1e7:
self.gas.TP = T, 2e6
# Get cp, MW, omega, etc
R_u = ct.gas_constant
cp = self.gas.cp
omega_i = self.gas.net_production_rates
MW_i = self.gas.molecular_weights
MW_mix = self.gas.mean_molecular_weight
h_i = self.gas.partial_molar_enthalpies
mu_g = self.gas.viscosity
# Compute dD^2/dx
T_bar = 0.5*T + 0.5*self.T_l
try:
# Update states
self.gas.TP = T_bar, self.gas.P
self.liquid.update(CoolProp.PT_INPUTS, self.gas.P, T)
# Calculate K
k_v = self.liquid.conductivity()
k_inf = self.gas.thermal_conductivity # PropsSI('conductivity','T', T, 'P', P, 'Air')
kg = 0.4*k_v + 0.6*k_inf
c_pv = self.liquid.cpmass()
K = 8*kg/(rho_l*c_pv) * np.log(1 + c_pv*(T - self.T_l)/self.h_fg)
# Roll back states
self.gas.TP = T, self.gas.P
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
except ValueError as E:
# print(E)
# print('ERROR! State Variable:', u)
# print('Using K = 7.25e-7 to continue.')
K = 7.25e-07
dD2_dx = -K/v_d * 1e12
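        # K above is the classical d^2-law evaporation constant in m^2/s; the
        # state variable stores D^2 in micrometers^2, so the 1e12 factor
        # converts m^2 to um^2 before dividing by the droplet velocity.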
# Compute dml/dx and dmg/dx
dml_dx = np.pi/4 * self.N_dot * rho_l * D * dD2_dx * 1e-12
dmg_dx = -dml_dx
# Compute dv_d/dx
v_rel = v_d - v_g
Re = rho_g*abs(v_d - v_g)*D/mu_g
Cd = 24/Re + 6/(1+np.sqrt(Re)) + 0.4
dv_d_dx = -(3*Cd*rho_g*v_rel**2)/(4*rho_l*v_d*D)*v_rel/abs(v_rel)
# Check Mach Number
# M2 = v_g**2 / (self.gas.cp/self.gas.cv * R_u/MW_mix * T)
# s = 0.0001
# dA_dx *= (1 - np.exp(-((M2-1)/s)**2))
# Compute drho_g/dx
# TODO: verify if droplets affect this equation
drho_g_dx = (
(1 - R_u/(cp*MW_mix)) * (rho_g**2) * (v_g**2) * (dA_dx/A) +
((rho_g*R_u)/(v_g*cp*MW_mix)) * sum(omega_i*(h_i - cp*T*MW_mix))
)/(
self.gas.P*(1+ (v_g**2)/(cp*T)) - (rho_g*v_g**2)
)
# Compute dT/dx
# TODO: remove heat due to cooling and recirculation
self.liquid.update(CoolProp.PT_INPUTS, self.gas.P, T)
h_g = self.liquid.hmass()
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
h_l = self.liquid.hmass()
dT_dx = (
((v_g**2)/(rho_g*cp)) * drho_g_dx +
((v_g**2)/cp) * (dA_dx/A) -
(1/(v_g*rho_g*cp))*sum(h_i*omega_i) +
(h_g - h_l)*dml_dx/(mg*cp)
)
# drho_g_dx2 = rho_g * ( M2 / (1 - M2) * (1/A * dA_dx) )
# dT_dx2 = ( 1 + M2 / (1 - M2)) * ( (1/A * dA_dx) * M2 * T * (self.gas.cp/self.gas.cv - 1) )
# Compute dY_dx
dY_dx = omega_i * MW_i / (rho_g*v_g)
# Add droplet vaporization term to ethanol mass fraction
dY_dx[37] += dmg_dx/mg
return np.hstack(([dD2_dx, dml_dx, dmg_dx, dv_d_dx, drho_g_dx, dT_dx], dY_dx))
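    # Illustrative integration sketch (the reactor/initial-condition names are
    # assumptions, not values taken from this design):
    #
    #   u0 = np.hstack(([D0_um**2, ml_0, mg_0, v_d0, rho_g0, T0], gas.Y))
    #   sol = scipy.integrate.solve_ivp(
    #       reactor.state_derivate_with_droplets, (0.0, x_end), u0,
    #       method='LSODA', max_step=1e-4)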
def state_derivate_vaporization_controlled_combustion(self, x, u):
""" ODE function u'= f(x, u).
Parameters
----------
x : float
Axial position in meters.
u : np.array
State variable. Variables are:
u[0] = D^2 (droplet diameter (SMD) squared)
u[1] = ml (liquid phase flow rate)
u[2] = mg (gas phase flow rate)
u[3] = v_d (droplet velocity)
u[4] = rho_g (gas phase density)
u[5] = T (gas phase temperature)
u[6] = Phi (gas equivalence ratio)
u[7:7+N] = Y_i (mass fraction of the i-th species, out of N species)
"""
# Get variables from state variable
self.droplets_exist = True
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
if D2 <= 0 or rho_g <= 0 or T <=0:
return 0*u
D = (D2**0.5)*1e-6
Y = u[7:]
A = self.A(x)
dA_dx = self.dA_dx(x)
rho_l = self.rho_l
# Update gas state
self.gas.set_equivalence_ratio(phi, fuel='C2H5OH', oxidizer='N2O')
self.gas.TP = T, P # rho_g*ct.gas_constant*T/(self.gas.mean_molecular_weight)
self.gas.equilibrate('TP')
rho_g = self.gas.density
v_g = mg/(rho_g*A)
# Get cp, MW, omega, etc
R_u = ct.gas_constant
cp = self.gas.cp
omega_i = self.gas.net_production_rates
MW_i = self.gas.molecular_weights
MW_mix = self.gas.mean_molecular_weight
h_i = self.gas.partial_molar_enthalpies
mu_g = self.gas.viscosity
# Compute dD^2/dx
T_bar = 0.5*T + 0.5*self.T_l
try:
# Update states
self.gas.TP = T_bar, self.gas.P
self.liquid.update(CoolProp.PT_INPUTS, self.gas.P, T)
# Calculate K
k_v = self.liquid.conductivity()
k_inf = self.gas.thermal_conductivity # PropsSI('conductivity','T', T, 'P', P, 'Air')
kg = 0.4*k_v + 0.6*k_inf
c_pv = self.liquid.cpmass()
K = 8*kg/(rho_l*c_pv) * np.log(1 + c_pv*(T - self.T_l)/self.h_fg)
# Roll back states
self.gas.TP = T, self.gas.P
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
except ValueError as E:
# print(E)
# print('ERROR! State Variable:', u)
# print('Using K = 7.25e-7 to continue.')
K = 7.25e-07
dD2_dx = -K/v_d * 1e12
# Compute dml/dx and dmg/dx
dml_dx = np.pi/4 * self.N_dot * rho_l * D * dD2_dx * 1e-12
dmg_dx = -dml_dx
# Compute dv_d/dx
v_rel = v_d - v_g
Re = rho_g*abs(v_d - v_g)*D/mu_g
Cd = 24/Re + 6/(1+np.sqrt(Re)) + 0.4
dv_d_dx = -(3*Cd*rho_g*v_rel**2)/(4*rho_l*v_d*D)*v_rel/abs(v_rel)
# Compute dphi_dx
FOst = 0.18445603193
dphi_dx = 1/(FOst) * dmg_dx /mg_0
# Compute dT_dx
# h_g = self.enthalpy(T, P, phi)
self.liquid.update(CoolProp.PT_INPUTS, self.gas.P, T)
h_g = self.liquid.hmass()
self.liquid.update(CoolProp.PQ_INPUTS, self.gas.P, 0)
h_l = self.liquid.hmass()
dh_dphi = self.enthalpy_partial_phi(T, P, phi)
dT_dx = ((h_g - h_l)*dml_dx/mg - dh_dphi*dphi_dx + self.q_dot_prime/mg)/cp
return np.hstack(([dD2_dx, dml_dx, dmg_dx, dv_d_dx, 0, dT_dx, dphi_dx], 0*Y))
def state_derivate_reacting_nozzle(self, x, u):
""" ODE function u'= f(x, u).
Parameters
----------
x : float
Axial position in meters.
u : np.array
State variable. Variables are:
u[0] = D^2 (droplet diameter (SMD) squared)
u[1] = ml (liquid phase flow rate)
u[2] = mg (gas phase flow rate)
u[3] = v_d (droplet velocity)
u[4] = rho_g (gas phase density)
u[5] = T (gas phase temperature)
u[6] = 0
u[7:7+N] = Y_i (mass fraction of the i-th species, out of N species)
"""
# Get variables from state variable
self.droplets_exist = False
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
if D2 <= 0 or rho_g <= 0 or T <=0:
return 0*u
Y = u[7:]
A = self.A(x)
dA_dx = self.dA_dx(x)
v_g = mg/(rho_g*A)
rho_l = self.rho_l
# Update gas state
self.gas.set_unnormalized_mass_fractions(Y)
self.gas.TP = T, rho_g*ct.gas_constant*T/(self.gas.mean_molecular_weight)
# Get cp, MW, omega, etc
R_u = ct.gas_constant
cp = self.gas.cp
omega_i = self.gas.net_production_rates
MW_i = self.gas.molecular_weights
MW_mix = self.gas.mean_molecular_weight
h_i = self.gas.partial_molar_enthalpies
mu_g = self.gas.viscosity
dD2_dx = 0
dml_dx = 0
dmg_dx = 0
dv_d_dx = 0
# Check Mach Number
# M2 = v_g**2 / (self.gas.cp/self.gas.cv * R_u/MW_mix * T)
# s = 0.0001
# dA_dx *= (1 - np.exp(-((M2-1)/s)**2))
# Compute drho_g/dx
drho_g_dx = (
(1 - R_u/(cp*MW_mix)) * (rho_g**2) * (v_g**2) * (dA_dx/A) +
((rho_g*R_u)/(v_g*cp*MW_mix)) * sum(omega_i*(h_i - cp*T*MW_mix))
)/(
self.gas.P*(1+ (v_g**2)/(cp*T)) - (rho_g*v_g**2)
)
# Compute dT/dx
dT_dx = (
((v_g**2)/(rho_g*cp)) * drho_g_dx +
((v_g**2)/cp) * (dA_dx/A) -
(1/(v_g*rho_g*cp))*sum(h_i*omega_i) +
self.q_dot_prime/cp/mg
)
# drho_g_dx2 = rho_g * ( M2 / (1 - M2) * (1/A * dA_dx) )
# dT_dx2 = ( 1 + M2 / (1 - M2)) * ( (1/A * dA_dx) * M2 * T * (self.gas.cp/self.gas.cv - 1) )
# Compute dY_dx
dY_dx = omega_i * MW_i / (rho_g*v_g)
return np.hstack(([dD2_dx, dml_dx, dmg_dx, dv_d_dx, drho_g_dx, dT_dx, 0], dY_dx))
def state_derivate_equilibrium_nozzle(self, x, u):
""" ODE function u'= f(x, u).
Parameters
----------
x : float
Axial position in meters.
u : np.array
State variable. Variables are:
u[0] = D^2 (droplet diameter (SMD) squared)
u[1] = ml (liquid phase flow rate)
u[2] = mg (gas phase flow rate)
u[3] = v_d (droplet velocity)
u[4] = rho_g (gas phase density)
u[5] = T (gas phase temperature)
u[6:6+N] = Y_i (mass fraction of the i-th species, out of N species)
"""
# Get variables from state variable
self.droplets_exist = False
D2, ml, mg, v_d, rho_g, T = u[:6]
Y = u[6:]
A = self.A(x)
dA_dx = self.dA_dx(x)
v_g = mg/(rho_g*A)
rho_l = self.rho_l
if rho_g < 0:
print('x:', x, 'r:', rho_g)
rho_g = abs(rho_g)
if T < 0: print(x, T)
T = max(self.Tmin, T)
# Update gas state
# self.gas.set_unnormalized_mass_fractions(Y)
diff = []
for i in range(3):
self.gas.TP = T, rho_g*ct.gas_constant*T/(self.gas.mean_molecular_weight)
self.gas.equilibrate('TP')
diff += [(rho_g - self.gas.density)]
# Get cp, MW, omega, etc
R_u = ct.gas_constant
cp = self.gas.cp
omega_i = self.gas.net_production_rates
MW_i = self.gas.molecular_weights
MW_mix = self.gas.mean_molecular_weight
h_i = self.gas.partial_molar_enthalpies
mu_g = self.gas.viscosity # PropsSI('viscosity','T', T, 'P', P, 'Air') # Pa*s
dD2_dx = 0
dml_dx = 0
dmg_dx = 0
dv_d_dx = 0
dY_dx = 0*Y
# Check Mach Number
M2 = v_g**2 / (self.gas.cp/self.gas.cv * R_u/MW_mix * T)
s = 0.0001
# dA_dx *= (1 - np.exp(-((M2-1)/s)**2))
if abs(M2 - 1) < 1e-6:
dA_dx = abs(dA_dx)
# v_g += 10.0 * (M2 - 1)*abs(M2 - 1)
# print(M2)
# M2 = v_g**2 / (self.gas.cp/self.gas.cv * R_u/MW_mix * T)
print(M2)
# Compute drho_g/dx
drho_g_dx = (
(1 - R_u/(cp*MW_mix)) * (rho_g**2) * (v_g**2) * (dA_dx/A)
)/(
self.gas.P*(1+ (v_g**2)/(cp*T)) - (rho_g*v_g**2)
)
drho_g_dx2 = rho_g * ( M2 / (1 - M2) * (1/A * dA_dx) )
if 100*abs((drho_g_dx2 - drho_g_dx)/drho_g_dx2) > 1.0:
print('x:', x)
print('Delta rho:', drho_g_dx2 - drho_g_dx)
# Compute dT/dx
dT_dx = (
((v_g**2)/(rho_g*cp)) * drho_g_dx +
((v_g**2)/cp) * (dA_dx/A)
)
dT_dx2 = ( 1 + M2 / (1 - M2)) * ( (1/A * dA_dx) * M2 * T * (self.gas.cp/self.gas.cv - 1) )
if 100*abs((dT_dx2 - dT_dx)/dT_dx2) > 1.0:
print('x:', x)
print('Delta T:', 100*abs((dT_dx2 - dT_dx)/dT_dx2))
return np.hstack(([dD2_dx, dml_dx, dmg_dx, dv_d_dx, drho_g_dx, dT_dx], dY_dx))
def enthalpy(self, T, P, phi):
# T: temperature in K
# P: pressure in Pa
# phi: equivalence ratio
# gas.enthalpy_mass: J/kg
# Set initial state
self.gas.TP = T, P
self.gas.set_equivalence_ratio(phi, fuel='C2H5OH', oxidizer='N2O')
# Calculate equilibrium under constant temperature and pressure
self.gas.equilibrate('TP')
return self.gas.enthalpy_mass
def enthalpy_partial_T(self, T, P, phi, dT=1):
return (self.enthalpy(T+dT, P, phi) - self.enthalpy(T, P, phi))/(dT)
def enthalpy_partial_phi(self, T, P, phi, dphi=1e-8):
return (self.enthalpy(T, P, phi+dphi) - self.enthalpy(T, P, phi))/(dphi)
# Setting Up Gas, Reactor and Initial Conditions
## State
T_0 = 2922.58 # K
P = 15e5 # Pa
T_0 = 2000.0 # K
# P = 10e5 # Pa
## Gas
gas = create_solution_mechanism()
gas.TPY = T_0, P, 'N2O: 1.0'
# gas.TP = T_0, P
# gas.set_equivalence_ratio(1.0, fuel='C2H5OH', oxidizer='N2O')
# gas.equilibrate('TP')
## Liquid
liquid = CoolProp.AbstractState("HEOS", "Ethanol") # &Water")
# liquid.set_mass_fractions([0.92, 0.08])
liquid.update(CoolProp.PQ_INPUTS, gas.P, 0)
liquid_density = liquid.rhomass()
## Initial conditions
D_0 = 40.002*1e-6 # droplet SMD in m (40.002 micro m)
D2_0 = (D_0*1e6)**2
ml_0 = 0.314 # kg/s
mg_0 = 1.103 # kg/s
# mg_0 = 1.417 # kg/s
rho_g_0 = gas.density # kg/m3
v_d_0 = 93.75 # m/s
phi_0 = 0.0
## Geometry
# radius = 1.005*0.9395177184726075*Function('nozzle_geometry2.csv', interpolation='linear')
radius = Function('nozzle_geometry.csv', interpolation='linear')
radius.source[:, 1] = scipy.signal.savgol_filter(radius.source[:, 1], 21, 3)
radius.source = radius.source[::3, :]
radius.setInterpolation('spline')
# radius = Function(0.053)
# radius = Function([(0.0, 0.053), (0.1, 0.053), (0.15, 0.0)], interpolation='linear')
area = np.pi*radius**2
## Droplet flow rate
N_dot = 6*ml_0/(liquid_density*np.pi*D_0**3)
## Reactor
q_dot_prime = -0*87.8e3 / 0.0838
reactor = NoelleReactor(gas, area, liquid, N_dot, q_dot_prime)
# reactor.A()
# reactor.dA_dx.plot(-0.275, 0.6)
# Analytical Model - Spalding
k = gas.thermal_conductivity
cp_g = gas.cp
Pr = gas.cp_mass * gas.viscosity / gas.thermal_conductivity
B = 5.35
G_t = (mg_0 + ml_0)/area(0.0)
S = 9*Pr/(2*np.log(1+B))
X0 = rho_g_0 * v_d_0 / G_t
xsi_star = (X0 + 3*S/10)/(S +2)
x_star = xsi_star * G_t * (D_0/2)**2 / (rho_g_0 * np.log(1+B) * k/cp_g/liquid_density)
print(1000*x_star)
# Numerical Integration
## Vaporization-Controlled Combustion
print('Simulating Vaporization-Controlled Combustion')
x_init = -0.275
x_max = 0.060
# x_init = 0.0
# x_max = 0.283
initial_state = np.hstack(([D2_0, ml_0, mg_0, v_d_0, rho_g_0, T_0, phi_0], gas.Y))
def fully_evaporated_event(x, u):
return min(u[0], u[1])
fully_evaporated_event.terminal = True
def choke_event(x, u):
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
Y = u[7:]
gas.set_equivalence_ratio(phi, fuel='C2H5OH', oxidizer='N2O')
A = area(x)
v_g = mg/(rho_g*A)
gas.TP = T, rho_g*ct.gas_constant*T/(gas.mean_molecular_weight)
M2 = v_g**2 / (gas.cp/gas.cv * ct.gas_constant/gas.mean_molecular_weight * T)
return M2 - 1
choke_event.terminal = True
sol_vaporization_controlled_combustion = scipy.integrate.solve_ivp(
fun=reactor.state_derivate_vaporization_controlled_combustion,
t_span=(x_init, x_max),
y0=initial_state,
method='BDF',
t_eval=None,
dense_output=True,
events=[choke_event, fully_evaporated_event],
max_step=0.001
)
print(sol_vaporization_controlled_combustion.status)
### Process solution to compute mass fractions
states = ct.SolutionArray(gas)
for i in range(sol_vaporization_controlled_combustion.y.shape[1]):
u = sol_vaporization_controlled_combustion.y[:, i]
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
gas.set_equivalence_ratio(phi, fuel='C2H5OH', oxidizer='N2O')
gas.TP = T, P
gas.equilibrate('TP')
states.append(gas.state)
sol_vaporization_controlled_combustion.y[4, i] = gas.density
sol_vaporization_controlled_combustion.y[7:, i] = gas.Y
## Reacting Nozzle
print('Simulating Reacting Nozzle - Converging')
# x_init = -0.050
x_max = -0.006625
x_init = sol_vaporization_controlled_combustion.t[-1]
# x_max = 0.283
initial_state = sol_vaporization_controlled_combustion.y[:, -1]
def choke_event(x, u):
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
Y = u[7:]
gas.set_unnormalized_mass_fractions(Y)
A = area(x)
v_g = mg/(rho_g*A)
gas.TP = T, rho_g*ct.gas_constant*T/(gas.mean_molecular_weight)
M2 = v_g**2 / (gas.cp/gas.cv * ct.gas_constant/gas.mean_molecular_weight * T)
return M2 - 1
choke_event.terminal = True
sol_reacting_nozzle_converging = scipy.integrate.solve_ivp(
fun=reactor.state_derivate_reacting_nozzle,
t_span=(x_init, x_max),
y0=initial_state,
method='BDF',
t_eval=None,
dense_output=True,
events=choke_event,
max_step=0.001
)
print(sol_reacting_nozzle_converging.status)
print('Simulating Reacting Nozzle - Diverging')
# x_init = -0.050
x_max = 0.060
x_init = 0.0011452 # sol_reacting_nozzle_converging.t[-1] + 0.00556
# x_max = 0.283
initial_state = sol_reacting_nozzle_converging.y[:, -1]
sol_reacting_nozzle_diverging = scipy.integrate.solve_ivp(
fun=reactor.state_derivate_reacting_nozzle,
t_span=(x_init, x_max),
y0=initial_state,
method='LSODA',
t_eval=None,
dense_output=True,
events=choke_event,
max_step=0.001
)
print(sol_reacting_nozzle_diverging.status)
solution_y = np.hstack([sol_vaporization_controlled_combustion.y, sol_reacting_nozzle_converging.y, sol_reacting_nozzle_diverging.y])
solution_t = np.hstack([1000*sol_vaporization_controlled_combustion.t, 1000*sol_reacting_nozzle_converging.t, 1000*(sol_reacting_nozzle_diverging.t-0.0077702)])
# solution_y = np.hstack([sol_vaporization_controlled_combustion.y, sol_reacting_nozzle_converging.y])
# solution_t = np.hstack([1000*sol_vaporization_controlled_combustion.t, 1000*sol_reacting_nozzle_converging.t])
# Plot
# Hard variables
states = ct.SolutionArray(gas)
pressure = []
sound_speed = []
for u in solution_y.T:
D2, ml, mg, v_d, rho_g, T, phi = u[:7]
Y = u[7:]
gas.set_unnormalized_mass_fractions(Y)
gas.TP = T, rho_g*ct.gas_constant*T/(gas.mean_molecular_weight)
# gas.equilibrate('TP')
states.append(gas.state)
pressure += [gas.P]
sound_speed += [(gas.cp/gas.cv * gas.P/gas.density)**0.5]
sound_speed = np.array(sound_speed)
pressure = np.array(pressure)
# Easy ones
diameter_ratio = np.sqrt(solution_y[0])/(D_0*1e6)
droplet_velocity_ratio = solution_y[3]/solution_y[3][0]
ethanol_mass_fraction = solution_y[37+7]
equivalence_ratio = solution_y[6]
temperature_ratio = solution_y[5]/(1.29*3187.5)
gas_density = solution_y[4]/solution_y[4, 0]
gas_velocity = solution_y[2]/(solution_y[4]*area(solution_t/1000))
gas_mach = gas_velocity/sound_speed
## Ethanol Droplet Plots
# plt.figure(figsize=(10,6))
# plt.plot(solution_t, diameter_ratio, label='Droplet diameter $D/D_0$', linewidth=2)
# plt.plot(solution_t, droplet_velocity_ratio, label='Droplet velocity ratio', linewidth=2)
# plt.plot(solution_t, gas_velocity/solution_y[3][0], label='Gas velocity ratio', linewidth=2)
# plt.plot(solution_t, ethanol_mass_fraction, label=r'Ethanol mass fraction', linewidth=2)
# plt.xlabel('Chamber $x$-coordinate (mm)')
# plt.ylabel('Non-dimensional parameters')
# plt.legend()
# plt.show()
## Nozzle Flow Plots
# plt.figure(figsize=(12,4))
# plt.plot(solution_t, radius(solution_t/1000)/min(radius(solution_t/1000)), linewidth=5, c='k')
# plt.ylim(0, 3.2)
# plt.xlabel('Coordenada Axial $x$ (mm)')
# plt.ylabel('Valores Adimensionais')
# plt.savefig('CRN.svg')
# plt.show()
plt.figure(figsize=(12,4))
# plt.plot(solution_t, radius(solution_t/1000)/min(radius(solution_t/1000)), linewidth=5, c='k')
plt.ylim(0, 2.6)
plt.xlim(0, solution_t[-1]-solution_t[0])
plt.plot(solution_t-solution_t[0], diameter_ratio, label='Diâmetro de Gotículas $D/SMD_0$', linewidth=2)
plt.xlabel('Coordenada Axial $x$ (mm)')
plt.ylabel('Valores Adimensionais')
# plt.legend()
plt.savefig('CRN_diameter.svg')
plt.show()
# plt.figure(figsize=(12,4))
# plt.plot(solution_t, radius(solution_t/1000)/min(radius(solution_t/1000)), linewidth=5, c='k')
# plt.plot(solution_t, diameter_ratio, label='Diâmetro de Gotículas $D/SMD_0$', linewidth=2)
# plt.plot(solution_t, equivalence_ratio, label='Razão de Equivalência $\Phi$', linewidth=2)
# # plt.plot(solution_t, temperature_ratio, label='Temperature ratio $T/T_{ad}$', linewidth=2)
# # plt.plot(solution_t, gas_density, label=r'Gas Density $\rho/\rho_0$', linewidth=2)
# # plt.plot(solution_t, gas_mach, label=r'Gas Mach Number', linewidth=2)
# # plt.plot(solution_t, pressure/15e5, label=r'Pressure Ratio', linewidth=2)
# plt.xlabel('Coordenada Axial $x$ (mm)')
# plt.ylabel('Valores Adimensionais')
# # plt.legend()
# plt.savefig('CRN_diameter_equivratio.svg')
# plt.show()
plt.figure(figsize=(12,4))
plt.ylim(0, 2.6)
plt.xlim(0, solution_t[-1]-solution_t[0])
plt.plot(solution_t-solution_t[0], diameter_ratio, label='Diâmetro de Gotículas $D/SMD_0$', linewidth=2)
# plt.plot(solution_t, equivalence_ratio, label='Razão de Equivalência $\Phi$', linewidth=2)
plt.plot(solution_t-solution_t[0], temperature_ratio, label='Temperatura $T/T_{ad}$', linewidth=2)
plt.xlabel('Coordenada Axial $x$ (mm)')
plt.ylabel('Valores Adimensionais')
# plt.legend()
plt.savefig('CRN_diameter_temp.svg')
plt.show()
plt.figure(figsize=(12,4))
plt.ylim(0, 2.6)
plt.xlim(0, solution_t[-1]-solution_t[0])
plt.plot(solution_t-solution_t[0], diameter_ratio, label='Diâmetro de Gotículas $D/SMD_0$', linewidth=2)
plt.plot(solution_t-solution_t[0], equivalence_ratio, label='Razão de Equivalência $\Phi$', linewidth=2)
# plt.plot(solution_t, temperature_ratio, label='Temperatura $T/T_{ad}$', linewidth=2)
plt.plot(solution_t-solution_t[0], gas_mach, label='Número de Mach', linewidth=2)
plt.xlabel('Coordenada Axial $x$ (mm)')
plt.ylabel('Valores Adimensionais')
# plt.legend()
plt.savefig('CRN_diameter_temp_mach.svg')
plt.show()
plt.figure(figsize=(12,4))
plt.ylim(0, 2.6)
plt.xlim(0, solution_t[-1]-solution_t[0])
plt.plot(solution_t-solution_t[0], diameter_ratio, label='Diâmetro de Gotículas $D/SMD_0$', linewidth=2)
# plt.plot(solution_t, equivalence_ratio, label='Razão de Equivalência $\Phi$', linewidth=2)
plt.plot(solution_t-solution_t[0], temperature_ratio, label='Temperatura $T/T_{ad}$', linewidth=2)
plt.plot(solution_t-solution_t[0], gas_mach, label='Número de Mach', linewidth=2)
plt.xlabel('Coordenada Axial $x$ (mm)')
plt.ylabel('Valores Adimensionais')
plt.legend()
plt.savefig('CRN_legend.svg')
plt.show()
## Combustion Flow
plt.figure(figsize=(10,6))
plt.plot(solution_t, area(solution_t/1000)/min(area(solution_t/1000)), label='Area ratio $A/A_{*}$', linewidth=5, c='k')
plt.plot(solution_t, states('CO2').Y, label=r'$Y_{CO_2}$', linewidth=2)
# plt.plot(solution_t, states('N2O').Y, label=r'$Y_{N_2O}$', linewidth=2)
# plt.plot(solution_t, states('C2H5OH').Y, label=r'$Y_{C_2H_5OH}$', linewidth=2)
plt.plot(solution_t, states('H2O').Y, label=r'$Y_{H_2O}$', linewidth=2)
plt.plot(solution_t, states('O2').Y, label=r'$Y_{O_2}$', linewidth=2)
# plt.plot(solution_t, temperature_ratio, label='Temperature ratio $T/T_{ad}$', linewidth=2)
# plt.plot(solution_t, pressure/15e5, label=r'Pressure Ratio', linewidth=2)
# plt.plot(1000*area.source[:, 0], area.source[:, 1]/max(area.source[:, 1]), label='Area ratio $A/A_{*}$', linewidth=5, c='k')
plt.xlabel('Chamber $x$-coordinate (mm)')
plt.ylabel('Non-dimensional parameters')
plt.legend()
# plt.show()
# reactor.gas.set_unnormalized_mass_fractions(sol_with_droplets.y[6:, -1])
# reactor.gas()
| 36.107407 | 160 | 0.609943 |
f4d5719da80cd81b9498f918d2cb3fcef37ae073 | 1,696 | py | Python | tchannel/retry.py | srcclr/tchannel-python | 5e82535f96ba247ade5a040010edfa026e0f7dce | [
"MIT"
] | null | null | null | tchannel/retry.py | srcclr/tchannel-python | 5e82535f96ba247ade5a040010edfa026e0f7dce | [
"MIT"
] | 4 | 2021-03-17T08:31:59.000Z | 2021-06-25T15:49:37.000Z | tchannel/retry.py | susanstdemos/test-python-pip | 5e82535f96ba247ade5a040010edfa026e0f7dce | [
"MIT"
] | 4 | 2016-06-05T14:20:55.000Z | 2020-08-24T16:28:41.000Z | # Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
#: Retry the request on failures to connect to a remote host. This is the
#: default retry behavior.
CONNECTION_ERROR = 'c'
#: Never retry the request.
NEVER = 'n'
#: Retry the request on timeouts waiting for a response.
TIMEOUT = 't'
#: Retry the request on failures to connect and timeouts after connecting.
CONNECTION_ERROR_AND_TIMEOUT = 'ct'
DEFAULT = CONNECTION_ERROR
#: The default number of times to retry a request. This is in addition to the
#: original request.
DEFAULT_RETRY_LIMIT = 4
| 39.44186 | 79 | 0.772406 |
7101e726fa86a28ff667ccace12722a4d65a8fa7 | 5,104 | py | Python | tidepool_data_science_models/models/simple_metabolism_OLD.py | tidepool-org/data-science-models | cd06e9aad95a0bc6cc2a81871e567c88159b86d3 | [
"BSD-2-Clause"
] | 1 | 2020-10-17T19:48:38.000Z | 2020-10-17T19:48:38.000Z | tidepool_data_science_models/models/simple_metabolism_OLD.py | tidepool-org/data-science-models | cd06e9aad95a0bc6cc2a81871e567c88159b86d3 | [
"BSD-2-Clause"
] | 4 | 2020-05-27T16:38:56.000Z | 2020-11-21T21:09:23.000Z | tidepool_data_science_models/models/simple_metabolism_OLD.py | tidepool-org/data-science-models | cd06e9aad95a0bc6cc2a81871e567c88159b86d3 | [
"BSD-2-Clause"
] | null | null | null | """
These are the original functions for modeling insulin and carbs for the FDA loop risk analysis
written by Ed (with superficial modifications). They are here for reference and testing.
"""
import numpy as np
from tidepool_data_science_models.models.simple_metabolism_model import STEADY_STATE_IOB_FACTOR_FDA
def simple_metabolism_model(
carb_amount=0, # grams (g)
insulin_amount=np.nan, # units of insulin (U)
cir=12.5, # carb-to-insulin-ratio (g/U)
isf=50, # insulin sensitivity factor (mg/dL/U)
):
"""
Compute an 8 hour long, 5-min interval time series metabolic response to insulin and carbs inputs
    at t0. If insulin is not given, it is computed from the carb amount and the carb-to-insulin ratio (like a bolus calculator).
Args:
carb_amount: carb amount at t0 (g)
insulin_amount: insulin amount at t0 (U)
cir: carb to insulin ratio (g/U)
isf: insulin sensitivity factor (mg/dL/U)
Returns:
tuple: (
net_change_in_bg,
t_5min,
carb_amount,
insulin_amount,
iob_5min
)
"""
# +CS
minutes_in_model = 8 * 60 # 8 hr * 60 minutes/hr
# +CS why do we have 2 time series? Reduce computation with just 5 min time series?
# create a time series
t = np.arange(0, minutes_in_model, 1) # in minutes
t_5min = np.arange(0, minutes_in_model, 5)
    # +CS Why do we assume an insulin amount if it's not given?
    # This could be more generalized?
    # if insulin amount is not given,
    # calculate the insulin amount like a bolus calculator would
if np.isnan(insulin_amount):
insulin_amount = carb_amount / cir # insulin amount
# insulin model
if insulin_amount != 0:
# model constants
tau1 = 55
tau2 = 70
Kcl = 1
insulin_equation = (
insulin_amount
* (1 / (Kcl * (tau2 - tau1)))
* (np.exp(-t / tau2) - np.exp(-t / tau1))
)
ia = np.cumsum(insulin_equation)
iob = insulin_amount - ia
iob_5min = iob[t_5min]
insulin_effect = -isf * ia
ie_5min = insulin_effect[t_5min]
decrease_due_to_insulin_5min = np.append(0, ie_5min[1:] - ie_5min[:-1])
else:
decrease_due_to_insulin_5min = np.zeros(len(t_5min))
iob_5min = np.zeros(len(t_5min))
# carb model
if carb_amount > 0:
K = isf / cir # carb gain
tau = 42
theta = 20
c_t = (
K
* carb_amount
* (1 - np.exp((theta - t) / tau))
* np.heaviside(t - theta, 1)
)
ce_5min = c_t[t_5min]
increase_due_to_carbs_5min = np.append(0, ce_5min[1:] - ce_5min[:-1])
else:
increase_due_to_carbs_5min = np.zeros(len(t_5min))
net_change_in_bg_5min = decrease_due_to_insulin_5min + increase_due_to_carbs_5min
# +CS - Why are we returning the carb and insulin amt?
return net_change_in_bg_5min, t_5min, carb_amount, insulin_amount, iob_5min
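# --- Illustrative usage sketch (not from the original module) ---
# Dose 10 g of carbs and let the function derive the insulin amount itself
# (insulin_amount=np.nan -> carb_amount / cir). All outputs span 8 hours at
# 5-minute resolution (96 samples).
def _example_simple_metabolism_model():  # pragma: no cover
    delta_bg, t_5min, carbs, insulin, iob = simple_metabolism_model(
        carb_amount=10, insulin_amount=np.nan, cir=12.5, isf=50
    )
    assert len(t_5min) == 8 * 12  # 96 five-minute steps
    return delta_bg.sum()  # net BG change over the 8 hours (mg/dL)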
def get_iob_from_sbr(sbr_actual):
"""
    Compute insulin on board over the following 8 hours, with the initial condition
    being the insulin on board accumulated from 8 hours of the scheduled basal rate.
Parameters
----------
    sbr_actual : scheduled basal rate (U/hr)
Returns
-------
"""
# TODO: Further clarify this
# Cameron added explanation since it was unclear what was going on until I stared
# at it for a while. Ed, please edit if these aren't correct.
# Step 1: Get 8 hr iob from a bolus that is 1/12 of the scheduled basal rate.
# This assumes basal rate is a series of boluses at 5 min intervals.
_, _, _, _, iob_sbr = simple_metabolism_model(
carb_amount=0,
insulin_amount=sbr_actual / 12,
cir=0, # This doesn't matter for this use of the model
isf=0, # Same as above
)
# Step 2: Allocate
iob_with_zeros = np.append(iob_sbr, np.zeros(8 * 12))
# Step 3: Copy the decay curves across the whole matrix
iob_matrix = np.tile(iob_with_zeros, (8 * 12, 1)).T
# Step 4: Shift each decay curve by the number of time steps
nrows, ncols = np.shape(iob_matrix)
for t_pre in np.arange(1, ncols):
iob_matrix[:, t_pre] = np.roll(iob_matrix[:, t_pre], t_pre)
# Step 5: Fill the upper triangle with zeros
# NOTE 2020-04-28: Cameron commented this out since he and Ed
# determined it isn't necessary in this algo version. Now
# the refactored code matches this exactly for testing.
# iob_matrix_tri = iob_matrix * np.tri(nrows, ncols, 0)
# Step 6: Sum across the curves to get the iob at every time step
iob_sbr_t = np.sum(iob_matrix, axis=1)
# Step 7: Just get the last 8 hours
iob_sbr_t = iob_sbr_t[95:-1]
return iob_sbr_t
def get_steady_state_iob_from_sbr(sbr):
"""
Get the steady state insulin on board for a given scheduled basal rate. This is
the iob once the basal insulin stacking and metabolism clearing reach equilibrium.
Parameters
----------
sbr
Returns
-------
"""
return sbr * STEADY_STATE_IOB_FACTOR_FDA
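# --- Illustrative sanity check (not from the original module) ---
# The first value returned by get_iob_from_sbr (full stacking of 8 hours of basal
# micro-boluses) should be close to the steady-state shortcut above.
def _example_compare_sbr_iob(sbr=1.0):  # pragma: no cover
    iob_curve = get_iob_from_sbr(sbr)  # 8 hr decay of the stacked basal IOB
    return get_steady_state_iob_from_sbr(sbr), iob_curve[0]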
| 30.201183 | 101 | 0.627155 |
f1505314eef374ce1b7cebf3c65b692d219c1017 | 1,076 | py | Python | nova/policies/image_size.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 1 | 2015-11-30T19:44:00.000Z | 2015-11-30T19:44:00.000Z | nova/policies/image_size.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 11 | 2017-06-19T01:28:55.000Z | 2017-06-23T02:01:47.000Z | nova/policies/image_size.py | viveknandavanam/nova | 556377b6915936467436c9d5bb33bc0e22244e1e | [
"Apache-2.0"
] | 3 | 2018-04-04T15:15:01.000Z | 2018-04-19T18:14:25.000Z | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:image-size'
POLICY_ROOT = 'os_compute_api:image-size:%s'
image_size_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
policy.RuleDefault(
name=BASE_POLICY_NAME,
check_str=base.RULE_ADMIN_OR_OWNER),
]
def list_rules():
return image_size_policies
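# Illustrative sketch (added comment; an assumption, not part of this module):
# rule lists like the one above are typically aggregated by the policies package
# and registered with an oslo.policy Enforcer, roughly:
#
#     from oslo_config import cfg
#     from oslo_policy import policy as oslo_policy
#     enforcer = oslo_policy.Enforcer(cfg.CONF)
#     enforcer.register_defaults(list_rules())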
| 29.081081 | 78 | 0.729554 |
0f645f459c9bbd8f6b8427684f08752b746ea9d2 | 2,500 | py | Python | main.py | stevenraphael/pathsize | 981be00f08a6d3c2e07f480e827ed078111b3e46 | [
"MIT"
] | null | null | null | main.py | stevenraphael/pathsize | 981be00f08a6d3c2e07f480e827ed078111b3e46 | [
"MIT"
] | null | null | null | main.py | stevenraphael/pathsize | 981be00f08a6d3c2e07f480e827ed078111b3e46 | [
"MIT"
] | null | null | null | """
Usage:
main.py <path> [--top=<top>] [--nofiles]
main.py -h | --help
Options:
-h --help show this screen
--top=<top> how many of the largest items to show [default: 1000].
--nofiles excludes files in the path from being printed.
"""
import os, sys, docopt
def listsizes(path, top, nofiles):
"""
Prints the sizes of items (folders/files) directly contained in the given path, sorted from largest to smallest.
Arguments:
path: a path to the folder that the function works with.
top: an integer detailing the number of items to be printed (how many of the largest items will be printed).
nofiles: excludes files in the path from being printed if nofiles is true.
"""
sizes = []
# Creates a list of (file/directory, size) pairs
for f in os.listdir(path):
newpath = os.path.join(path, f)
if os.path.isfile(newpath):
if not nofiles:
sizes.append([f, os.path.getsize(newpath)])
elif not os.path.islink(newpath):
try:
sizes.append(['<DIR> ' + f, recursivesize(newpath)])
except PermissionError:
print(f'Unable to fully access {f}: Permission denied')
sizes.sort(key=lambda s: -1 * s[1])
sizes = sizes[:top]
alignspacing = max(len(s[0]) for s in sizes) + 5
MB = 2 ** 20
KB = 2 ** 10
for s in sizes:
if s[1] > MB:
print(f'{s[0] : <{alignspacing}} {round(s[1] / MB, 2):.2f} MB')
elif s[1] > KB:
print(f'{s[0] : <{alignspacing}} {round(s[1] / KB, 2):.2f} KB')
else:
print(f'{s[0] : <{alignspacing}} {s[1]} bytes')
def recursivesize(path='.'):
"""
    Given a path to a folder, gets the cumulative size of the folder, excluding symlinks contained in the folder.
If no path is specified, the path defaults to the current path.
"""
total = 0
for f in os.listdir(path):
newpath = os.path.join(path,f)
if os.path.isfile(newpath):
total += os.path.getsize(newpath)
elif not os.path.islink(newpath):
total += recursivesize(newpath)
return total
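# --- Illustrative programmatic usage (not from the original script) ---
# Both helpers can also be used directly, bypassing the docopt command-line interface.
def _example_usage(path='.'):
    total_bytes = recursivesize(path)        # cumulative size of `path` in bytes
    listsizes(path, top=10, nofiles=False)   # print the 10 largest items in `path`
    return total_bytes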
def main():
args = docopt.docopt(__doc__)
nofiles = args['--nofiles']
path = args['<path>']
top = int(args['--top']) if args['--top'] else 1000
listsizes(path, top, nofiles)
if __name__ == '__main__':
main()
| 32.051282 | 116 | 0.5648 |
811256eda4ed17632e5268ee45dbbda9f22608d4 | 31,378 | py | Python | heudiconv/tests/test_bids.py | neurorepro/heudiconv | a694f83204df3d3d7c0aa70be492253f9659367d | [
"Apache-2.0"
] | 1 | 2019-11-01T18:25:57.000Z | 2019-11-01T18:25:57.000Z | heudiconv/tests/test_bids.py | neurorepro/heudiconv | a694f83204df3d3d7c0aa70be492253f9659367d | [
"Apache-2.0"
] | 1 | 2021-06-21T20:42:25.000Z | 2021-06-21T21:30:42.000Z | heudiconv/tests/test_bids.py | neurorepro/heudiconv | a694f83204df3d3d7c0aa70be492253f9659367d | [
"Apache-2.0"
] | 2 | 2018-08-13T19:35:00.000Z | 2022-02-11T16:03:40.000Z | """Test functions in heudiconv.bids module.
"""
import re
import os
import os.path as op
from random import random
from datetime import (datetime,
timedelta,
)
from collections import (namedtuple,
OrderedDict,
)
from glob import glob
import nibabel
from heudiconv.utils import (
load_json,
save_json,
create_tree,
)
from heudiconv.bids import (
maybe_na,
treat_age,
find_fmap_groups,
populate_intended_for,
get_shim_setting,
get_key_info_for_fmap_assignment,
find_compatible_fmaps_for_run,
find_compatible_fmaps_for_session,
select_fmap_from_compatible_groups,
SHIM_KEY,
AllowedCriteriaForFmapAssignment,
)
import pytest
def test_maybe_na():
for na in '', ' ', None, 'n/a', 'N/A', 'NA':
assert maybe_na(na) == 'n/a'
for notna in 0, 1, False, True, 'value':
assert maybe_na(notna) == str(notna)
def test_treat_age():
assert treat_age(0) == '0'
assert treat_age('0') == '0'
assert treat_age('0000') == '0'
assert treat_age('0000Y') == '0'
assert treat_age('000.1Y') == '0.1'
assert treat_age('1M') == '0.08'
assert treat_age('12M') == '1'
assert treat_age('0000.1') == '0.1'
assert treat_age(0000.1) == '0.1'
SHIM_LENGTH = 6
TODAY = datetime.today()
# Test scenarios:
# -file with "ShimSetting" field
# -file with no "ShimSetting", in "foo" dir, should return "foo"
# -file with no "ShimSetting", in "fmap" dir, acq-CatchThis, should return
# "CatchThis"
# -file with no "ShimSetting", in "fmap" dir, acq-fMRI, should return "func"
A_SHIM = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
@pytest.mark.parametrize(
"fname, content, expected_return", [
(op.join('foo', 'bar.json'), {SHIM_KEY: A_SHIM}, A_SHIM),
(op.join('dont_catch_this', 'foo', 'bar.json'), {}, 'foo'),
(op.join('dont_catch_this', 'fmap', 'bar_acq-CatchThis.json'), {}, 'CatchThis'),
(op.join('dont_catch_this', 'fmap', 'bar_acq-fMRI.json'), {}, 'func'),
]
)
def test_get_shim_setting(tmpdir, fname, content, expected_return):
""" Tests for get_shim_setting """
json_name = op.join(str(tmpdir), fname)
json_dir = op.dirname(json_name)
if not op.exists(json_dir):
os.makedirs(json_dir)
save_json(json_name, content)
assert get_shim_setting(json_name) == expected_return
def test_get_key_info_for_fmap_assignment(tmpdir, monkeypatch):
"""
Test get_key_info_for_fmap_assignment
"""
# Stuff needed to mock reading of a NIfTI file header:
# affines (qforms/sforms) are 4x4 matrices
MY_AFFINE = [[random() for i in range(4)] for j in range(4)]
# dims are arrays with 8 elements with the first one indicating the number
# of dims in the image; remaining elements are 1:
MY_DIM = [4] + [round(256 * random()) for i in range(4)] + [1] * 3
# We use namedtuples so that we can use the .dot notation, to mock
# nibabel headers:
MyHeader = namedtuple('MyHeader', 'affine dim')
MY_HEADER = MyHeader(MY_AFFINE, MY_DIM)
MyMockNifti = namedtuple('MyMockNifti', 'header')
def mock_nibabel_load(file):
"""
Pretend we run nibabel.load, but return only a header with just a few fields
"""
return MyMockNifti(MY_HEADER)
monkeypatch.setattr(nibabel, "load", mock_nibabel_load)
json_name = op.join(str(tmpdir), 'foo.json')
# 1) Call for a non-existing file should give an error:
with pytest.raises(FileNotFoundError):
assert get_key_info_for_fmap_assignment('foo.json')
# 2) matching_parameters = 'Shims'
save_json(json_name, {SHIM_KEY: A_SHIM}) # otherwise get_key_info_for_fmap_assignment will give an error
key_info = get_key_info_for_fmap_assignment(
json_name, matching_parameter='Shims'
)
assert key_info == [A_SHIM]
# 3) matching_parameters = 'ImagingVolume'
key_info = get_key_info_for_fmap_assignment(
json_name, matching_parameter='ImagingVolume'
)
assert key_info == [MY_AFFINE, MY_DIM[1:3]]
# 4) invalid matching_parameters:
with pytest.raises(ValueError):
assert get_key_info_for_fmap_assignment(
json_name, matching_parameter='Invalid'
)
def generate_scans_tsv(session_struct):
"""
Generates the contents of the "_scans.tsv" file, given a session structure.
Currently, it will have the columns "filename" and "acq_time".
The acq_time will increase by one minute from run to run.
Parameters:
----------
session_struct : dict
structure for the session, as a dict with modality: files
Returns:
-------
scans_file_content : str
multi-line string with the content of the file
"""
# for each modality in session_struct (k), get the filenames:
scans_fnames = [
op.join(k, vk)
for k, v in session_struct.items()
for vk in sorted(v.keys())
if vk.endswith('.nii.gz')
]
# for each file, increment the acq_time by one minute:
scans_file_content = ['filename\tacq_time'] + [
'%s\t%s' % (fn, (TODAY + timedelta(minutes=i)).isoformat()) for fn, i in
zip(scans_fnames, range(len(scans_fnames)))
]
# convert to multiline string:
return "\n".join(scans_file_content)
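# Added illustration (not part of the original test suite): a minimal call to
# generate_scans_tsv showing the expected header plus one filename/acq_time row.
def test_generate_scans_tsv_minimal_example():
    content = generate_scans_tsv({'func': {'sub-1_bold.nii.gz': '', 'sub-1_bold.json': {}}})
    lines = content.split('\n')
    assert lines[0] == 'filename\tacq_time'
    assert lines[1].startswith(op.join('func', 'sub-1_bold.nii.gz') + '\t')
    assert len(lines) == 2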
def create_dummy_pepolar_bids_session(session_path):
"""
Creates a dummy BIDS session, with slim json files and empty nii.gz
The fmap files are pepolar
The json files have ShimSettings
Parameters:
----------
session_path : str or os.path
path to the session (or subject) level folder
Returns:
-------
session_struct : dict
Structure of the directory that was created
expected_result : dict
dictionary with fmap names as keys and the expected "IntendedFor" as
values.
expected_fmap_groups : dict
dictionary with the expected fmap groups
expected_compatible_fmaps : dict
dictionary with the expected fmap groups for each non-fmap run in the
session
"""
session_parent, session_basename = op.split(session_path.rstrip(op.sep))
if session_basename.startswith('ses-'):
prefix = op.split(session_parent)[1] + '_' + session_basename
else:
prefix = session_basename
# 1) Simulate the file structure for a session:
# Generate some random ShimSettings:
dwi_shims = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
func_shims_A = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
func_shims_B = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
# Dict with the file structure for the session:
# -anat:
anat_struct = {
'{p}_{m}.{e}'.format(p=prefix, m=mod, e=ext): dummy_content
for ext, dummy_content in zip(['nii.gz', 'json'], ['', {}])
for mod in ['T1w', 'T2w']
}
# -dwi:
dwi_struct = {
'{p}_acq-A_run-{r}_dwi.nii.gz'.format(p=prefix, r=runNo): '' for runNo in [1, 2]
}
dwi_struct.update({
'{p}_acq-A_run-{r}_dwi.json'.format(p=prefix, r=runNo): {'ShimSetting': dwi_shims} for runNo in [1, 2]
})
# -func:
func_struct = {
'{p}_acq-{a}_bold.nii.gz'.format(p=prefix, a=acq): '' for acq in ['A', 'B', 'unmatched']
}
func_struct.update({
'{p}_acq-A_bold.json'.format(p=prefix): {'ShimSetting': func_shims_A},
'{p}_acq-B_bold.json'.format(p=prefix): {'ShimSetting': func_shims_B},
'{p}_acq-unmatched_bold.json'.format(p=prefix): {
'ShimSetting': ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
},
})
# -fmap:
# * NIfTI files:
fmap_struct = {
'{p}_acq-{a}_dir-{d}_run-{r}_epi.nii.gz'.format(p=prefix, a=acq, d=d, r=r): ''
for acq in ['dwi', 'fMRI']
for d in ['AP', 'PA']
for r in [1, 2]
}
# * dwi shims:
expected_fmap_groups = {
'{p}_acq-dwi_run-{r}_epi'.format(p=prefix, r=r): [
'{p}_acq-dwi_dir-{d}_run-{r}_epi.json'.format(
p=op.join(session_path, 'fmap', prefix), d=d, r=r
) for d in ['AP', 'PA']
]
for r in [1, 2]
}
fmap_struct.update({
'{p}_acq-dwi_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=r): {'ShimSetting': dwi_shims}
for d in ['AP', 'PA']
for r in [1, 2]
})
# * func_shims (_A and _B):
expected_fmap_groups.update({
'{p}_acq-fMRI_run-{r}_epi'.format(p=prefix, r=r): [
'{p}_acq-fMRI_dir-{d}_run-{r}_epi.json'.format(
p=op.join(session_path, 'fmap', prefix), d=d, r=r
) for d in ['AP', 'PA']
]
for r in [1, 2]
})
fmap_struct.update({
'{p}_acq-fMRI_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=r): {'ShimSetting': shims}
for r, shims in {'1': func_shims_A, '2': func_shims_B}.items()
for d in ['AP', 'PA']
})
# structure for the full session (init the OrderedDict as a list to preserve order):
session_struct = OrderedDict([
('fmap', fmap_struct),
('anat', anat_struct),
('dwi', dwi_struct),
('func', func_struct),
])
# add "_scans.tsv" file to the session_struct
scans_file_content = generate_scans_tsv(session_struct)
session_struct.update({'{p}_scans.tsv'.format(p=prefix): scans_file_content})
create_tree(session_path, session_struct)
# 2) Now, let's create a dict with the fmap groups compatible for each run
# -anat: empty
expected_compatible_fmaps = {
'{p}_{m}.json'.format(p=op.join(session_path, 'anat', prefix), m=mod): {}
for mod in ['T1w', 'T2w']
}
# -dwi: each of the runs (1, 2) is compatible with both of the dwi fmaps (1, 2):
expected_compatible_fmaps.update({
'{p}_acq-A_run-{r}_dwi.json'.format(p=op.join(session_path, 'dwi', prefix), r=runNo): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-dwi_run-{r}_epi'.format(p=prefix, r=r) for r in [1, 2]
]
}
for runNo in [1, 2]
})
# -func: acq-A is compatible w/ fmap fMRI run 1; acq-2 w/ fmap fMRI run 2
expected_compatible_fmaps.update({
'{p}_acq-{a}_bold.json'.format(p=op.join(session_path, 'func', prefix), a=acq): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-fMRI_run-{r}_epi'.format(p=prefix, r=runNo)
]
}
for runNo, acq in {'1': 'A', '2': 'B'}.items()
})
# -func (cont): acq-unmatched is empty
expected_compatible_fmaps.update({
'{p}_acq-unmatched_bold.json'.format(p=op.join(session_path, 'func', prefix)): {}
})
# 3) Then, let's create a dict with what we expect for the "IntendedFor":
sub_match = re.findall('(sub-([a-zA-Z0-9]*))', session_path)
sub_str = sub_match[0][0]
expected_prefix = session_path.split(sub_str)[-1].split(op.sep)[-1]
# dict, with fmap names as keys and the expected "IntendedFor" as values.
expected_result = {
'{p}_acq-dwi_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=runNo):
intended_for
# (runNo=1 goes with the long list, runNo=2 goes with None):
for runNo, intended_for in zip(
[1, 2],
[[op.join(expected_prefix, 'dwi', '{p}_acq-A_run-{r}_dwi.nii.gz'.format(p=prefix, r=r)) for r in [1,2]],
None]
)
for d in ['AP', 'PA']
}
expected_result.update(
{
'{p}_acq-fMRI_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=runNo):
[
op.join(expected_prefix,
'func',
'{p}_acq-{a}_bold.nii.gz'.format(p=prefix, a=acq))
]
# runNo=1 goes with acq='A'; runNo=2 goes with acq='B'
for runNo, acq in zip([1, 2], ['A', 'B'])
for d in ['AP', 'PA']
}
)
return session_struct, expected_result, expected_fmap_groups, expected_compatible_fmaps
def create_dummy_no_shim_settings_bids_session(session_path):
"""
Creates a dummy BIDS session, with slim json files and empty nii.gz
The fmap files are pepolar
The json files don't have ShimSettings
Parameters:
----------
session_path : str or os.path
path to the session (or subject) level folder
Returns:
-------
session_struct : dict
Structure of the directory that was created
expected_result : dict
dictionary with fmap names as keys and the expected "IntendedFor" as
values.
None
it returns a third argument (None) to have the same signature as
create_dummy_pepolar_bids_session
"""
session_parent, session_basename = op.split(session_path.rstrip(op.sep))
if session_basename.startswith('ses-'):
prefix = op.split(session_parent)[1] + '_' + session_basename
else:
prefix = session_basename
# 1) Simulate the file structure for a session:
# Dict with the file structure for the session.
# All json files will be empty.
# -anat:
anat_struct = {
'{p}_{m}.{e}'.format(p=prefix, m=mod, e=ext): dummy_content
for ext, dummy_content in zip(['nii.gz', 'json'], ['', {}])
for mod in ['T1w', 'T2w']
}
# -dwi:
dwi_struct = {
'{p}_acq-A_run-{r}_dwi.{e}'.format(p=prefix, r=runNo, e=ext): dummy_content
for ext, dummy_content in zip(['nii.gz', 'json'], ['', {}])
for runNo in [1, 2]
}
# -func:
func_struct = {
'{p}_acq-{a}_bold.{e}'.format(p=prefix, a=acq, e=ext): dummy_content
for ext, dummy_content in zip(['nii.gz', 'json'], ['', {}])
for acq in ['A', 'B']
}
# -fmap:
fmap_struct = {
'{p}_acq-{a}_dir-{d}_run-{r}_epi.{e}'.format(p=prefix, a=acq, d=d, r=r, e=ext): dummy_content
for ext, dummy_content in zip(['nii.gz', 'json'], ['', {}])
for acq in ['dwi', 'fMRI']
for d in ['AP', 'PA']
for r in [1, 2]
}
expected_fmap_groups = {
'{p}_acq-{a}_run-{r}_epi'.format(p=prefix, a=acq, r=r): [
'{p}_acq-{a}_dir-{d}_run-{r}_epi.json'.format(
p=op.join(session_path, 'fmap', prefix), a=acq, d=d, r=r
) for d in ['AP', 'PA']
]
for acq in ['dwi', 'fMRI']
for r in [1, 2]
}
# structure for the full session (init the OrderedDict as a list to preserve order):
session_struct = OrderedDict([
('fmap', fmap_struct),
('anat', anat_struct),
('dwi', dwi_struct),
('func', func_struct),
])
# add "_scans.tsv" file to the session_struct
scans_file_content = generate_scans_tsv(session_struct)
session_struct.update({'{p}_scans.tsv'.format(p=prefix): scans_file_content})
create_tree(session_path, session_struct)
# 2) Now, let's create a dict with the fmap groups compatible for each run
# -anat: empty
expected_compatible_fmaps = {
'{p}_{m}.json'.format(p=op.join(session_path, 'anat', prefix), m=mod): {}
for mod in ['T1w', 'T2w']
}
# -dwi: each of the runs (1, 2) is compatible with both of the dwi fmaps (1, 2):
expected_compatible_fmaps.update({
'{p}_acq-A_run-{r}_dwi.json'.format(p=op.join(session_path, 'dwi', prefix), r=runNo): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-dwi_run-{r}_epi'.format(p=prefix, r=r) for r in [1, 2]
]
}
for runNo in [1, 2]
})
# -func: each of the acq (A, B) is compatible w/ both fmap fMRI runs (1, 2)
expected_compatible_fmaps.update({
'{p}_acq-{a}_bold.json'.format(p=op.join(session_path, 'func', prefix), a=acq): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-fMRI_run-{r}_epi'.format(p=prefix, r=r) for r in [1, 2]
]
}
for acq in ['A', 'B']
})
# 3) Now, let's create a dict with what we expect for the "IntendedFor":
# NOTE: The "expected_prefix" (the beginning of the path to the
# "IntendedFor") should be relative to the subject level (see:
# https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/01-magnetic-resonance-imaging-data.html#fieldmap-data)
sub_match = re.findall('(sub-([a-zA-Z0-9]*))', session_path)
sub_str = sub_match[0][0]
expected_prefix = session_path.split(sub_str)[-1].split(op.sep)[-1]
# dict, with fmap names as keys and the expected "IntendedFor" as values.
expected_result = {
# (runNo=1 goes with the long list, runNo=2 goes with None):
'{p}_acq-dwi_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=runNo): intended_for
for runNo, intended_for in zip(
[1, 2],
[[op.join(expected_prefix, 'dwi', '{p}_acq-A_run-{r}_dwi.nii.gz'.format(p=prefix, r=r)) for r in [1,2]],
None]
)
for d in ['AP', 'PA']
}
expected_result.update(
{
# The first "fMRI" run gets all files in the "func" folder;
# the second shouldn't get any.
'{p}_acq-fMRI_dir-{d}_run-{r}_epi.json'.format(p=prefix, d=d, r=runNo): intended_for
for runNo, intended_for in zip(
[1, 2],
[[op.join(expected_prefix, 'func', '{p}_acq-{a}_bold.nii.gz'.format(p=prefix, a=acq))
for acq in ['A', 'B']],
None]
)
for d in ['AP', 'PA']
}
)
return session_struct, expected_result, expected_fmap_groups, expected_compatible_fmaps
def create_dummy_magnitude_phase_bids_session(session_path):
"""
Creates a dummy BIDS session, with slim json files and empty nii.gz
The fmap files are a magnitude/phase pair
The json files have ShimSettings
We just need to test a very simple case to make sure the mag/phase have
the same "IntendedFor" field:
Parameters:
----------
session_path : str or os.path
path to the session (or subject) level folder
Returns:
-------
session_struct : dict
Structure of the directory that was created
expected_result : dict
dictionary with fmap names as keys and the expected "IntendedFor" as
values.
expected_fmap_groups : dict
dictionary with the expected fmap groups
"""
session_parent, session_basename = op.split(session_path.rstrip(op.sep))
if session_basename.startswith('ses-'):
prefix = op.split(session_parent)[1] + '_' + session_basename
else:
prefix = session_basename
# 1) Simulate the file structure for a session:
# Generate some random ShimSettings:
dwi_shims = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
func_shims_A = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
func_shims_B = ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
# Dict with the file structure for the session:
# -dwi:
dwi_struct = {
'{p}_acq-A_run-{r}_dwi.nii.gz'.format(p=prefix, r=runNo): '' for runNo in [1, 2]
}
dwi_struct.update({
'{p}_acq-A_run-{r}_dwi.json'.format(p=prefix, r=runNo): {'ShimSetting': dwi_shims} for runNo in [1, 2]
})
# -func:
func_struct = {
'{p}_acq-{a}_bold.nii.gz'.format(p=prefix, a=acq): '' for acq in ['A', 'B', 'unmatched']
}
func_struct.update({
'{p}_acq-A_bold.json'.format(p=prefix): {'ShimSetting': func_shims_A},
'{p}_acq-B_bold.json'.format(p=prefix): {'ShimSetting': func_shims_B},
'{p}_acq-unmatched_bold.json'.format(p=prefix): {
'ShimSetting': ['{0:.4f}'.format(random()) for i in range(SHIM_LENGTH)]
},
})
# -fmap:
# * Case 1 in https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/01-magnetic-resonance-imaging-data.html#fieldmap-data
fmap_struct = {
'{p}_acq-case1_{s}.nii.gz'.format(p=prefix, s=suffix): ''
for suffix in ['phasediff', 'magnitude1', 'magnitude2']
}
expected_fmap_groups = {
'{p}_acq-case1'.format(p=prefix): [
'{p}_acq-case1_phasediff.json'.format(p=op.join(session_path, 'fmap', prefix))
]
}
fmap_struct.update({
'{p}_acq-case1_phasediff.json'.format(p=prefix): {'ShimSetting': dwi_shims}
})
# * Case 2:
fmap_struct.update({
'{p}_acq-case2_{s}.nii.gz'.format(p=prefix, s=suffix): ''
for suffix in ['magnitude1', 'magnitude2', 'phase1', 'phase2']
})
expected_fmap_groups.update({
'{p}_acq-case2'.format(p=prefix): [
'{p}_acq-case2_phase{n}.json'.format(
p=op.join(session_path, 'fmap', prefix), n=n
) for n in [1, 2]
]
})
fmap_struct.update({
'{p}_acq-case2_phase{n}.json'.format(p=prefix, n=n): {'ShimSetting': func_shims_A}
for n in [1, 2]
})
# * Case 3:
fmap_struct.update({
'{p}_acq-case3_{s}.nii.gz'.format(p=prefix, s=suffix): ''
for suffix in ['magnitude', 'fieldmap']
})
expected_fmap_groups.update({
'{p}_acq-case3'.format(p=prefix): [
'{p}_acq-case3_fieldmap.json'.format(p=op.join(session_path, 'fmap', prefix))
]
})
fmap_struct.update({
'{p}_acq-case3_fieldmap.json'.format(p=prefix): {'ShimSetting': func_shims_B}
})
# structure for the full session (init the OrderedDict as a list to preserve order):
session_struct = OrderedDict([
('fmap', fmap_struct),
('dwi', dwi_struct),
('func', func_struct),
])
# add "_scans.tsv" file to the session_struct
scans_file_content = generate_scans_tsv(session_struct)
session_struct.update({'{p}_scans.tsv'.format(p=prefix): scans_file_content})
create_tree(session_path, session_struct)
# 2) Now, let's create a dict with the fmap groups compatible for each run
# -dwi: each of the runs (1, 2) is compatible with case1 fmap:
expected_compatible_fmaps = {
'{p}_acq-A_run-{r}_dwi.json'.format(p=op.join(session_path, 'dwi', prefix), r=runNo): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-case1'.format(p=prefix)
]
}
for runNo in [1, 2]
}
# -func: acq-A is compatible w/ fmap case2; acq-B w/ fmap case3
expected_compatible_fmaps.update({
'{p}_acq-{a}_bold.json'.format(p=op.join(session_path, 'func', prefix), a=acq): {
key: val for key, val in expected_fmap_groups.items() if key in [
'{p}_acq-case{c}'.format(p=prefix, c=caseNo)
]
}
for caseNo, acq in {'2': 'A', '3': 'B'}.items()
})
# -func (cont): acq-unmatched is empty
expected_compatible_fmaps.update({
'{p}_acq-unmatched_bold.json'.format(p=op.join(session_path, 'func', prefix)): {}
})
# 3) Now, let's create a dict with what we expect for the "IntendedFor":
sub_match = re.findall('(sub-([a-zA-Z0-9]*))', session_path)
sub_str = sub_match[0][0]
expected_prefix = session_path.split(sub_str)[-1].split(op.sep)[-1]
# dict, with fmap names as keys and the expected "IntendedFor" as values.
expected_result = {
'{p}_acq-case1_{s}.json'.format(p=prefix, s='phasediff'):
[op.join(expected_prefix, 'dwi', '{p}_acq-A_run-{r}_dwi.nii.gz'.format(p=prefix, r=r)) for r in [1, 2]]
}
expected_result.update({
'{p}_acq-case2_phase{n}.json'.format(p=prefix, n=n):
# populate_intended_for writes lists:
[op.join(expected_prefix, 'func', '{p}_acq-A_bold.nii.gz'.format(p=prefix))]
for n in [1, 2]
})
expected_result.update({
'{p}_acq-case3_fieldmap.json'.format(p=prefix):
# populate_intended_for writes lists:
[op.join(expected_prefix, 'func', '{p}_acq-B_bold.nii.gz'.format(p=prefix))]
})
return session_struct, expected_result, expected_fmap_groups, expected_compatible_fmaps
# Test cases:
# A) pepolar fmaps with ShimSetting in json files
# B) same, with no ShimSetting
# C) magnitude/phase, with ShimSetting
@pytest.mark.parametrize(
"simulation_function", [create_dummy_pepolar_bids_session,
create_dummy_no_shim_settings_bids_session,
create_dummy_magnitude_phase_bids_session]
)
def test_find_fmap_groups(tmpdir, simulation_function):
""" Test for find_fmap_groups """
folder = op.join(str(tmpdir), 'sub-foo')
_, _, expected_fmap_groups, _ = simulation_function(folder)
fmap_groups = find_fmap_groups(op.join(folder, 'fmap'))
assert fmap_groups == expected_fmap_groups
# Test cases:
# A) pepolar fmaps with ShimSetting in json files
# B) same, with no ShimSetting
# C) magnitude/phase, with ShimSetting
@pytest.mark.parametrize(
"simulation_function", [create_dummy_pepolar_bids_session,
create_dummy_no_shim_settings_bids_session,
create_dummy_magnitude_phase_bids_session]
)
def test_find_compatible_fmaps_for_run(tmpdir, simulation_function):
"""
Test find_compatible_fmaps_for_run.
Parameters:
----------
tmpdir
simulation_function : function
function to create the directory tree and expected results
"""
folder = op.join(str(tmpdir), 'sub-foo')
_, _, expected_fmap_groups, expected_compatible_fmaps = simulation_function(folder)
for modality in ['anat', 'dwi', 'func']:
for json_file in glob(op.join(folder, modality, '*.json')):
compatible_fmaps = find_compatible_fmaps_for_run(
json_file,
expected_fmap_groups,
matching_parameters='Shims'
)
assert compatible_fmaps == expected_compatible_fmaps[json_file]
# Test two scenarios for each case:
# -study without sessions
# -study with sessions
# Cases:
# A) pepolar fmaps with ShimSetting in json files
# B) same, with no ShimSetting
# C) magnitude/phase, with ShimSetting
@pytest.mark.parametrize(
"folder, expected_prefix, simulation_function", [
(folder, expected_prefix, sim_func)
for folder, expected_prefix in zip(['no_sessions/sub-1', 'sessions/sub-1/ses-pre'], ['', 'ses-pre'])
for sim_func in [create_dummy_pepolar_bids_session,
create_dummy_no_shim_settings_bids_session,
create_dummy_magnitude_phase_bids_session]
]
)
def test_find_compatible_fmaps_for_session(tmpdir, folder, expected_prefix, simulation_function):
"""
Test find_compatible_fmaps_for_session.
Parameters:
----------
tmpdir
simulation_function : function
function to create the directory tree and expected results
"""
session_folder = op.join(str(tmpdir), folder)
_, _, _, expected_compatible_fmaps = simulation_function(session_folder)
compatible_fmaps = find_compatible_fmaps_for_session(session_folder, matching_parameters='Shims')
assert compatible_fmaps == expected_compatible_fmaps
# Test two scenarios for each case:
# -study without sessions
# -study with sessions
# Cases:
# A) pepolar fmaps with ShimSetting in json files
# B) same, with no ShimSetting
# C) magnitude/phase, with ShimSetting
@pytest.mark.parametrize(
"folder, expected_prefix, simulation_function", [
(folder, expected_prefix, sim_func)
for folder, expected_prefix in zip(['no_sessions/sub-1', 'sessions/sub-1/ses-pre'], ['', 'ses-pre'])
for sim_func in [create_dummy_pepolar_bids_session,
create_dummy_no_shim_settings_bids_session,
create_dummy_magnitude_phase_bids_session]
]
)
def test_select_fmap_from_compatible_groups(tmpdir, folder, expected_prefix, simulation_function):
"""Test select_fmap_from_compatible_groups"""
session_folder = op.join(str(tmpdir), folder)
_, _, _, expected_compatible_fmaps = simulation_function(session_folder)
for json_file, fmap_groups in expected_compatible_fmaps.items():
for criterion in AllowedCriteriaForFmapAssignment:
if not op.dirname(json_file).endswith('fmap'):
selected_fmap = select_fmap_from_compatible_groups(
json_file,
fmap_groups,
criterion=criterion
)
# when the criterion is 'First', you should get the first of
# the compatible_fmaps (for that json_file), if it is 'Closest',
# it should be the last one (the fmaps are "run" at the
# beginning of the session)
if selected_fmap:
if criterion == 'First':
assert selected_fmap == sorted(expected_compatible_fmaps[json_file])[0]
elif criterion == 'Closest':
assert selected_fmap == sorted(expected_compatible_fmaps[json_file])[-1]
else:
assert not expected_compatible_fmaps[json_file]
# Test two scenarios for each case:
# -study without sessions
# -study with sessions
# Cases:
# A) pepolar fmaps with ShimSetting in json files
# B) same, with no ShimSetting
# C) magnitude/phase, with ShimSetting
@pytest.mark.parametrize(
"folder, expected_prefix, simulation_function", [
(folder, expected_prefix, sim_func)
for folder, expected_prefix in zip(['no_sessions/sub-1', 'sessions/sub-1/ses-pre'], ['', 'ses-pre'])
for sim_func in [create_dummy_pepolar_bids_session,
create_dummy_no_shim_settings_bids_session,
create_dummy_magnitude_phase_bids_session]
]
)
def test_populate_intended_for(tmpdir, folder, expected_prefix, simulation_function):
"""
Test populate_intended_for.
Parameters:
----------
tmpdir
folder : str or os.path
path to BIDS study to be simulated, relative to tmpdir
expected_prefix : str
expected start of the "IntendedFor" elements
simulation_function : function
function to create the directory tree and expected results
"""
session_folder = op.join(str(tmpdir), folder)
session_struct, expected_result, _, _ = simulation_function(session_folder)
populate_intended_for(session_folder, matching_parameters='Shims', criterion='First')
# Now, loop through the jsons in the fmap folder and make sure it matches
# the expected result:
fmap_folder = op.join(session_folder, 'fmap')
for j in session_struct['fmap'].keys():
if j.endswith('.json'):
assert j in expected_result.keys()
data = load_json(op.join(fmap_folder, j))
if expected_result[j]:
assert data['IntendedFor'] == expected_result[j]
# Also, make sure the run with random shims is not here:
# (It is assured by the assert above, but let's make it
# explicit)
run_prefix = j.split('_acq')[0]
assert '{p}_acq-unmatched_bold.nii.gz'.format(p=run_prefix) not in data['IntendedFor']
else:
assert 'IntendedFor' not in data.keys()
| 38.126367 | 153 | 0.618969 |
16532b04692155bf7f8eb5cfafb62618641d6ef5 | 16,129 | py | Python | simulation/Growth_rate_under_both_effect_figure4AC.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | simulation/Growth_rate_under_both_effect_figure4AC.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | simulation/Growth_rate_under_both_effect_figure4AC.py | YuanxiaoGao/Evolution_of_reproductive_strategies_in_incipient_multicellularity | 13eb51639fcee630a76e197b50ef321e3a94ce0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 16 11:56:54 2019
@author: gao
"""
#-*-encoding:utf-8 -*-
################################################################
# 2018-06-25 #
################################################################
""" code description:
Aim: get the growth rate (grate) of one given life cycle.
Parameters and Variables:
C: [# of germ cells, # of soma cells]=[defectors, cooperators] (np array: int elements)
T: time for one division; it depends on the cell composition of the last step,
essentially based on the payoff at the colony level.
P: probability for each cell type to divide,
np.array([pi,pj]), which is decided by the cell payoffs (composition).
m: type switching probability.
b: benefit for germs.
c: cost for somas.
w: synergy or discounting effects.
W_i: intensity of selection.
z: number of simulations for each trajectory.
grate: growth rate lambda.
x0: the initial guess for the root.
---------------- """
import numpy as np
import operator as op
from functools import reduce
from scipy.misc import derivative
import sys
#------------------------------------------------------------------------------------------------------------
'''import all lcs; which is total 128 lcs for M <=10'''
with open("../simulation/LC.txt", "r") as file:
lcs = eval(file.readline()) # read lc list
num_lcs=len(lcs) # number of lcs
#------------------------------------------------------------------------------------------------------------
'''Parameter values b, c, m, k, Wi, in which k is the number of the cooperators--soma'''
'''exhaustive cluster parameters'''
t_pterb_cluster=int(sys.argv[1]) # from 0 to grid_num grid_num points
k_cluster=int(sys.argv[2]) # two value +-1 to make the two figures
i_th=int(sys.argv[3]) # i_th lc in lcc len(lc_data)
# i_th (0,7) from 0 to 6 = M<=4
'''transform cluster parameters into local parameters'''
grid_num=7 # grid size in figure, so check figure.py file first
grid=np.linspace(1,7,num=grid_num,endpoint=True)
ti=grid[t_pterb_cluster]
k=grid[k_cluster]
lc=lcs[i_th]
'''constant parameter'''
z=int(5000) # simulation times for each trajectory
b=10
c=1
Wi=0.1 # fixed intensity of selection
m=0.01
chi_ratio=0.4
#------------------------------------------------------------------------------------------------------------
'''find each lc's newborn compositions and crucial size for fragmentation;
Return:
1-- number of newborn state (int)
2-- newborn Composition (np.ndarray),
3-- group max size (int) for reproduction,
4-- offspring number of offspring group size(list): [#of 1 cell, #of 2 cells,....]
'''
def Newborn(lc): # lc is a life cycle in list form such as [1,1,2]
size_lc=int(sum(lc)) # max group size M = fragemnt size
#------- composition of all newborn states
offtype=list(set(lc)) # how many d
newborn=[] # newborn composition
for i in range(len(offtype)):
for j in range(offtype[i]+1):
newborn.append(np.array([offtype[i]-j,j]))
num_newbornstate=len(newborn) # number of newborn state
#------- offspring number of every offspring types,e.g how many 1 cells produced....
num_offtype=[]
for i in range(1,size_lc):
num_offtype.append(lc.count(i))
off_num_cell=np.sum(np.vstack(newborn),axis=1)
return num_newbornstate,np.vstack(newborn),size_lc,num_offtype,off_num_cell
num_newbornstate,newbornstate,size_lc,num_offtype,num_cell_newborn = Newborn(lc)
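#------- illustrative check (added note, not part of the original script):
# for a life cycle lc = [1, 2], the call above gives
#   num_newbornstate = 5
#   newbornstate     = [[1,0], [0,1], [2,0], [1,1], [0,2]]  (all 1- and 2-cell compositions)
#   size_lc          = 3                                     (colony size at fragmentation)
#   num_offtype      = [1, 1]                                (one 1-cell and one 2-cell offspring)
#   num_cell_newborn = [1, 1, 2, 2, 2]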
#------------------------------------------------------------------------------------------------------------
#---------- mode 2 ---------volunteer game
''' volunteer game: if the colony contains at least k cooperators, defectors get b and
cooperators get b-c; otherwise defectors get 0 and cooperators get -c; b>c.
Cell payoff Pay; germs are the defectors. Return pay_germ, pay_soma
C--colony composition [defectors, cooperators]
b--benefit for germs
c--costs for somas
'''
def Pay(C,b,c,k):
if C[1]>=k: # defectors get b IF existing at least w cooperators
pay_g=b
else:
pay_g=0 # defectors get 0
pay_s=pay_g-c # cooperactor gets b-c
return pay_g,pay_s
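# worked example (added for clarity, not in the original script): with b=10, c=1, k=2,
# a colony C=[3,2] meets the threshold (C[1]=2>=k), so Pay(C,10,1,2) -> (10, 9);
# with k=3 the threshold is missed and Pay(C,10,1,3) -> (0, -1).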
#------------------------------------------------------------------------------------------------------------
'''fitness by using e^payoff; Return f_germ,f_soma
C--colony composition [defectors, cooperators]
Wi--intensity of selection
WARNING: we may compute the fitness of a non-existing cell type here, but it plays no role later.
'''
def Fitness(C,Wi):
p_g,p_s=Pay(C,b,c,k) # cll payoff
f_g=np.exp(Wi*p_g) # define fitness f_g=e**(w*pay_g)
f_s=np.exp(Wi*p_s) # define fitness f_s=e**(w*pay_s)
return f_g,f_s
#------------------------------------------------------------------------------------------------------------
'''Probability P for each possible division;
Return np.array([p_a*(n**2,2*m*n,m**2),p_b*(n**2,2*m*n,m**2)]) with shape (1,6)
corresponding to [g->2g, g->g+s, g->2s, s->2s, s->s+g, s->2g]
composition changes with [[1,0], [0,1], [-1,2], [0,1], [1,0], [2,-1]]
C--colony composition
m--mutation rate
'''
def P(C,m):
f_g,f_s=Fitness(C,Wi) # cell fitness
ratio_f_g=C[0]*f_g/(C[0]*f_g+C[1]*f_s) # proba for germs ~ f_g
ratio_f_s=C[1]*f_s/(C[0]*f_g+C[1]*f_s) # proba for somas ~ f_s
muta=np.array([(1.0-m)**2,2*m*(1.0-m),m**2]) # mutation order: no-half-both
proba=np.hstack((ratio_f_g*muta,ratio_f_s*muta))
# proba * random mutation
return proba
#------------------------------------------------------------------------------------------------------------
'''Division time T=K/<average(f)>; Return - growth time for one step'''
def CHI_equal(item):
t=np.log((item+1)/item)
return t
def T(C):
num_cell=(C[0]+C[1])
if num_cell==ti:
coef=chi_ratio*np.log((num_cell+1)/num_cell) # netural coefficient ln[i+j+1]/[i+j]
else:
coef=np.log((num_cell+1)/num_cell) # netural coefficient ln[i+j+1]/[i+j]
f_g,f_s=Fitness(C,Wi) # call fitness
time=coef*(num_cell)/(C[0]*f_g+C[1]*f_s) # C[k]=0 makes sense to the non-exist Fitness ----linear with size effects
time_s=time
return time_s
#------------------------------------------------------------------------------------------------------------
'''Single division step; Return - next cell composition np.array([g,s])'''
'''here is the only random thing we code in this file!!!!!'''
def Division(C): # a tuple after calling
#---------- which cell type to divide
p=P(C,m).tolist() # call probability and convert into list
divi_id=np.random.multinomial(1, p, size=1) # divide ID or direction
index=np.nonzero(divi_id)[1]
c_delta=np.array([[1,0],[0,1],[-1,2],[0,1],[1,0],[2,-1]])
# composition changes with P(C,m)
next_c=C+c_delta[int(index)] # composition after division
return next_c # next cell composition && probability for this division
#------------------------------------------------------------------------------------------------------------
'''One trajectory for a given newbornstate;
Return - final C(composition), cumulative T(time).
One_tra{Fragment[ncr]}, so the call structure is the following:
ncr() -> Fragment() -> One_tra()
'''
#---------- step 1 ---------
'''combination function'''
def ncr(n, r):
if r>n:
return 0.0
else:
r = min(r, n-r) # take the smaller
numer = reduce(op.mul, range(n, n-r, -1), 1) # op.mul: operator.mul(a, b)¶
denom = reduce(op.mul, range(1, r+1), 1)
return numer//denom
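# quick sanity check (added comment): ncr(5,2) == 10, while ncr(2,5) == 0.0 because
# a colony cannot provide more cells than it contains.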
#---------- step 2 ---------
'''fragment function; partition composition into offspring type(newbornstate);
Return a list [#of type 1, #of type 2,....];
read more in notebook: fragment analysis
'''
def Fragment(comp): # a given colony cell composition
off_dis=[]
for i in range(num_newbornstate): # for example lc [1,2] -> 1 and 2
offsize=np.sum(newbornstate[i]) # for example above 1->[1,0] or [0,1], while 2->[2,0],[1,1] or [0,2]
i_cell=newbornstate[i][0] # for example above [1,0]->1
j_cell=newbornstate[i][1] # for example above [1,0]->0
off_i=ncr(comp[0],i_cell)*ncr(comp[1],j_cell)/ncr(np.sum(comp),offsize)
# probability for comp to get i cells offspring newbornstate[i]
off_dis.append(num_offtype[offsize-1]*off_i)
# number of getting the offspring newbornstate[i]
return off_dis
#---------- step 3 ---------
'''one trajectory from newborn to final possible offsprings.
Given a newbornstate: np.array([g,s]);
Return
1: []--final offspring number of each newborn type;
2: float--growth time
'''
def One_tra(C_newbron): # C_newbron: newborn cell composition
cum_t=0.0 # count growth time
newbron_size=C_newbron[0]+C_newbron[1] # size of newborn
division_times=size_lc-newbron_size # how many division times left
i=0 # count division_times
while i<division_times: # division_times
next_c=Division(C_newbron)
cum_t+=T(C_newbron)
C_newbron=next_c
i+=1
offspring=Fragment(C_newbron) # call fragment function to get offspring
return offspring, cum_t
#-------------------------------------------------------------------------------------------------------------------------
'''COLLECT all matrix data; Return offtype+T for z times simulation;
M_data()=[], with length newbornstates; in which each element is a np.array with shape(z,newbornstates+1);
and in each np.array, columns corresponds to -[#of newbornstate1, #of newbornstate2,...., t]
'''
def M_data():
Matrix=[]
for new_bron in newbornstate:
#--------- one row's data with shape z*(num_newbornstate+1)
z_off=[] # list of each offspring for z-th simulations and time T
for i in range(int(z)):
offspring, cum_t=One_tra(new_bron)
offspring.insert(len(offspring),cum_t) # insert the T at the end of offtype size z*(offtype+1)
z_off.append(offspring) # put offtype+T into a list; size z*(offtype+1)
row=np.array(z_off) # convert each row data into a np.array
Matrix.append(row) # collect all row data; size (num_newbornstate*z*(offtype+1))
return Matrix # a list containning np.array, each array is a matrix of z trajectories
#-------------------------------------------------------------------------------------------------------------------------
''' Construct Q by using the simulated data above. Return rooting function
grate ----- growth rate i.e. lambda
Warning: here we use the mass of the population i.e. the number of the whole cells
'''
data = M_data() # save the simulated data in case of changing when recall afterwards
def F(grate):
Q=[]
for i in range(num_newbornstate): # i means each newbornstate
#------construct e^(-grate*T) # z is simulation times i.e. trajectories lines
e1=np.full((1,int(z)),np.exp(-1.0)) # construct [e^-1,e^-1,e^-1]
e2=np.power(e1,data[i][:,-1]) # construct [e^-T,e^-T,e^-T]
e3=np.ones((1,z))*grate # construct z [grate,grate,...]
e4=np.power(e2,e3) # construct Z [e^(-grate*T),...]
#----- get N*e^(-grate*T)
off_time=np.multiply(data[i][:,:-1],e4.reshape((z,1)))
# each simulated line * t
#----sigma all column of off_time= sigma-tao(=z) N*e^(-grate*T)
row=(np.sum(off_time,axis=0))/float(z) # get a row of Q with shape(1,num_newbornstate)
Q.append(row.tolist()) # collect all rows
Q_np=np.array(Q) # change row list into np.array()
Q1=Q_np-np.eye(num_newbornstate) # ndarray Q-I
expr=np.linalg.det(Q1) # convert into matrix for calculating det
return expr
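# added note: each entry of Q is the Monte-Carlo average over z trajectories of
# N * exp(-grate*T), where N is the number of offspring of a given newborn type and T the
# trajectory time; the growth rate lambda is the value of grate solving det(Q - I) = 0,
# i.e. the root of F searched for below.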
##------------------------------------------------------------------------------------------------------------
'''Solve equation to find growth rate; Return growth rate'''
#---------- step 1 ---------
''' Estimate the max lambda by finding the minimum time '''
t_row_min=[]
t_row_max=[]
for i in range(num_newbornstate):
t_row_min.append(np.amin(data[i][:,-1]))
t_row_max.append(np.amax(data[i][:,-1]))
T_min=min(t_row_min) # min time
T_max=max(t_row_max) # max time
x0=(np.log(sum(lc)))/T_min+0.1 # the first root guess -- right boundary
x_mini=(np.log(2))/T_max-0.1 # left boundary of the root search
root_step=1e-3 # step size used when scanning leftwards for a sign change
step=(x0-x_mini)/root_step +1 # number of scan steps between the two boundaries
#---------- step 2 ---------
''' method 1: Find single roots by using bisection'''
''' plain bisection alone cannot work here, because the largest root can be a double root'''
def Find_single_root(func,x): # x0 is the first root guess
#--find the root left and right boundaries by setting the right first
f0=np.sign(func(x)) # sign of the first try
f1=np.sign(func(x-root_step))
#------find the max root boundary to the left
n=0
while f0*f1>0 and (x-n*root_step)>=x_mini:
f0=np.sign(func(x-n*root_step)) # right
f1=np.sign(func(x-(n+1)*root_step)) # left
n+=1
#---- cannot find the single roots
if (x-n*root_step)<=x_mini:
return None, None
#----- can find the single roots
else:
if f0*f1 !=0:
left=x-n*root_step
right=x-(n-1)*root_step
#------find the root between boundary (left, right) by bisection
while abs(left-right)>10**(-14):
left_sign=np.sign(func(left)) # left sign
mean=(left+right)/2
mean_sign=np.sign(func(mean)) # middle sign
if left_sign*mean_sign>0: # left and middle are the same sign
left=mean
else:
right=mean
elif f0==0:
mean=x-(n-1)*root_step # since n add extra 1 after f0 anf f1, here should remove it
elif f1==0:
mean=x-n*root_step
return mean, n
''' method 2: Find double roots by using the derivative '''
#--first derivative
def F_d(x): # derivative of f
f_d=derivative(F, x, dx=1e-6)
return f_d
def Find_double_root(x): # x0 is the first root guess
single_root,n=Find_single_root(F_d,x) # find the first deriviate=0 of the function
root0=1
while single_root is not None:
n0=n
if abs(F(single_root))<10**(-5): # first deriviate=0 is also the root
break
else: # if the first deriviate is not the root
new_single_root,new_n=Find_single_root(F_d,x-n0*root_step)
if new_single_root is None: # no double roots
root0=0
break
else:
single_root,n=new_single_root,new_n+n0
if root0==1:
return single_root
else:
return None
#------------------------------------------------------------------------------------------------------------
'''output result'''
single_root,n=Find_single_root(F,x0)
if single_root is not None:
root=single_root
else:
double_root=Find_double_root(x0)
root=double_root
with open('data/%d_%d_%d.txt'%(t_pterb_cluster,k_cluster,i_th), 'w') as f:
    f.write(str(root)) # write the growth rate found above (single or double root)
| 38.130024 | 128 | 0.539339 |
bb52d97991aa92b0f056065eb4ebd1266985edfa | 10,555 | py | Python | snn_lib/utilities.py | zhongyuchen/snn-iir | 58f0cb4a0dfcf5be630543fdfd88741f6061bcac | [
"Apache-2.0"
] | 5 | 2021-03-10T11:57:43.000Z | 2022-03-02T13:15:45.000Z | snn_lib/utilities.py | zhongyuchen/snn-iir | 58f0cb4a0dfcf5be630543fdfd88741f6061bcac | [
"Apache-2.0"
] | null | null | null | snn_lib/utilities.py | zhongyuchen/snn-iir | 58f0cb4a0dfcf5be630543fdfd88741f6061bcac | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
# File Name : utilities.py
# Author: Haowen Fang
# Email: [email protected]
# Description: utility functions.
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import filters
import matplotlib
import torch
from torch.utils.data import Dataset, DataLoader
# matplotlib.use('Qt5Agg')
def generate_rand_pattern(pattern_num, synapse_num, length, min_spike_num, max_spike_num):
"""
    Create random test cases. Each pattern belongs to a different class.
    Each test case has multiple spike trains, corresponding to different synapses.
1 indicates a spike, 0 indicates no spike.
pattern_num: number of random patterns
synapse_num: number of spike trains of each pattern
length: length of patterns
min_spike_num: minimum number of spikes in each spike train
max_spike_num: maximum number of spikes in each spike train
if min_spike_num == max_spike_num, all spike trains have same number of spikes
x_train: [pattern_idx, synapse_num, time]
y_train_onehot: [pattern_num, pattern_num], one hot label
y_train_cat: [pattern_number], categorical label
"""
x_train = np.zeros([pattern_num, synapse_num, length], dtype=np.float32)
y_train_onehot = np.zeros([pattern_num, pattern_num], dtype=np.float32)
y_train_cat = np.zeros(pattern_num, dtype=np.float32)
for i in range(pattern_num):
for j in range(synapse_num):
spike_number = random.randint(min_spike_num, max_spike_num)
spike_time = random.sample(range(length), spike_number)
x_train[i, j, spike_time] = 1
y_train_onehot[i, i] = 1
y_train_cat[i] = i
return x_train, y_train_onehot, y_train_cat
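# added example: generate_rand_pattern(3, 5, 100, 2, 4) returns x_train with shape
# (3, 5, 100), y_train_onehot with shape (3, 3) and y_train_cat with shape (3,),
# i.e. one class per generated pattern.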
def filter_spike(spike_train, filter_type='exp', tau_m=10, tau_s=2.5,
normalize=True):
"""
generate filtered spike train
spike_train: 1d array, 1 represents spike
filter_type: exp or dual_exp
tau_m: time constant used by dual_exp
tau_s: time constant used by exp and dual exp
"""
length = len(spike_train)
eta = tau_m / tau_s
v_0 = np.power(eta, eta / (eta - 1)) / (eta - 1)
psp_m = 0
psp_s = 0
target_pattern = np.zeros([1, length], dtype=np.float32)
if filter_type == 'dual_exp':
for i in range(length):
psp_m = psp_m * np.exp(-1 / tau_m) + spike_train[i]
psp_s = psp_s * np.exp(-1 / tau_s) + spike_train[i]
if normalize:
target_pattern[0, i] = (psp_m - psp_s) * v_0
else:
target_pattern[0, i] = (psp_m - psp_s)
elif filter_type == 'exp':
for i in range(length):
psp_s = psp_s * np.exp(-1 / tau_s) + spike_train[i]
target_pattern[0, i] = psp_s
return target_pattern
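# added note: with filter_type='exp', a single spike at t=0 decays as exp(-t/tau_s);
# e.g. filter_spike(np.array([1, 0, 0, 0]), 'exp', tau_s=2.5)[0] is approximately
# [1.0, 0.67, 0.45, 0.30].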
def filter_spike_multiple(spike_trains, filter_type='exp', tau_m=10, tau_s=2.5,
normalize=True):
"""
create filtered spike train for a batch
spike_train_batch[number of spike_trains, time]
"""
spike_train_num, time = spike_trains.shape
filtered_spikes = np.zeros(spike_trains.shape, dtype=np.float32)
# for each spike train in the instance
for i in range(spike_train_num):
filtered_spikes[i] = filter_spike(spike_trains[i], filter_type=filter_type,
tau_m=tau_m,tau_s=tau_s, normalize=normalize)
return filtered_spikes
def mutate_spike_pattern(template_pattern, mean, sigma):
"""
create new spike pattern based on provided template, jitter follows normal distribution
:param template_pattern: 2d array[input_dimension, time]
:param mean: mean of normal distribution
:param sigma: standard deviation of normal distribution
:return: 2d array [input_dimension, time]
"""
input_size, length = template_pattern.shape
mutated_pattern = np.zeros([input_size, length],dtype=np.float32)
input_idx, spike_time = np.where(template_pattern != 0)
delta_t = np.rint(np.random.normal(mean, sigma, spike_time.shape)).astype(int)
mutated_spike_time = spike_time + delta_t
# print(delta_t)
# find the time larger than time range, set to maximum time
mutated_spike_time[np.where(mutated_spike_time >= length)] = length - 1
# find the time less than 0, set to 0
mutated_spike_time[np.where(mutated_spike_time < 0)] = 0
mutated_pattern[input_idx, mutated_spike_time] = 1
return mutated_pattern
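# added note: with mean=0 and sigma=0 every jitter rounds to zero and the returned pattern
# equals the template; larger sigma shifts each spike by a rounded normal offset, clipped
# to the valid time range [0, length-1].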
def plot_raster(spike_mat, **kwargs):
"""
spike_mat[row, time]
"""
neuron_idx, spike_time = np.where(spike_mat != 0)
# plt.figure()
plt.plot(spike_time, neuron_idx, linestyle='None', marker='|', **kwargs)
# print(**kwargs)
if 'label' in kwargs:
plt.legend(loc='upper right', fontsize='x-large')
# plt.show()
def plot_raster_dot(spike_mat, label=False):
'''
another function to plot spikes
:param spike_mat: [row, length/time]
:return:
'''
h,w = spike_mat.shape
plt.figure()
point_coordinate = np.where(spike_mat != 0)
plt.scatter(point_coordinate[1], point_coordinate[0], s=1.5)
plt.gca().invert_yaxis()
plt.gca().set_xlim([0, w])
plt.gca().set_ylim([0, h])
if label is True:
plt.xlabel('time')
plt.ylabel('input spike train index')
def gaussian_filter_spike_train(spike_train, sigma):
"""
create a spike probability over time
:param spike_train: 1d array[time]
:param sigma:
:return: spike probability, 1d array[time]
"""
spike_probability = filters.gaussian_filter(spike_train, sigma, mode='constant', cval=0)
return spike_probability.astype(np.float32)
def gaussian_filter_spike_train_batch(spike_train_batch, sigma):
"""
:param spike_trains: 3d array [pattern_id, spike_train_id, time]
:param sigma:
:return:
"""
batch_size, spike_train_num, time = spike_train_batch.shape
filtered_spike_batch = np.zeros(spike_train_batch.shape, dtype=np.float32)
for i in range(batch_size):
for j in range(spike_train_num):
filtered_spike_batch[i, j] = gaussian_filter_spike_train(spike_train_batch[i, j], sigma)
return filtered_spike_batch
class RandPatternDataset(Dataset):
"""random pattern dataser"""
def __init__(self, dataset_path, label_path, transform=None):
self.dataset = np.load(dataset_path)
self.dataset = self.dataset.astype(np.float32)
self.label = np.load(label_path)
self.transform = transform
def __len__(self):
return self.dataset.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.dataset[idx],self.label[idx]
class monitor():
def __init__(self, snn_model, batch_size, length):
'''
:param snn_model:
:param batch_size:
:param length:
'''
self.v = torch.zeros([batch_size, snn_model.neuron_number, length])
self.spike = torch.zeros([batch_size, snn_model.neuron_number, length])
self.filtered_spike = torch.zeros([batch_size, snn_model.neuron_number, length])
self.reset_v = torch.zeros([batch_size, snn_model.neuron_number, length])
self.v_0 = snn_model.v_0
self.step_counter = 0
def record_dict(self, spike, states):
self.spike[:, :, self.step_counter] = spike
self.filtered_spike[:, :, self.step_counter] = (states["filter_m"] - states["filter_s"]) * self.v_0
self.v[:, :, self.step_counter] = states["v"]
self.reset_v[:, :, self.step_counter] = states["reset_v"]
self.step_counter += 1
def record(self, spike, v, reset_v, filter_m, filter_s):
self.spike[:, :, self.step_counter] = spike
self.filtered_spike[:, :, self.step_counter] = (filter_m-filter_s) * self.v_0
self.v[:, :, self.step_counter] = v
self.reset_v[:, :, self.step_counter] = reset_v
self.step_counter += 1
def float_to_spike_train(value, spike_train_length):
'''
convert a floating value to a spike train
:param value: a floating value in [0,1.0]
:param spike_train_length: length of spike train
:return: spike_train: [spike_train_length]
'''
spike_train = np.zeros(spike_train_length)
spike_number = int(value*spike_train_length)
    ticks = np.linspace(0, spike_train_length, num=spike_number, endpoint=False, dtype=int)
spike_train[ticks] = 1
return spike_train
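# added example: float_to_spike_train(0.5, 10) places 5 evenly spaced spikes at
# indices [0, 2, 4, 6, 8], encoding the value 0.5 as a firing rate over 10 time steps.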
if __name__ == '__main__':
random.seed(0)
np.random.seed(0)
template_num = 10
synapse_num = 40
length = 200
# test generate_rand_pattern
spike_train_template, labels_onehot, labels_cat = generate_rand_pattern(10, 40, 200, 5, 10)
# mutate spike pattern
new_patterns = mutate_spike_pattern(spike_train_template[0], 0, 0.5)
#plot new pattern and template pattern to see if they are similar
plot_raster(new_patterns)
plot_raster(spike_train_template[0])
# for each spike train template, mutate it to create 100 spike trains
mutate_num = 100
# test_cases = np.zeros(mutate_num*template_num, synapse_num, length)
# test_cases_label_onehot = np.zeros([mutate_num*template_num,template_num])
# test_cases_label_cat = np.zeros(mutate_num * template_num)
test_cases = []
test_cases_label_onehot = []
test_cases_label_cat = []
filtered_target = []
for template_idx, template in enumerate(spike_train_template):
for j in range(mutate_num):
test_cases.append(mutate_spike_pattern(template, 0, 0.5))
test_cases_label_onehot.append(labels_onehot[template_idx])
test_cases_label_cat.append(labels_cat[template_idx])
target = np.zeros([template_num, length])
target[template_idx, 10+template_idx*18] = 1
filtered_target.append(filter_spike_multiple(target, filter_type='dual_exp', tau_m=10, tau_s=2.5))
test_cases = np.stack(test_cases)
test_cases_label_cat = np.stack(test_cases_label_cat)
test_cases_label_onehot = np.stack(test_cases_label_onehot)
filtered_target = np.stack(filtered_target)
np.save("test_cases.npy", test_cases)
np.save("test_case_label_onehot", test_cases_label_onehot)
np.save("test_case_label_cat", test_cases_label_cat)
np.save("filtered_target", filtered_target)
for i in range(10):
plt.plot(filtered_target[mutate_num,i])
plt.show()
| 33.29653 | 110 | 0.672383 |
58a33dea0320154d39784a61316ecc953e1a009c | 1,682 | py | Python | data-science-onramp/data-ingestion/noxfile_config.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | data-science-onramp/data-ingestion/noxfile_config.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | data-science-onramp/data-ingestion/noxfile_config.py | InstantDomain/python-docs-samples | f8e293c722998b269da38b7fe11b98aae8932b8f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default TEST_CONFIG_OVERRIDE for python repos.
# You can copy this file into your directory, then it will be imported from
# the noxfile.py.
# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py
TEST_CONFIG_OVERRIDE = {
# You can opt out from the test for specific Python versions.
# There's no google-cloud-bigquery package for Python 3.9.
"ignored_versions": ["2.7", "3.6", "3.9"],
# Old samples are opted out of enforcing Python type hints
# All new samples should feature them
"enforce_type_hints": False,
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
# to use your own Cloud project.
"gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
# 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
# A dictionary you want to inject into your test. Don't put any
# secrets here. These values will override predefined values.
"envs": {},
}
| 42.05 | 88 | 0.737218 |
28dde302535868dd71d4a15da8a260061482dccd | 1,474 | py | Python | tests/sentry/api/endpoints/test_group_tagkey_details.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | tests/sentry/api/endpoints/test_group_tagkey_details.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | tests/sentry/api/endpoints/test_group_tagkey_details.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T06:36:57.000Z | 2017-02-09T06:36:57.000Z | from __future__ import absolute_import
import six
from sentry import tagstore
from sentry.testutils import APITestCase
class GroupTagDetailsTest(APITestCase):
def test_simple(self):
group = self.create_group()
group.data['tags'] = (['foo', 'bar'], )
group.save()
key, value = group.data['tags'][0]
tagkey = tagstore.create_tag_key(
project_id=group.project_id,
environment_id=None,
key=key,
values_seen=2
)
tagstore.create_tag_value(
project_id=group.project_id,
environment_id=None,
key=key,
value=value,
times_seen=4
)
tagstore.create_group_tag_key(
project_id=group.project_id,
group_id=group.id,
environment_id=None,
key=key,
values_seen=1,
)
tagstore.create_group_tag_value(
project_id=group.project_id,
group_id=group.id,
environment_id=None,
key=key,
value=value,
times_seen=3,
)
self.login_as(user=self.user)
url = u'/api/0/issues/{}/tags/{}/'.format(group.id, tagkey.key)
response = self.client.get(url, format='json')
assert response.status_code == 200, response.content
assert response.data['key'] == six.text_type(tagkey.key)
assert response.data['totalValues'] == 3
| 28.346154 | 71 | 0.571913 |
2130df02db838b0b8a27714d23b0f5d3a79e5cd2 | 38,850 | py | Python | src/optopus/_old.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | 2 | 2021-05-04T23:44:42.000Z | 2021-07-25T20:45:33.000Z | src/optopus/_old.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | null | null | null | src/optopus/_old.py | hindman/optopus | daaba31c6b1bd4f56e442326e36f7b3ea0b74b15 | [
"MIT"
] | null | null | null | import json
import re
import sys
import textwrap
from collections import defaultdict, OrderedDict
from six.moves.collections_abc import Iterable
from copy import deepcopy
from itertools import product
################
# Constants.
################
PATTERNS = dict(
simple = dict(
long_opt = r'--(\w[\w\-]*)',
short_opts = r'-(\w+)',
short_opt = r'-(\w)',
opt_arg = r'([A-Z][A-Z\d]*)',
pos_arg = r'\<([\w]+)\>',
),
)
PATTERNS['anchored'] = {
k : r'\A' + v + r'\Z'
for k, v in PATTERNS['simple'].items()
}
N_ZERO = 0
N_ONE = 1
N_MAX = 999999
ZERO_TUPLE = (N_ZERO, N_ZERO)
ONE_TUPLE = (N_ONE, N_ONE)
ZERO_OR_ONE_TUPLE = (N_ZERO, N_ONE)
ANY_TUPLE = (N_ZERO, N_MAX)
OPT_PREFIX = '-'
UNDERSCORE = '_'
WILDCARD_OPTION = '*'
LONG_OPT_PREFIX = OPT_PREFIX + OPT_PREFIX
SHORT_OPT_PREFIX = OPT_PREFIX
OPT_SPEC_STRIP_CHARS = OPT_PREFIX + '<>'
# Token types
WHITESPACE = 'WHITESPACE'
LONG_OPT = 'LONG_OPT'
SHORT_OPT = 'SHORT_OPT'
POS_OPT = 'POS_OPT'
OPT_ARG = 'OPT_ARG'
EOF = 'EOF'
# Regex components.
PATT_END = r'(?=\s|$)'
PATT_OPT_CHAR = r'[\w\-]+'
# Token types:
# - The type.
# - Whether the RegexLexer should emit the tokens of this type.
# - The regex to match the token.
# - TODO: should create a TokenType data object.
SIMPLE_SPEC_TOKENS = (
(WHITESPACE, False, re.compile(r'\s+')),
(LONG_OPT, True, re.compile(r'--' + PATT_OPT_CHAR + PATT_END)),
(SHORT_OPT, True, re.compile(r'-' + PATT_OPT_CHAR + PATT_END)),
(POS_OPT, True, re.compile(r'\<' + PATT_OPT_CHAR + r'\>' + PATT_END)),
(OPT_ARG, True, re.compile(r'[A-Z\d_\-]+' + PATT_END)),
)
################
# Parser.
################
class Parser(object):
'''
'''
VALID_KWARGS = {
'opts',
'simple_spec',
'wildcards',
'sections',
'formatter_config',
'program',
'add_help',
}
def __init__(self, *xs, **kws):
# This signature is bad for documentation.
for k in kws:
if k not in self.VALID_KWARGS:
fmt = 'Parser(): invalid keyword argument: {}'
msg = fmt.format(k)
raise OptoPyError(msg)
self.simple_spec = kws.get('simple_spec', None)
self.wildcards = kws.get('wildcards', None)
self.sections = kws.get('sections', None)
self.formatter_config = kws.get('formatter_config', FormatterConfig())
self.program = kws.get('program', None)
self.add_help = kws.get('add_help', False)
if self.simple_spec:
ssp = SimpleSpecParser(self.simple_spec)
self.opts = []
for otok in ssp.parse():
o = Opt(otok.option_spec)
o.option = otok.option
o.nargs = otok.nargs
o.arg_names = otok.arg_names
self.opts.append(o)
else:
opts = list(xs) + list(kws.get('opts', []))
self.opts = []
for x in opts:
if isinstance(x, Opt):
opt = x
elif isinstance(x, dict):
opt = Opt(**x)
else:
fmt = 'Parser(): invalid Opt: must be Opt or dict: {}'
msg = fmt.format(x)
raise OptoPyError(msg)
self.opts.append(opt)
if self.add_help:
opt = Opt('-h --help', text = 'Print help and exit.', tolerant = True)
self.opts.append(opt)
seen = set()
for o in self.opts:
nm = o.option
if nm in seen:
fmt = 'Parser(): duplicate Opt: {}'
msg = fmt.format(nm)
raise OptoPyError(msg)
else:
seen.add(nm)
def parse(self, args = None, should_exit = True, alt = False):
# If given no args, get them from sys.argv.
args = list(sys.argv[1:] if args is None else args)
# Add the wildcard Opt instances.
if self.wildcards:
self._add_wildcard_opts()
# Try to parse the args.
HELP = ('HELP',)
try:
if alt:
popts = self._do_alternative_parse(args)
else:
popts = self._do_parse(args)
if self.add_help and popts['help'].value:
raise OptoPyError(HELP)
return popts
except OptoPyError as e:
if should_exit:
if self.add_help and ('-h' in args or '--help' in args):
error_msg = HELP
else:
error_msg = e.args[0]
else:
raise
# If we did not return or raise above, it means an
# error occurred while parsing, and the user wanted the
# default behavior: print USAGE and exit.
if error_msg == HELP:
txt = self._get_help_text()
print(txt, end = '')
sys.exit(ExitCode.PARSE_HELP.code)
else:
txt = self._get_help_text(SectionName.USAGE, error_msg = error_msg)
print(txt, end = '')
sys.exit(ExitCode.PARSE_FAIL.code)
def _do_parse(self, args):
subphrases = [Phrase(opt = opt) for opt in self.opts]
phrase = Phrase(subphrases = subphrases)
self.parsed_options = ParsedOptions(opts = self.opts, args = args)
return phrase.parse(args, parsed_options = self.parsed_options)
def _do_alternative_parse(self, args):
subphrases = [Phrase(opt = opt) for opt in self.opts]
self.phrase = Phrase(subphrases = subphrases)
self.parsed_options = ParsedOptions(opts = self.opts, args = args)
return self.phrase.parse(args, parsed_options = self.parsed_options)
def _add_wildcard_opts(self):
self.opts.extend([
Opt('<positionals>', nargs = (N_ZERO, N_MAX)),
Opt(WILDCARD_OPTION),
])
@property
def wildcards(self):
        # If user has not set the wildcards-mode, we infer it via the presence
        # or absence of opts. Otherwise, we do what the user asked for.
if self._wildcards is None:
if self.simple_spec or self.opts:
return False
else:
return True
else:
return self._wildcards
@wildcards.setter
def wildcards(self, val):
if val is None:
self._wildcards = None
else:
self._wildcards = bool(val)
def help_text(self, *section_names):
return self._get_help_text(*section_names)
def _get_help_text(self, *section_names, **kws):
####
#
# Example usages:
#
# - All help-text sections, in order.
#
# p.help_text()
#
# - Specific help-text sections, in the requested order.
#
# p.help_text('usage')
# p.help_text('section-foo')
# p.help_text('section-foo', 'section-bar')
#
# Sections:
# - Declared implicitly via Opt instances.
# - Declared explicitly via FormatterConfig.
# - Defaults via SectionName.
#
# Section ordering:
# - SectionName.USAGE [unless declared in FormatterConfig]
# - FormatterConfig sections, in order
# - SectionName.POS [ditto]
# - SectionName.OPT [ditto]
#
# Issues:
# - Opt lacking sections:
# - allocate to SectionName.OPT or SectionName.POS.
#
# - FormatterConfig section lacking matching Opt instances:
# - prevent via validation
#
# Also see misc/examples/help-text.txt : API section.
#
####
####
# Setup the default sections.
####
default_sections = {
nm : Section(name = nm, label = nm.label)
for nm in SectionName
}
####
# Setup all sections that are eligible for use.
####
# A map of section names to Section instances.
all_sections = OrderedDict()
# First the USAGE section, unless the user explicitly
# declared its position in the FormatterConfig.
nm = SectionName.USAGE
if nm not in set(s.name for s in self.formatter_config.sections):
all_sections[nm] = default_sections[nm]
# Then any sections declared in FormatterConfig.
for s in self.formatter_config.sections:
all_sections[s.name] = s
# Then sections declared indirectly in Opt instances.
for o in self.opts:
for nm in o.sections:
all_sections[nm] = Section(name = nm)
# Then the default POS and OPT sections, if there are Opt instances lacking sections.
homeless = [o for o in self.opts if not o.sections]
needed = [
(SectionName.POS, any(o for o in homeless if o.is_positional_opt)),
(SectionName.OPT, any(o for o in homeless if not o.is_positional_opt)),
]
for nm, has_opts in needed:
if has_opts and nm not in all_sections:
all_sections[nm] = default_sections[nm]
# Then an aliases section.
if self.formatter_config.alias_style == AliasStyle.SEPARATE:
if any(o.aliases for o in self.opts):
nm = SectionName.ALIASES
all_sections[nm] = default_sections[nm]
####
# Validate the section names passed by the caller.
####
invalid = [nm for nm in section_names if nm not in all_sections]
if invalid:
fmt = 'Parser.help_text(): invalid sections: {}'
msg = fmt.format(' '.join(invalid))
raise OptoPyError(msg)
####
# Setup the sections for which we will build help text.
####
sections = OrderedDict(
(nm, all_sections[nm])
for nm in (section_names or all_sections)
)
####
# Add an errors section, if needed.
####
error_msg = kws.get('error_msg', None)
if error_msg:
nm = SectionName.ERR
s = default_sections[nm]
s.text = error_msg
sections[nm] = s
####
# Attach Opt instances to those sections.
####
for o in self.opts:
if o.sections:
for nm in o.sections:
if nm in sections:
sections[nm].opts.append(o)
else:
nm = SectionName.POS if o.is_positional_opt else SectionName.OPT
if nm in sections:
sections[nm].opts.append(o)
####
# Assemble the lines of help text.
####
MAX_WID = 80
lines = []
for nm, s in sections.items():
# Section label.
lines.append('')
lines.append(s.label + ':')
# The usage section.
if nm is SectionName.USAGE:
parts = []
for o in self.opts:
val = o.option_spec
if ' ' in val:
fmt = '({})' if o.required else '[{}]'
parts.append(fmt.format(val))
else:
parts.append(val)
prog = self.program or 'cli'
wid = MAX_WID - len(prog) - 1
txt = ' '.join(map(str, parts))
usage_lines = textwrap.wrap(txt, wid)
fmt = ' {} {}'
val = prog
blank = ' ' * len(prog)
for i, ln in enumerate(usage_lines):
lines.append(fmt.format(val, ln))
if i == 0:
val = blank
# Aliases section.
elif nm is SectionName.ALIASES:
fmt = ' {} {}'
for o in self.opts:
if o.aliases:
val = fmt.format(o.option, ' '.join(o.aliases))
lines.append(val)
# A Section with literal text.
elif s.text:
fmt = ' {}'
txt_lines = s.text.split('\n')
for ln in txt_lines:
lines.append(fmt.format(ln))
# Section with Opt instances.
else:
wid = MAX_WID - 23
fmt = ' {:<20} {}'
for o in s.opts:
opt_lines = textwrap.wrap(o.text or '', wid) or ['']
if self.formatter_config.alias_style == AliasStyle.SEPARATE:
val = o.option_spec
else:
# TODO: sloppy code; clean up.
val = o.option_spec
rest = ' '.join(val.split()[1:])
vals = [val]
for a in o.aliases:
vals.append('{} {}'.format(a, rest))
val = ', '.join(vals)
if len(val) > 20:
lines.append(' {}'.format(val))
val = ''
for i, ln in enumerate(opt_lines):
lines.append(fmt.format(val, ln))
if i == 0:
val = ''
####
# Return the help text.
####
lines.append('')
return '\n'.join(ln.rstrip() for ln in lines)
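# Example usage (added sketch, not part of the original module):
#
#   p = Parser(simple_spec = '--foo FF <bar>')
#   popts = p.parse(['--foo', 'x', 'y'])
#   popts.foo # -> 'x'
#   popts.bar # -> 'y'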
################
# Enum.
################
class Enum(object):
def __init__(self, enum_name, *members):
self._enum_name = enum_name
self._members = OrderedDict()
for value, d in enumerate(members):
if not isinstance(d, dict):
d = dict(name = d)
em = EnumMember(enum_name, value = value, **d)
self._members[d['name']] = em
self._rmembers = OrderedDict(
(em.value, em)
for em in self._members.values()
)
def __iter__(self):
return iter(self._members.values())
def __getattr__(self, name):
if name in self._members:
return self._members[name]
else:
raise AttributeError(name)
def __call__(self, value):
if value in self._rmembers:
return self._rmembers[value]
else:
raise ValueError(value)
################
# EnumMember.
################
class EnumMember(object):
def __init__(self, enum_name, name, value, **kws):
self.enum_name = enum_name
self.name = name
self.value = value
for k, v in kws.items():
setattr(self, k, v)
def __str__(self):
fmt = '{}({}, {!r})'
msg = fmt.format(self.enum_name, self.name, self.value)
return msg
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self is other
def __ne__(self, other):
return not self == other
def __hash__(self):
return self.value
################
# Enum instances: user facing.
################
AliasStyle = Enum('AliasStyle', 'SEPARATE', 'MERGED')
HelpTextStyle = Enum('HelpTextStyle', 'CLI', 'MAN')
OptTextStyle = Enum('OptTextStyle', 'CLI', 'MAN')
SectionName = Enum(
'SectionName',
dict(name = 'USAGE', label = 'Usage'),
dict(name = 'POS', label = 'Positional arguments'),
dict(name = 'OPT', label = 'Options'),
dict(name = 'ALIASES', label = 'Aliases'),
dict(name = 'ERR', label = 'Errors'),
)
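# Added illustration: members behave like lightweight enum values, e.g.
# SectionName.USAGE.label == 'Usage' and HelpTextStyle(0) is HelpTextStyle.CLI.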
################
# Enum instances: not user facing.
################
OptType = Enum('OptType', 'LONG', 'SHORT', 'POS', 'WILD')
PhraseLogicType = Enum('PhraseLogicType', 'AND', 'OR')
PhraseType = Enum('PhraseType', 'OPT', 'POS', 'PHRASE', 'WILD', 'ZONE')
ExitCode = Enum(
'ExitCode',
dict(name = 'SUCCESS', code = 0),
dict(name = 'PARSE_HELP', code = 0),
dict(name = 'PARSE_FAIL', code = 2),
)
################
# Errors.
################
class RegexLexerError(Exception):
pass
class OptoPyError(Exception):
'''
'''
pass
################
# FormatterConfig.
################
class FormatterConfig(object):
'''
'''
DEFAULTS = dict(
program_name = '',
section_label_punct = ':',
after_section_label = '',
after_section = '\n',
program_summary = '',
style = HelpTextStyle.CLI,
opt_style = OptTextStyle.CLI,
alias_style = AliasStyle.SEPARATE,
)
def __init__(self, *sections, **kws):
self.sections = sections
for k, v in self.DEFAULTS.items():
val = kws.get(k, v)
setattr(self, k, val)
################
# Section.
################
class Section(object):
'''
'''
def __init__(self, name, label = None, text = None, opts = None):
self.name = name
self.label = self._default_label if label is None else label
self.text = text
self.opts = opts or []
# TODO: validation: require either text or opts, and not both.
def __repr__(self):
return 'Section({})'.format(self.name)
@property
def _default_label(self):
if isinstance(self.name, EnumMember):
return self.name.label
else:
return (
self.name.
replace('-', ' ').
replace('_', ' ').
capitalize() + ' options'
)
################
# GrammarSpecParser.
################
class GrammarSpecParser(object):
pass
################
# Opt.
################
class Opt(object):
'''
'''
def __init__(self,
option_spec,
nargs = None,
ntimes = None,
required = None,
text = None,
sections = None,
aliases = None,
tolerant = False):
if option_spec == WILDCARD_OPTION:
self.option_spec = option_spec
self.option = option_spec
self.nargs = nargs or ZERO_TUPLE
self.destination = None
self._opt_type = OptType.WILD
else:
# Try to parse the option_spec.
try:
# TODO: validation. The last OptToken is authoritative.
# Elements 0..-1 are used only for aliases.
opts = list(SimpleSpecParser(option_spec).parse())
assert opts
otok = opts[-1]
otok.aliases = [otok.option for otok in opts]
otok.aliases.pop()
except (RegexLexerError, AssertionError) as e:
otok = None
# Raise if we did not get an OptToken.
if otok is None:
fmt = 'Opt: invalid option_spec: {}'
msg = fmt.format(option_spec)
raise OptoPyError(msg)
# Assign values from the OptToken to the Opt.
self.option_spec = otok.option_spec
self.option = otok.option
self.nargs = nargs or otok.nargs
self.arg_names = otok.arg_names
self.aliases = otok.aliases + (aliases or [])
# Determine the OptType.
self.destination = self.option.strip(OPT_SPEC_STRIP_CHARS).replace(OPT_PREFIX, UNDERSCORE)
self._opt_type = (
OptType.LONG if self.option.startswith(LONG_OPT_PREFIX) else
OptType.SHORT if self.option.startswith(SHORT_OPT_PREFIX) else
OptType.POS
)
# Set self.ntimes.
if required is not None and ntimes is not None:
msg = 'Opt: do not set both required and ntimes'
raise OptoPyError(msg)
elif ntimes is not None:
# If ntimes was given, just set it.
self.ntimes = ntimes
elif required is not None:
# If required was given, use it to set ntimes.
v0 = N_ONE if required else N_ZERO
v1 = N_MAX if self.is_wildcard_opt else N_ONE
self.ntimes = (v0, v1)
else:
# Neither was given, so use the defaults.
self.ntimes = (
ONE_TUPLE if self.is_positional_opt else
ANY_TUPLE if self.is_wildcard_opt else
ZERO_OR_ONE_TUPLE
)
self.text = text
self.sections = list(sections or [])
self.tolerant = tolerant
def _concrete_opts(self):
# TODO: this isn't correct. The cross-product does not make sense at
# the Opt-level. Rather, it must be done from the top level -- the full
# cross product of all possibilities (including those where an Opt
# might appear ntimes=0, which isn't a valid Opt).
xs = list(range(self.nargs[0], self.nargs[1] + 1))
ys = list(range(self.ntimes[0], self.ntimes[1] + 1))
zs = self.aliases or [self.option]
for nargs, ntimes, option in product(xs, ys, zs):
if ntimes:
o = Opt(
option,
nargs = nargs,
ntimes = ntimes,
text = self.text,
sections = self.sections,
)
def __str__(self):
fmt = 'Opt({})'
return fmt.format(self.option)
def __repr__(self):
return self.__str__()
@property
def is_long_opt(self):
return self._opt_type == OptType.LONG
@property
def is_short_opt(self):
return self._opt_type == OptType.SHORT
@property
def is_positional_opt(self):
return self._opt_type == OptType.POS
@property
def is_wildcard_opt(self):
return self._opt_type == OptType.WILD
@property
def nargs(self):
return self._nargs
@nargs.setter
def nargs(self, val):
self._nargs = self._get_ntuple(val, 'nargs')
@property
def ntimes(self):
return self._ntimes
@ntimes.setter
def ntimes(self, val):
self._ntimes = self._get_ntuple(val, 'ntimes')
@property
def required(self):
return self.ntimes[0] > N_ZERO
def _get_ntuple(self, val, attr_name):
#
# Convert val to a tuple. For example, these are
# valid inputs: (0, 1), (1, 1), 1, 2, etc.
if isinstance(val, Iterable):
tup = tuple(val)
else:
tup = (val, val)
#
# Get m, n values from the tuple.
try:
m, n = map(int, tup)
except Exception:
m, n = (None, None)
#
# Return the valid tuple or raise.
invalids = [
m is None,
n is None,
m < N_ZERO,
n < m,
(n == N_ZERO and attr_name == 'ntimes'),
]
if any(invalids):
fmt = 'Invalid {}: {}'
msg = fmt.format(attr_name, val)
raise OptoPyError(msg)
else:
return tup
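# Added illustration (not part of the original module): Opt('--foo FF GG') yields
# option '--foo', destination 'foo', nargs (2, 2) and arg_names ['FF', 'GG'];
# a bare positional such as Opt('<path>') defaults to ntimes (1, 1), i.e. required once.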
################
# ParsedOptions.
################
class ParsedOptions(object):
'''
'''
def __init__(self, opts = None, args = None):
self.parsed_opts = OrderedDict()
self.args_index = -1
self.args = args
for opt in (opts or []):
po = ParsedOpt(opt, None)
self.parsed_opts[opt.destination] = po
def _add_opt(self, opt):
po = ParsedOpt(opt, None)
self.parsed_opts[opt.destination] = po
def _del_opt(self, opt):
del self.parsed_opts[opt.destination]
def __getattr__(self, a):
if a in self.parsed_opts:
return self.parsed_opts[a].value
else:
raise AttributeError(a)
def __getitem__(self, destination):
return self.parsed_opts[destination]
def __iter__(self):
        # User can iterate directly over the ParsedOpt instances.
# In addition, because ParsedOpt also defines __iter__(), a
# ParsedOptions instance can be converted directly to a dict.
return iter(self.parsed_opts.values())
def _dump(self):
return dict(
args = self.args,
args_index = self.args_index,
parsed_opts = dict(self),
parsed_opts_raw = {
dest : po._values
for dest, po in self.parsed_opts.items()
},
)
################
# ParsedOpt.
################
class ParsedOpt(object):
'''
'''
def __init__(self, opt, value):
self.destination = opt.destination
self.opt = opt
self._values = []
def __iter__(self):
tup = (self.destination, self.value)
return iter(tup)
def _add_occurrence(self):
self._values.append([])
def _add_value(self, val):
try:
assert self._values
vs = self._values[-1]
assert isinstance(vs, list)
vs.append(val)
except AssertionError:
msg = 'ParsedOpt: cannot _add_value() without any occurrences'
raise OptoPyError(msg)
@property
def value(self):
# Setup.
mt, nt = self.opt.ntimes
ma, na = self.opt.nargs
vs = self._values
# Multiple ntimes and nargs: return a 2D list.
if nt > 1 and na > 1:
return vs or None
# Multiple ntimes. Return a flat list.
elif nt > 1:
return [xs[0] for xs in vs] if vs else None
# Multiple nargs. Return a flat list.
elif nt > 1 or na > 1:
return vs[0] if vs else None
# Dual option (flag or take a single arg). Return flat list, so that the
# user can distinguish option-not-given (None) from no-args (empty list).
elif self.opt.nargs == ZERO_OR_ONE_TUPLE:
return vs[0] if vs else None
# Single ntimes and simple option (flag or single-arg). Just return a value.
else:
if vs:
xs = vs[0]
return xs[0] if xs else None
else:
return None
@property
def _requires_occurrences(self):
vs = self._values
mt, nt = self.opt.ntimes
n = len(vs)
return n < mt
@property
def _can_occur_again(self):
vs = self._values
mt, nt = self.opt.ntimes
n = len(vs)
return n < nt
@property
def _requires_args(self):
vs = self._values
if vs:
xs = vs[-1]
ma, na = self.opt.nargs
n = len(xs)
return ma > n
else:
msg = 'ParsedOpt: cannot _requires_args() without any occurrences'
raise OptoPyError(msg)
@property
def _can_take_args(self):
vs = self._values
if vs:
xs = vs[-1]
ma, na = self.opt.nargs
n = len(xs)
return n < na
else:
msg = 'ParsedOpt: cannot _can_take_args() without any occurrences'
raise OptoPyError(msg)
def __str__(self):
fmt = 'ParsedOpt({}, {!r})'
msg = fmt.format(self.destination, self.value)
return msg
def __repr__(self):
return self.__str__()
################
# Phrase.
################
class Phrase(object):
def __init__(self,
subphrases = None,
opt = None):
self.subphrases = subphrases or []
self.opt = opt
def __str__(self):
if self.opt:
fmt = '{}'
return fmt.format(self.opt)
else:
fmt = 'Phrase({})'
return fmt.format(self.subphrases)
def __repr__(self):
return self.__str__()
@property
def phrase_type(self):
if self.opt is None:
return PhraseType.PHRASE
elif self.opt.is_wildcard_opt:
return PhraseType.WILD
elif self.opt.is_positional_opt:
return PhraseType.POS
else:
return PhraseType.OPT
def parse(self, args, parsed_options = None):
# Set up the ParsedOptions that we will return.
if parsed_options is None:
opts = [sph.opt for sph in self.subphrases]
popts = ParsedOptions(opts = opts)
else:
popts = parsed_options
# The expected positional Opt instances.
pos_opts = [
sph.opt
for sph in self.subphrases
if sph.phrase_type == PhraseType.POS
]
# Bookkeeping variables.
# - Indexes to args and pos_opts.
# - The most recently seen Opt (non-positional).
# - A set of already seen Opt.destination values.
pos_i = -1
prev_opt = None
prev_pos = None
seen = set()
# Process the args.
while True:
popts.args_index += 1
try:
arg = args[popts.args_index]
except IndexError:
break
# The arg is an option.
if arg.startswith('--') or arg.startswith('-'):
# Make sure we are not expecting an option-arg.
if prev_opt and popts[prev_opt]._requires_args:
fmt = 'Found option, but expected option-argument: {}'
msg = fmt.format(arg)
raise OptoPyError(msg)
# Try to find a matching Opt.
prev_opt = None
for sph in self.subphrases:
if sph.phrase_type == PhraseType.OPT:
if sph.opt.option == arg or arg in sph.opt.aliases:
prev_opt = sph.opt.destination
break
elif sph.phrase_type == PhraseType.WILD:
opt = Opt(arg)
popts._add_opt(opt)
prev_opt = opt.destination
break
# Failed to find a match.
if prev_opt is None:
fmt = 'Found unexpected option: {}'
msg = fmt.format(arg)
raise OptoPyError(msg)
# Found a match, but we've already seen it.
if prev_opt in seen:
fmt = 'Found repeated option: {}'
msg = fmt.format(arg)
raise OptoPyError(msg)
# Valid Opt.
seen.add(prev_opt)
po = popts[prev_opt]
po._add_occurrence()
if po.opt.nargs == ZERO_TUPLE:
po._add_value(True)
continue
# The arg is not an option, but the previous option
# can still take opt-args.
elif prev_opt and popts[prev_opt]._can_take_args:
po = popts[prev_opt]
po._add_value(arg)
continue
# Otherwise, treat the arg as a positional.
# - Either use the previous positional (if it can take more args).
# - Or use the next positional (if there is one).
if prev_pos and popts[prev_pos]._can_take_args:
po = popts[prev_pos]
else:
pos_i += 1
try:
prev_pos = pos_opts[pos_i].destination
po = popts[prev_pos]
po._add_occurrence()
except IndexError:
prev_pos = None
# No more positional args are expected.
if not prev_pos:
fmt = 'Found unexpected positional argument: {}'
msg = fmt.format(arg)
raise OptoPyError(msg)
# Valid positional.
po._add_value(arg)
# Delete the wildcard Opt from ParsedOptions.
wild = None
for po in popts:
if po.opt.is_wildcard_opt:
wild = po.opt
break
if wild:
popts._del_opt(wild)
# Check that all Opt instances occurred the required ntimes.
problems = sorted(po.opt.option for po in popts if po._requires_occurrences)
if problems:
fmt = 'Did not get expected N of occurrences: {}'
msg = fmt.format(', '.join(problems))
raise OptoPyError(msg)
# Check that all Opt instances got the required nargs.
problems = sorted(po.opt.option for po in popts if po._requires_args)
if problems:
fmt = 'Did not get expected N of arguments: {}'
msg = fmt.format(', '.join(problems))
raise OptoPyError(msg)
# Return the ParsedOptions.
return popts
################
# RegexLexer.
################
class RegexLexer(object):
def __init__(self, text, token_types):
self.text = text
self.token_types = token_types
self.pos = 0
self.max_pos = len(self.text) - 1
self.is_eof = None
def get_next_token(self):
# Starting at self.pos, try to emit the next Token.
# If we find a valid token, there are two possibilities:
#
# - A Token that we should emit: just return it.
#
# - A Token that we should suppress: break out of the for-loop,
# but try the while-loop again. This will allow the Lexer
# to be able to ignore any number of suppressed tokens.
#
tok = True
while tok:
for tt, emit, rgx in self.token_types:
tok = self.match_token(rgx, tt)
if tok:
if emit:
return tok
else:
break
# If we did not return a Token above, we should be
# at the end of the input text.
if self.pos > self.max_pos:
return Token(EOF, None)
else:
self.error()
def match_token(self, rgx, token_type):
m = rgx.match(self.text, pos = self.pos)
if m:
txt = m.group(0)
self.pos += len(txt)
return Token(token_type, txt)
else:
return None
def error(self):
fmt = 'RegexLexerError: pos={}'
msg = fmt.format(self.pos)
raise RegexLexerError(msg)
def __iter__(self):
self.is_eof = False
return self
def __next__(self):
if self.is_eof:
raise StopIteration
else:
tok = self.get_next_token()
if tok.isa(EOF):
self.is_eof = True
return tok
################
# GenericParserMixin.
################
class GenericParserMixin(object):
def parse(self):
# Setup: have the lexer get the first token.
self.current_token = self.lexer.get_next_token()
elem = True
# Consume and yield as many tokens as we can.
while elem:
for func in self.parser_functions:
elem = func()
if elem:
yield elem
break
# We expect EOF as the final token.
if not self.current_token.isa(EOF):
self.error()
def eat(self, token_type):
# If the current Token is of the expected type, return it
# after advancing the lexer. Otherwise, return None.
tok = self.current_token
if tok.isa(token_type):
self.current_token = self.lexer.get_next_token()
return tok
else:
return None
def error(self):
fmt = 'Invalid syntax: pos={}'
msg = fmt.format(self.lexer.pos)
raise Exception(msg)
################
# SimpleSpecParser.
################
class SimpleSpecParser(GenericParserMixin):
####
#
# To implement a parser:
#
# - Inherit from GenericParserMixin.
#
# - Define self.lexer and self.parser_functions.
#
# - Each of those functions should return some data element
# appropriate for the grammar (if the current Token matches)
# or None.
#
# Usage example:
#
# txt = '--foo FF GG -x --blort -z Z1 Z2 <q> <r> --debug'
# ssp = SimpleSpecParser(txt)
# tokens = list(ssp.parse())
#
####
def __init__(self, text):
self.lexer = RegexLexer(text, SIMPLE_SPEC_TOKENS)
self.parser_functions = (
self.long_opt,
self.short_opt,
self.pos_opt,
)
def long_opt(self):
return self._opt(LONG_OPT)
def short_opt(self):
return self._opt(SHORT_OPT)
def pos_opt(self):
tok = self.eat(POS_OPT)
if tok:
otok = OptToken()
otok.option = tok.value
otok.option_spec = tok.value
otok.nargs = ONE_TUPLE
otok.opt_type = OptType.POS
otok.arg_names = []
return otok
else:
return None
def _opt(self, opt_type):
# If the current Token is not the expected option type, bail out.
# Otherwise, count the N of OPT_ARG that the OptToken takes.
tok = self.eat(opt_type)
if not tok:
return None
otok = OptToken()
otok.option = tok.value
otok.option_spec = tok.value
otok.nargs = ZERO_TUPLE
otok.opt_type = OptType.SHORT if opt_type == SHORT_OPT else OptType.LONG
otok.arg_names = []
while tok:
tok = self.eat(OPT_ARG)
if tok:
m, n = otok.nargs
otok.nargs = (m + 1, n + 1)
otok.arg_names.append(tok.value)
otok.option_spec += ' {}'.format(tok.value)
return otok
################
# Token.
################
class Token(object):
def __init__(self, token_type, value):
self.token_type = token_type
self.value = value
def isa(self, *types):
return self.token_type in types
def __str__(self):
fmt = 'Token({}, {!r})'
msg = fmt.format(self.token_type, self.value)
return msg
def __repr__(self):
return self.__str__()
class OptToken(object):
def __repr__(self):
fmt = 'OptToken({})'
return fmt.format(self.option)
################
# Helpers.
################
################
# Temporary stuff.
################
def dump_em(xs):
print('\n')
for x in xs:
dump(*x, tight = True)
print('\n')
def dump(x, label = None, tight = False):
if not tight:
print('\n')
if label:
print(label, '=>', x)
else:
print(x)
if not tight:
print('\n')
def jdump(d):
print(json.dumps(d, indent = 4))
| 29.166667 | 102 | 0.512458 |
242baf05e2b4b56e61200bf5de764c6d0af73de2 | 70,674 | py | Python | src/transformers/utils/dummy_pt_objects.py | ashirviskas/transformers | 8a085169328b2be2bff6939199597c5515d997f5 | [
"Apache-2.0"
] | null | null | null | src/transformers/utils/dummy_pt_objects.py | ashirviskas/transformers | 8a085169328b2be2bff6939199597c5515d997f5 | [
"Apache-2.0"
] | null | null | null | src/transformers/utils/dummy_pt_objects.py | ashirviskas/transformers | 8a085169328b2be2bff6939199597c5515d997f5 | [
"Apache-2.0"
] | null | null | null | # This file is autogenerated by the command `make fix-copies`, do not edit.
from ..file_utils import requires_backends
class PyTorchBenchmark:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PyTorchBenchmarkArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollator:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForLanguageModeling:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForPermutationLanguageModeling:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForSeq2Seq:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForSOP:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorForWholeWordMask:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DataCollatorWithPadding:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def default_data_collator(*args, **kwargs):
requires_backends(default_data_collator, ["torch"])
class GlueDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GlueDataTrainingArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineTextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithRefDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LineByLineWithSOPTextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SquadDataTrainingArguments:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDataset:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TextDatasetForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamScorer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BeamSearchScorer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedBOSTokenLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ForcedEOSTokenLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class HammingDiversityLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class InfNanRemoveLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsProcessorList:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MinLengthLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoBadWordsLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class NoRepeatNGramLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PrefixConstrainedLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RepetitionPenaltyLogitsProcessor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TemperatureLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopKLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TopPLogitsWarper:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxLengthCriteria:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MaxTimeCriteria:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteria:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class StoppingCriteriaList:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def top_k_top_p_filtering(*args, **kwargs):
requires_backends(top_k_top_p_filtering, ["torch"])
class Conv1D:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def apply_chunking_to_forward(*args, **kwargs):
requires_backends(apply_chunking_to_forward, ["torch"])
def prune_layer(*args, **kwargs):
requires_backends(prune_layer, ["torch"])
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AlbertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AlbertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_albert(*args, **kwargs):
requires_backends(load_tf_weights_in_albert, ["torch"])
MODEL_FOR_CAUSAL_LM_MAPPING = None
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None
MODEL_FOR_MASKED_LM_MAPPING = None
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None
MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None
MODEL_FOR_PRETRAINING_MAPPING = None
MODEL_FOR_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None
MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = None
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None
MODEL_MAPPING = None
MODEL_WITH_LM_HEAD_MAPPING = None
class AutoModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSeq2SeqLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTableQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AutoModelWithLMHead:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
BART_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BartForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BartPretrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedBartModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert(*args, **kwargs):
requires_backends(load_tf_weights_in_bert, ["torch"])
class BertGenerationDecoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BertGenerationEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_bert_generation(*args, **kwargs):
requires_backends(load_tf_weights_in_bert_generation, ["torch"])
BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BigBirdForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BigBirdPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_big_bird(*args, **kwargs):
requires_backends(load_tf_weights_in_big_bird, ["torch"])
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class BlenderbotSmallForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class BlenderbotSmallModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CamembertForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CamembertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ConvBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ConvBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_convbert(*args, **kwargs):
requires_backends(load_tf_weights_in_convbert, ["torch"])
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class CTRLForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class CTRLPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DebertaV2ForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2ForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DebertaV2PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DistilBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DistilBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST = None
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class DPRContextEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedContextEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedQuestionEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRPretrainedReader:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRQuestionEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class DPRReader:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ElectraForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ElectraPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_electra(*args, **kwargs):
requires_backends(load_tf_weights_in_electra, ["torch"])
class EncoderDecoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FlaubertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForQuestionAnsweringSimple:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FlaubertWithLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FSMTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PretrainedFSMTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class FunnelBaseModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class FunnelModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_funnel(*args, **kwargs):
requires_backends(load_tf_weights_in_funnel, ["torch"])
GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPT2DoubleHeadsModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2ForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2LMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPT2PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt2(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt2, ["torch"])
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST = None
class GPTNeoForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class GPTNeoPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_gpt_neo(*args, **kwargs):
requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class IBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class IBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LayoutLMForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LayoutLMModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
LED_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LEDForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LEDModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class LongformerForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LongformerSelfAttention:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertVisualFeatureEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class LxmertXLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST = None
class M2M100ForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class M2M100Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MarianMTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MBartModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTForClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MMBTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ModalEmbeddings:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MobileBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForNextSentencePrediction:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForPreTraining:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MobileBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_mobilebert(*args, **kwargs):
requires_backends(load_tf_weights_in_mobilebert, ["torch"])
MPNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class MPNetForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MPNetPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5EncoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5ForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class MT5Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class OpenAIGPTDoubleHeadsModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class OpenAIGPTPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_openai_gpt(*args, **kwargs):
requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
class PegasusForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class PegasusModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ProphetNetDecoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ProphetNetPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagSequenceForGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RagTokenForGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ReformerAttention:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerLayer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ReformerModelWithLMHead:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RetriBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RetriBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class RobertaForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class RobertaModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Speech2TextForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Speech2TextModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class SqueezeBertForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertModule:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class SqueezeBertPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
T5_PRETRAINED_MODEL_ARCHIVE_LIST = None
class T5EncoderModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5ForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class T5PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_t5(*args, **kwargs):
requires_backends(load_tf_weights_in_t5, ["torch"])
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = None
class TapasForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TapasModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST = None
class AdaptiveEmbedding:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class TransfoXLPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_transfo_xl(*args, **kwargs):
requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
VIT_PRETRAINED_MODEL_ARCHIVE_LIST = None
class ViTForImageClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class ViTPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST = None
class Wav2Vec2ForCTC:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2ForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2Model:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class Wav2Vec2PreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForQuestionAnsweringSimple:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMWithLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMProphetNetDecoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetEncoder:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetForConditionalGeneration:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMProphetNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLMRobertaForCausalLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMaskedLM:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLMRobertaModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = None
class XLNetForMultipleChoice:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnswering:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForQuestionAnsweringSimple:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForSequenceClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetForTokenClassification:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetLMHeadModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
class XLNetPreTrainedModel:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_pretrained(self, *args, **kwargs):
requires_backends(self, ["torch"])
def load_tf_weights_in_xlnet(*args, **kwargs):
requires_backends(load_tf_weights_in_xlnet, ["torch"])
class Adafactor:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
class AdamW:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def get_constant_schedule(*args, **kwargs):
requires_backends(get_constant_schedule, ["torch"])
def get_constant_schedule_with_warmup(*args, **kwargs):
requires_backends(get_constant_schedule_with_warmup, ["torch"])
def get_cosine_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_schedule_with_warmup, ["torch"])
def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs):
requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
def get_linear_schedule_with_warmup(*args, **kwargs):
requires_backends(get_linear_schedule_with_warmup, ["torch"])
def get_polynomial_decay_schedule_with_warmup(*args, **kwargs):
requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
def get_scheduler(*args, **kwargs):
requires_backends(get_scheduler, ["torch"])
class Trainer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
def torch_distributed_zero_first(*args, **kwargs):
requires_backends(torch_distributed_zero_first, ["torch"])
class Seq2SeqTrainer:
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
| 24.286598 | 84 | 0.669737 |
ae45d495c68a509c9e1fbdcdad7be1a0e9747a14 | 6,661 | py | Python | rmgradient/test_rmgradient.py | drnc/rmgradient | 2bc3253b54661bf546e7742b646b5da897182d23 | [
"Apache-2.0"
] | null | null | null | rmgradient/test_rmgradient.py | drnc/rmgradient | 2bc3253b54661bf546e7742b646b5da897182d23 | [
"Apache-2.0"
] | null | null | null | rmgradient/test_rmgradient.py | drnc/rmgradient | 2bc3253b54661bf546e7742b646b5da897182d23 | [
"Apache-2.0"
] | null | null | null | import logging
import numpy
import rmgradient
import unittest
import os
import tifffile
def datafile(filename):
return os.path.join('test_data', filename)
def filereader(filename):
return rmgradient.TiffReader(datafile(filename))
def filewriter(filename):
return rmgradient.TiffWriter(datafile(filename))
image = rmgradient.TiffReader(datafile('image.tif')).load()
image_points = [
[3, 3], [-3, 0], [7, 5], [13, 4],
[5, 8], [15, 8], [8, 11], [12, 12],
[9, 4], [10, 9], [9, 12], [9, 16],
[3, 16], [-3, -19], [8, 15], [16, 16],
[-16, -19], [-16, 0]]
# Maximum difference between image.tif and its background in the 10x10 middle square
def image_background_middle_max_diff(sigma, smooth):
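    # Fit a background model through the listed control points and measure how far it deviates
    # from the original image, restricted to the central 10x10 patch (presumably where the
    # interpolation is best constrained by the surrounding points).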
model = rmgradient.BackgroundModel(image, sigma, smooth)
bg = model.run(image_points).astype(numpy.uint16)
return abs(image.astype(float)[5:15, 5:15] - bg.astype(float)[5:15, 5:15]).max()
class TestTiffReader(unittest.TestCase):
def test_name(self):
self.assertEqual(rmgradient.TiffReader(datafile('name')).name(), datafile('name'))
def test_invalid(self):
self.assertFalse(rmgradient.TiffReader(datafile('unexisting-file')).is_valid())
def test_load(self):
self.assertEqual(rmgradient.TiffReader(datafile('1d.tif')).load().tolist(), [10, 100, 1000, 10000])
self.assertEqual(rmgradient.TiffReader(datafile('image.tif')).load()[5, 3].tolist(), [4162, 7908, 2037])
def test_is_2d(self):
self.assertFalse(rmgradient.TiffReader(datafile('1d.tif')).is_2d())
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).is_2d())
def test_points_in_middle(self):
# check single point
self.assertFalse(rmgradient.TiffReader(datafile('1d.tif')).points_in_middle([[2, 2]], 1))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[0, 0]], 1))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[19, 19]], 2))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[0, 18]], 2))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[17, 18]], 3))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[16, 6]], 4))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[4, 3]], 4))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[17, 18]], 1))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[16, 16]], 3))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[19, 19]], 0))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[4, 3]], 3))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[10, 10]], 8))
# check multiple points
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[17, 18], [16, 16], [4, 3]], 3))
self.assertFalse(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[4, 3], [16, 16], [17, 18]], 3))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[4, 3], [16, 16], [17, 18]], 1))
self.assertTrue(rmgradient.TiffReader(datafile('image.tif')).points_in_middle([[4, 3], [16, 16]], 3))
class TestTiffWriter(unittest.TestCase):
def test_invalid(self):
self.assertFalse(rmgradient.TiffWriter(datafile('unexisting-dir/out.tif')).is_valid())
def test_write(self):
out_filename = datafile('out.tif')
out = rmgradient.TiffWriter(out_filename)
out.write(image, numpy.uint16)
out_read = rmgradient.TiffReader(out_filename).load()
self.assertEqual(out_read.tolist(), image.tolist())
self.assertEqual(out_read.dtype, numpy.uint16)
os.remove(out_filename)
def test_write_compressed(self):
out_filename = datafile('out.tif')
out = rmgradient.TiffWriter(out_filename)
out.write(image, numpy.uint16, 5)
out_read = rmgradient.TiffReader(out_filename).load()
self.assertEqual(out_read.tolist(), image.tolist())
self.assertEqual(out_read.dtype, numpy.uint16)
os.remove(out_filename)
class TestBlurImage(unittest.TestCase):
def test_blur_points_sigma_1(self):
blur = rmgradient.BlurImage(image, 1.0)
self.assertEqual([int(i) for i in blur.run([5, 5])], [5970, 12054, 3034])
self.assertEqual([int(i) for i in blur.run([15, 15])], [16006, 32023, 8143])
self.assertEqual([int(i) for i in blur.run([10, 10])], [10996, 22022, 5516])
self.assertEqual([int(i) for i in blur.run([12, 4])], [12816, 25887, 6659])
self.assertEqual([int(i) for i in blur.run([11, 8])], [12040, 23925, 6106])
def test_blur_points_sigma_25(self):
blur = rmgradient.BlurImage(image, 2.5)
self.assertEqual([int(i) for i in blur.run([10, 10])], [10993, 22028, 5531])
self.assertEqual([int(i) for i in blur.run([11, 8])], [11999, 23990, 6065])
class TestBackgroundModel(unittest.TestCase):
def test_bgmodel(self):
self.assertTrue(image_background_middle_max_diff(sigma=1.0, smooth=0) < 1500)
self.assertTrue(image_background_middle_max_diff(sigma=1.0, smooth=0.1) < 1500)
self.assertTrue(image_background_middle_max_diff(sigma=1.0, smooth=1.0) < 1500)
self.assertTrue(image_background_middle_max_diff(sigma=0.5, smooth=0) < 1500)
self.assertTrue(image_background_middle_max_diff(sigma=0.5, smooth=0.1) < 1500)
self.assertTrue(image_background_middle_max_diff(sigma=0.5, smooth=1.0) < 1500)
def test_bgmodel_rows(self):
models = [
rmgradient.BackgroundModel(image, 1.0, 0.1),
rmgradient.BackgroundModel(image, 1.0, 0.1, rows=10),
rmgradient.BackgroundModel(image, 1.0, 0.1, rows=2),
rmgradient.BackgroundModel(image, 1.0, 0.1, rows=1)]
bg = [model.run(image_points).tolist() for model in models]
self.assertEqual(bg[0], bg[1])
self.assertEqual(bg[0], bg[2])
self.assertEqual(bg[0], bg[3])
class TestGradientRemove(unittest.TestCase):
def test_rmgradient(self):
model = rmgradient.BackgroundModel(image, 1.0)
bg = model.run(image_points)
res = rmgradient.GradientRemove(image, bg).run()
self.assertTrue(res.min() > 0) # no clipped data
# no real check on the output
if __name__ == '__main__':
logging.basicConfig(level=100) # deactivate logging
unittest.main()
| 47.241135 | 120 | 0.67062 |
dc7b6bf696acd2a1fb5efabd7b446e7fbc6c30f5 | 617 | py | Python | nova/storage/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 7 | 2017-06-19T19:37:00.000Z | 2019-06-16T02:06:14.000Z | nova/storage/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | null | null | null | nova/storage/__init__.py | bopopescu/nova-master | 58809056f3a219c6ea3667003f906eeaf581fa95 | [
"Apache-2.0"
] | 6 | 2015-06-20T16:07:28.000Z | 2020-08-19T14:57:59.000Z | # Copyright (c) 2013 Hewlett-Packard, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| 44.071429 | 78 | 0.724473 |
9aa95555ae11852ac31b8c213593949da8d6f245 | 13,435 | py | Python | thonny/plugins/replayer.py | webduino-cn/thonny | 74da2278aa018eafec697c2b92e2355237669ecd | [
"MIT"
] | null | null | null | thonny/plugins/replayer.py | webduino-cn/thonny | 74da2278aa018eafec697c2b92e2355237669ecd | [
"MIT"
] | 13 | 2018-11-15T09:31:06.000Z | 2019-11-22T18:16:54.000Z | thonny/plugins/replayer.py | webduino-cn/thonny | 74da2278aa018eafec697c2b92e2355237669ecd | [
"MIT"
] | 3 | 2018-11-24T14:00:30.000Z | 2019-07-02T02:32:26.000Z | import ast
import json
import os.path
import tkinter as tk
from datetime import datetime
from tkinter import ttk
from thonny import codeview, get_workbench, ui_utils, THONNY_USER_DIR
from thonny.base_file_browser import BaseLocalFileBrowser
from thonny.plugins.coloring import SyntaxColorer
from thonny.ui_utils import lookup_style_option, CommonDialog
class ReplayWindow(CommonDialog):
def __init__(self):
super().__init__(get_workbench(), background=lookup_style_option("TFrame", "background"))
ui_utils.set_zoomed(self, True)
self.main_pw = ReplayerPanedWindow(self, orient=tk.HORIZONTAL, sashwidth=10)
self.center_pw = ReplayerPanedWindow(self.main_pw, orient=tk.VERTICAL, sashwidth=10)
self.right_frame = ttk.Frame(self.main_pw)
self.right_pw = ReplayerPanedWindow(self.right_frame, orient=tk.VERTICAL, sashwidth=10)
self.editor_notebook = ReplayerEditorNotebook(self.center_pw)
shell_book = ttk.Notebook(self.main_pw)
self.shell = ShellFrame(shell_book)
self.details_frame = EventDetailsFrame(self.right_pw)
self.log_frame = LogFrame(
self.right_pw, self.editor_notebook, self.shell, self.details_frame
)
self.browser = ReplayerFileBrowser(self.main_pw, self.log_frame)
self.control_frame = ControlFrame(self.right_frame)
self.main_pw.grid(padx=10, pady=10, sticky=tk.NSEW)
self.main_pw.add(self.browser, width=200)
self.main_pw.add(self.center_pw, width=1000)
self.main_pw.add(self.right_frame, width=200)
self.center_pw.add(self.editor_notebook, height=700)
self.center_pw.add(shell_book, height=300)
shell_book.add(self.shell, text="Shell")
self.right_pw.grid(sticky=tk.NSEW)
self.control_frame.grid(sticky=tk.NSEW)
self.right_pw.add(self.log_frame, height=600)
self.right_pw.add(self.details_frame, height=200)
self.right_frame.columnconfigure(0, weight=1)
self.right_frame.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
class ReplayerFileBrowser(BaseLocalFileBrowser):
def __init__(self, master, log_frame):
super().__init__(master, True)
self.log_frame = log_frame
self.configure(border=1, relief=tk.GROOVE)
user_logs_path = os.path.join(THONNY_USER_DIR, "user_logs")
if os.path.exists(user_logs_path):
self.focus_into(user_logs_path)
else:
self.focus_into(os.path.expanduser("~"))
def on_double_click(self, event):
# self.save_current_folder()
path = self.get_selected_path()
if path:
kind = self.get_selected_kind()
if kind == "dir":
self.focus_into(path)
else:
self.log_frame.load_log(path)
return "break" # avoid default action of opening the node
class ControlFrame(ttk.Frame):
def __init__(self, master, **kw):
ttk.Frame.__init__(self, master=master, **kw)
self.toggle_button = ttk.Button(self, text="Play")
self.speed_scale = ttk.Scale(self, from_=1, to=100, orient=tk.HORIZONTAL)
self.toggle_button.grid(row=0, column=0, sticky=tk.NSEW, pady=(10, 0), padx=(0, 5))
self.speed_scale.grid(row=0, column=1, sticky=tk.NSEW, pady=(10, 0), padx=(5, 0))
self.columnconfigure(1, weight=1)
class LogFrame(ui_utils.TreeFrame):
def __init__(self, master, editor_book, shell, details_frame):
ui_utils.TreeFrame.__init__(self, master, ("desc", "pause"))
self.tree.heading("desc", text="Event", anchor=tk.W)
self.tree.heading("pause", text="Pause (sec)", anchor=tk.W)
self.configure(border=1, relief=tk.GROOVE)
self.editor_notebook = editor_book
self.shell = shell
self.details_frame = details_frame
self.all_events = []
self.last_event_index = -1
self.loading = False
def load_log(self, filename):
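        # Rebuild the whole view from a Thonny user-log JSON file: clear previous state, list
        # every event by its "sequence" name, and fill the pause column with the gap in whole
        # seconds between consecutive event timestamps.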
self._clear_tree()
self.details_frame._clear_tree()
self.all_events = []
self.last_event_index = -1
self.loading = True
self.editor_notebook.reset()
self.shell.reset()
with open(filename, encoding="UTF-8") as f:
events = json.load(f)
last_event_time = None
for event in events:
node_id = self.tree.insert("", "end")
self.tree.set(node_id, "desc", event["sequence"])
if len(event["time"]) == 19:
# 0 fraction may have been skipped
event["time"] += ".0"
event_time = datetime.strptime(event["time"], "%Y-%m-%dT%H:%M:%S.%f")
if last_event_time:
delta = event_time - last_event_time
pause = delta.seconds
else:
pause = 0
self.tree.set(node_id, "pause", str(pause if pause else ""))
self.all_events.append(event)
last_event_time = event_time
self.loading = False
def replay_event(self, event):
"this should be called with events in correct order"
# print("log replay", event)
if "text_widget_id" in event:
if (
event.get("text_widget_context", None) == "shell"
or event.get("text_widget_class") == "ShellText"
):
self.shell.replay_event(event)
else:
self.editor_notebook.replay_event(event)
def reset(self):
self.shell.reset()
self.editor_notebook.reset()
self.last_event_index = -1
def on_select(self, event):
# parameter "event" is here tkinter event
if self.loading:
return
iid = self.tree.focus()
if iid != "":
self.select_event(self.tree.index(iid))
def select_event(self, event_index):
event = self.all_events[event_index]
self.details_frame.load_event(event)
# here event means logged event
if event_index > self.last_event_index:
# replay all events between last replayed event up to and including this event
while self.last_event_index < event_index:
self.replay_event(self.all_events[self.last_event_index + 1])
self.last_event_index += 1
elif event_index < self.last_event_index:
            # Undo by resetting and replaying again
self.reset()
self.select_event(event_index)
class EventDetailsFrame(ui_utils.TreeFrame):
def __init__(self, master):
ui_utils.TreeFrame.__init__(self, master, columns=("attribute", "value"))
self.tree.heading("attribute", text="Attribute", anchor=tk.W)
self.tree.heading("value", text="Value", anchor=tk.W)
self.configure(border=1, relief=tk.GROOVE)
def load_event(self, event):
self._clear_tree()
for name in self.order_keys(event):
node_id = self.tree.insert("", "end")
self.tree.set(node_id, "attribute", name)
self.tree.set(node_id, "value", event[name])
def order_keys(self, event):
return event.keys()
class ReplayerCodeView(ttk.Frame):
def __init__(self, master):
ttk.Frame.__init__(self, master)
self.vbar = ttk.Scrollbar(self, orient=tk.VERTICAL)
self.vbar.grid(row=0, column=2, sticky=tk.NSEW)
self.hbar = ttk.Scrollbar(self, orient=tk.HORIZONTAL)
self.hbar.grid(row=1, column=0, sticky=tk.NSEW, columnspan=2)
self.text = codeview.SyntaxText(
self,
yscrollcommand=self.vbar.set,
xscrollcommand=self.hbar.set,
borderwidth=0,
font="EditorFont",
wrap=tk.NONE,
insertwidth=2,
# selectborderwidth=2,
inactiveselectbackground="gray",
# highlightthickness=0, # TODO: try different in Mac and Linux
# highlightcolor="gray",
padx=5,
pady=5,
undo=True,
autoseparators=False,
)
self.text.grid(row=0, column=1, sticky=tk.NSEW)
self.hbar["command"] = self.text.xview
self.vbar["command"] = self.text.yview
self.columnconfigure(1, weight=1)
self.rowconfigure(0, weight=1)
class ReplayerEditor(ttk.Frame):
def __init__(self, master):
ttk.Frame.__init__(self, master)
self.code_view = ReplayerCodeView(self)
self.code_view.grid(sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
def replay_event(self, event):
if event["sequence"] in ["TextInsert", "TextDelete"]:
if event["sequence"] == "TextInsert":
self.code_view.text.insert(
event["index"], event["text"], ast.literal_eval(event["tags"])
)
elif event["sequence"] == "TextDelete":
if event["index2"] and event["index2"] != "None":
self.code_view.text.delete(event["index1"], event["index2"])
else:
self.code_view.text.delete(event["index1"])
self.see_event(event)
def see_event(self, event):
for key in ["index", "index1", "index2"]:
if key in event and event[key] and event[key] != "None":
self.code_view.text.see(event[key])
def reset(self):
self.code_view.text.delete("1.0", "end")
class ReplayerEditorProper(ReplayerEditor):
def __init__(self, master):
ReplayerEditor.__init__(self, master)
self.set_colorer()
def set_colorer(self):
self.colorer = SyntaxColorer(self.code_view.text)
def replay_event(self, event):
ReplayerEditor.replay_event(self, event)
# TODO: some problem when doing fast rewind
# self.colorer.notify_range("1.0", "end")
def reset(self):
ReplayerEditor.reset(self)
self.set_colorer()
class ReplayerEditorNotebook(ttk.Notebook):
def __init__(self, master):
ttk.Notebook.__init__(self, master, padding=0)
self._editors_by_text_widget_id = {}
def clear(self):
for child in self.winfo_children():
child.destroy()
self._editors_by_text_widget_id = {}
def get_editor_by_text_widget_id(self, text_widget_id):
if text_widget_id not in self._editors_by_text_widget_id:
editor = ReplayerEditorProper(self)
self.add(editor, text="<untitled>")
self._editors_by_text_widget_id[text_widget_id] = editor
return self._editors_by_text_widget_id[text_widget_id]
def replay_event(self, event):
if "text_widget_id" in event:
editor = self.get_editor_by_text_widget_id(event["text_widget_id"])
# print(event.editor_id, id(editor), event)
self.select(editor)
editor.replay_event(event)
if "filename" in event:
self.tab(editor, text=os.path.basename(event["filename"]))
def reset(self):
for editor in self.winfo_children():
self.forget(editor)
editor.destroy()
self._editors_by_text_widget_id = {}
class ShellFrame(ReplayerEditor):
def __init__(self, master):
ReplayerEditor.__init__(self, master)
# TODO: use same source as shell
vert_spacing = 10
io_indent = 16
self.code_view.text.tag_configure("toplevel", font="EditorFont")
self.code_view.text.tag_configure("prompt", foreground="purple", font="BoldEditorFont")
self.code_view.text.tag_configure("command", foreground="black")
self.code_view.text.tag_configure("version", foreground="DarkGray")
self.code_view.text.tag_configure("automagic", foreground="DarkGray")
self.code_view.text.tag_configure(
"value", foreground="DarkBlue"
) # TODO: see also _text_key_press and _text_key_release
self.code_view.text.tag_configure("error", foreground="Red")
self.code_view.text.tag_configure(
"io", lmargin1=io_indent, lmargin2=io_indent, rmargin=io_indent, font="IOFont"
)
self.code_view.text.tag_configure("stdin", foreground="Blue")
self.code_view.text.tag_configure("stdout", foreground="Black")
self.code_view.text.tag_configure("stderr", foreground="Red")
self.code_view.text.tag_configure("hyperlink", foreground="#3A66DD", underline=True)
self.code_view.text.tag_configure("vertically_spaced", spacing1=vert_spacing)
self.code_view.text.tag_configure("inactive", foreground="#aaaaaa")
class ReplayerPanedWindow(tk.PanedWindow):
def __init__(self, master=None, cnf={}, **kw):
cnf = cnf.copy()
cnf.update(kw)
cnf["background"] = lookup_style_option("TFrame", "background")
super().__init__(master=master, cnf=cnf)
def load_plugin() -> None:
def open_replayer():
win = ReplayWindow()
ui_utils.show_dialog(win)
get_workbench().set_default("tools.replayer_last_browser_folder", None)
if get_workbench().get_ui_mode() == "expert":
get_workbench().add_command(
"open_replayer", "tools", _("Open replayer..."), open_replayer, group=110
)
| 36.808219 | 97 | 0.627019 |
c0d9a88e13760e8cf59883c3a44871d1fe05b804 | 917 | py | Python | api/app/models/Product.py | ValerianThomas/Titanic_kaggle_in_production | 45cff05b32a0193f8e75f37a151c2a588c927a03 | [
"MIT"
] | null | null | null | api/app/models/Product.py | ValerianThomas/Titanic_kaggle_in_production | 45cff05b32a0193f8e75f37a151c2a588c927a03 | [
"MIT"
] | null | null | null | api/app/models/Product.py | ValerianThomas/Titanic_kaggle_in_production | 45cff05b32a0193f8e75f37a151c2a588c927a03 | [
"MIT"
] | null | null | null | from marshmallow import fields, Schema
from .. import db
class Product (db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=True)
    description = db.Column(db.String(200))
price = db.Column(db.Float)
qty = db.Column(db.Integer)
def __init__(self, name, description, price, qty):
self.name = name
self.description = description
self.price = price
self.qty = qty
def save(self):
db.session.add(self)
db.session.commit()
def delete(self):
db.session.delete(self)
db.session.commit()
def update(self, data):
for key, item in data.items():
setattr(self, key, item)
db.session.commit()
class ProductSchema(Schema):
id = fields.Int(dump_only=True)
name = fields.Str(required=True)
description = fields.Str(required=True)
qty = fields.Int(required=True) | 26.2 | 54 | 0.642312 |
e8469cd04bf7b012811f67e712a4d81b7745285b | 578 | py | Python | api/apps/users/migrations/0002_auto_20170216_1234.py | Bibliotecaio/biblioteca | 584268b7615f2be5f011fad09b472ee8a06914e0 | [
"MIT"
] | null | null | null | api/apps/users/migrations/0002_auto_20170216_1234.py | Bibliotecaio/biblioteca | 584268b7615f2be5f011fad09b472ee8a06914e0 | [
"MIT"
] | null | null | null | api/apps/users/migrations/0002_auto_20170216_1234.py | Bibliotecaio/biblioteca | 584268b7615f2be5f011fad09b472ee8a06914e0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-16 12:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='user',
name='role',
field=models.CharField(choices=[('admin', 'Администратор'), ('editor', 'Редактор'), ('site_user', 'Обычный пользователь')], default='site_user', max_length=100, verbose_name='Роль'),
),
]
| 27.52381 | 194 | 0.624567 |
fd5944b095f1dde8522b8cedba321a5d8e4a3284 | 4,163 | py | Python | quizzes/00.organize.me/Cracking the Coding Interview/B_quickSort_comp.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-07-20T10:15:49.000Z | 2018-07-20T10:16:54.000Z | quizzes/00.organize.me/Cracking the Coding Interview/B_quickSort_comp.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | 2 | 2018-06-26T09:12:44.000Z | 2019-12-18T00:09:14.000Z | quizzes/00.organize.me/Cracking the Coding Interview/B_quickSort_comp.py | JiniousChoi/encyclopedia-in-code | 77bc551a03a2a3e3808e50016ece14adb5cfbd96 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
def quicksort_wrapper(arr, compare):
quicksort ( arr, 0, len(arr)-1, compare)
def quicksort(arr, start_idx, end_idx, compare):
#arr has no element
if end_idx - start_idx < 0:
return
#this part is not necessary.
#it just prevent one more needless recursive in-step
#arr has 1 element
elif end_idx - start_idx == 0:
#practically sorted
return
#From this part, arr has more than 2 elements
#Initialize variables
the_idx = start_idx
the_val = arr[start_idx]
p = start_idx + 1
q = end_idx
###
###part1: bi-parting the given arr against pivot value
###
while p < q :
#if arr[p] <= the_val <= arr[q]:
if (compare(arr[p], the_val)<=0) and (compare(the_val,arr[q])<=0):
p+=1
q-=1
#right p, wrong q
#elif arr[p] <= the_val and the_val > arr[q] :
elif (compare(arr[p], the_val)<=0) and (compare(the_val,arr[q])>0):
p+=1
#wrong p, right q
#elif arr[p] > the_val and the_val <= arr[q]:
elif (compare(arr[p], the_val)>0) and (compare(the_val,arr[q])<=0):
q-=1
#both wrong
#elif arr[p] > the_val > arr[q]:
elif (compare(arr[p], the_val)>0) and (compare(the_val,arr[q])>0):
arr[p], arr[q] = arr[q], arr[p]
p+=1
p-=1
#to minimize logical bug
else:
print the_val, arr[p], arr[q]
assert(False)
###
###part2: put pivot value in between the bi-parts
###
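    # At this point the scan has either crossed (q + 1 == p: both partitions are complete) or
    # stopped on a single unclassified element (p == q); either way the pivot, still sitting at
    # start_idx, is swapped into its final slot the_idx before recursing on both sides.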
# case1: completely bi-parted
if q + 1 == p:
arr[the_idx], arr[q] = arr[q], arr[the_idx]
the_idx = q
#part1_start_idx = start_idx
#part1_end_idx = q-1
## pivot_dix == q
#part2_start_idx = p
#part2_end_idx = end_idx
# case2: arr[p] element should be sorted
elif p == q:
#if the_val >= arr[p]:
if compare(the_val, arr[p])>=0:
arr[the_idx], arr[p] = arr[p], arr[the_idx]
the_idx = p
else:
arr[the_idx], arr[p-1] = arr[p-1], arr[the_idx]
the_idx = p-1
else:
assert(False)
quicksort( arr, start_idx, the_idx-1, compare )
quicksort( arr, the_idx+1, end_idx, compare )
from random import randint
def int_testcases_generator(minCnt=10, maxCnt=150, minInt=-10, maxInt=10):
print 'int_testcases_generator called'
testcases = []
for i in range(5):
testcases.append([])
for j in range(randint( minCnt, maxCnt )):
testcases[i].append( randint( minInt, maxInt ) )
return testcases
def str_testcases_generator(wordsMinCnt=10, wordsMaxCnt=10, charMinCnt=5, charMaxCnt=10):
sample_str= 'ABCDEFGHIJKLMNOPQRSTUVWXYZ!abcdefghijklmnopqrstuvwxyz'
sample_str_len=len(sample_str)
testcases=[]
set_count = 5
for i in range(set_count):
testcases.append([])
for testcase in testcases:
for word_cnt in range(randint(wordsMinCnt, wordsMaxCnt)):
temp_str=''
for char_cnt in range(randint(charMinCnt, charMaxCnt)):
sample_ch = sample_str[randint(0,sample_str_len-1)]
temp_str += sample_ch
testcase.append(temp_str)
return testcases
#callback functions definitions
def compare_int_incr(a,b):
return a-b
def compare_int_decr(a,b):
return compare_int_incr(b,a)
def compare_strlen_incr(s1,s2):
    return len(s1)-len(s2)
def compare_strlen_decr(s1,s2):
    return compare_strlen_incr(s2,s1)
def compare_str_incr(s1,s2):
if s1>s2: return 1
elif s1==s2: return 0
else: return -1
def compare_str_decr(s1,s2):
return compare_str_incr(s2,s1)
if __name__=='__main__':
#testcases = int_testcases_generator(10, 10, -10, 10)
testcases = str_testcases_generator(10, 10, 10, 10)
for testcase in testcases:
print testcase, '->',
quicksort_wrapper(testcase, compare_str_decr)
print testcase
| 27.753333 | 89 | 0.576027 |
230e696b17532fa772b1bfc645df4e0dce425242 | 1,157 | py | Python | cffex/cffex/spiders/cffex.py | rahrahr/CrawlCffex | 419a42c0fa70d9c04adcc19a01020011dc2acd56 | [
"MIT"
] | null | null | null | cffex/cffex/spiders/cffex.py | rahrahr/CrawlCffex | 419a42c0fa70d9c04adcc19a01020011dc2acd56 | [
"MIT"
] | null | null | null | cffex/cffex/spiders/cffex.py | rahrahr/CrawlCffex | 419a42c0fa70d9c04adcc19a01020011dc2acd56 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import scrapy
from cffex.items import CffexItem
import datetime
class CffexSpider(scrapy.Spider):
name = 'cffex'
start_urls = ['http://www.cffex.com.cn/lssjxz/']
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36', 'encoding': 'unicode'
}
endYear = datetime.datetime.now().year
endMonth = datetime.datetime.now().month
def parse(self, response):
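        # Emit one item per monthly historical-data archive: the fixed months 2010-04..2010-12
        # first, then every month of each later year up to the current one; f() zero-pads
        # single-digit months so the URLs match the site's naming scheme.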
item = CffexItem()
def f(x): return '0' + str(x) if x < 10 else str(x)
start_urls = ['http://www.cffex.com.cn/sj/historysj/2010' +
f(i) + '/zip/2010' + f(i) + '.zip' for i in range(4, 13)]
for year in range(2011, self.endYear + 1):
start_urls += ['http://www.cffex.com.cn/sj/historysj/' + str(year) +
f(i) + '/zip/' + str(year) + f(i) + '.zip' for i in range(1, 13)]
for url in start_urls:
item['file_url'] = url
yield item
            if url.split('/')[-1] == '{}{:02d}.zip'.format(self.endYear, self.endMonth):
break
| 38.566667 | 161 | 0.554019 |
57b31d6c3b21d5e2ab256afd9ab3fd595c4be4d8 | 1,612 | py | Python | friend.py | sharyar/friend-dir | d6a5e77157b2e2d50ba23bd981a8e2c60efcb781 | [
"MIT"
] | null | null | null | friend.py | sharyar/friend-dir | d6a5e77157b2e2d50ba23bd981a8e2c60efcb781 | [
"MIT"
] | null | null | null | friend.py | sharyar/friend-dir | d6a5e77157b2e2d50ba23bd981a8e2c60efcb781 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from datetime import date
@dataclass
class Friend:
fname : str = None
lname : str = None
phone : str = None
birthdate: date = None
def __post_init__(self):
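        # Post-init hook: birthdate arrives as an ISO "YYYY-MM-DD" string and is converted to a
        # date object; last_contact always starts out as today.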
if self.birthdate != None:
self.birthdate = date.fromisoformat(self.birthdate)
self.last_contact = date.today()
def __str__(self) -> str:
return (f'{self.fname} {self.lname} - {self.phone}. '
f'Born on {self.birthdate.strftime("%B-%d-%Y")}. '
f'Last Contacted: {self.last_contact.strftime("%B-%d-%Y")}.')
def set_last_contacted_date(self, date_contact = date.today()):
self.last_contact = date_contact
def __eq__(self, o: object) -> bool:
if not isinstance(o, Friend):
raise ValueError('Can not compare a non-friend object')
return (self.fname == o.fname and self.lname == o.lname and self.birthdate == o.birthdate)
class FriendList:
    def __init__(self, friends=None) -> None:
        super().__init__()
        self.friends = friends if friends is not None else []
def add_friend_to_list(self, friend):
if friend in self.friends:
print('A duplicate may exist. Please check list first.')
else:
self.friends.append(friend)
def delete_friend(self, friend):
if friend in self.friends:
self.friends.remove(friend)
print('Deleted')
def __str__(self) -> str:
return str(list(friend.fname + ' ' + friend.lname for friend in self.friends)) | 31.607843 | 98 | 0.585608 |
d9978c23fabd0bf26a9cec7ead776bb8318494db | 1,374 | py | Python | ooobuild/dyn/xml/crypto/xxml_signature.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/xml/crypto/xxml_signature.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/xml/crypto/xxml_signature.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.xml.crypto
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
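# When running inside a real UNO/LibreOffice environment the class is taken straight from the
# office API and tagged with its namespace metadata; otherwise the static stub below is imported
# so that tooling and type checking keep working without an office installation.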
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.xml.crypto import XXMLSignature as XXMLSignature
setattr(XXMLSignature, '__ooo_ns__', 'com.sun.star.xml.crypto')
setattr(XXMLSignature, '__ooo_full_ns__', 'com.sun.star.xml.crypto.XXMLSignature')
setattr(XXMLSignature, '__ooo_type_name__', 'interface')
else:
from ....lo.xml.crypto.xxml_signature import XXMLSignature as XXMLSignature
__all__ = ['XXMLSignature']
| 37.135135 | 86 | 0.766376 |
f89c280656480bd7b6c8defa2846a9ce6c8364cd | 25 | py | Python | cride/circles/views/__init__.py | mariogonzcardona/platzi-cride | 40da9489de8339816dcb18db59f46daa851f6236 | [
"MIT"
] | null | null | null | cride/circles/views/__init__.py | mariogonzcardona/platzi-cride | 40da9489de8339816dcb18db59f46daa851f6236 | [
"MIT"
] | 1 | 2020-05-28T18:31:48.000Z | 2020-05-28T18:31:48.000Z | cride/circles/views/__init__.py | mariogonzcardona/platzi-cride | 40da9489de8339816dcb18db59f46daa851f6236 | [
"MIT"
] | null | null | null | # from .circles import *
| 12.5 | 24 | 0.68 |
1124e5aa4e2cc0af4ca1a3903ad3056321e6d44e | 1,898 | py | Python | setup.py | renovate-tests/cs251-toolkit | fc1dbc85e04083116a985ab1bd5314a60125f038 | [
"MIT"
] | null | null | null | setup.py | renovate-tests/cs251-toolkit | fc1dbc85e04083116a985ab1bd5314a60125f038 | [
"MIT"
] | null | null | null | setup.py | renovate-tests/cs251-toolkit | fc1dbc85e04083116a985ab1bd5314a60125f038 | [
"MIT"
] | null | null | null | import sys
from setuptools import setup, find_packages
if sys.version_info < (3, 5):
sys.exit("The toolkit requires Python 3.5 or greater.\nYou have {}".format(sys.version_info))
setup(
name='cs251tk',
version='2.5.0',
description='The CS251 (Software Design) Toolkit',
author='Hawken Rives',
author_email='[email protected]',
url='https://github.com/stodevx/cs251-toolkit',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='stolaf course-tooling',
install_requires=[
'PyYAML == 3.*',
'requests >= 2.20.*',
'termcolor == 1.*',
'natsort == 5.0.*',
'appdirs == 1.4.*',
'python-dateutil == 2.7.*'
],
tests_require=['tox'],
packages=find_packages(exclude=['tests', 'docs']),
# see http://python-packaging.readthedocs.io/en/latest/command-line-scripts.html
entry_points={
'console_scripts': [
'cs251tk=cs251tk.toolkit.__main__:main',
'referee=cs251tk.referee.__main__:main',
],
},
)
| 33.892857 | 97 | 0.602213 |
6acc708d8f95a62cd3a468300bf2eab3d6c8a0db | 816 | py | Python | features/steps/mark.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | features/steps/mark.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | features/steps/mark.py | rvodden/minisculus | 097f0be1e061c1e313d929e1d71c17c2a402d71c | [
"MIT"
] | null | null | null | from behave import given
from behave.runner import Context
from minisculus import MarkOne, MarkTwo
@given("a mark one machine with its wheel set to {}")
def a_mark_one_machine_with_its_wheel_set_to(context: Context, wheel_value: int):
"""
Args:
context: The feature context.
wheel_value: The value of the wheel.
"""
context.mark = MarkOne(wheel_value)
@given("a mark two machine with its wheels set to {} and {}")
def a_mark_two_machine_with_its_wheels_set_to_and(
context: Context, wheel1_value: int, wheel2_value: int
):
"""
Args:
context: The feature context.
wheel1_value: The value the first wheel should be set to.
wheel2_value: The value the second wheel should be set to.
"""
context.mark = MarkTwo(wheel1_value, wheel2_value)
| 29.142857 | 81 | 0.703431 |
890cc2af4dd286f050f998f73df0709ed4b1aa87 | 78 | py | Python | server/cleanIpCore/consts.py | Rexarrior/NetworkUtility | 37ebe95aa46462ab5fe2dfe83320c95fe404abd3 | [
"Apache-2.0"
] | null | null | null | server/cleanIpCore/consts.py | Rexarrior/NetworkUtility | 37ebe95aa46462ab5fe2dfe83320c95fe404abd3 | [
"Apache-2.0"
] | 3 | 2022-02-13T15:00:05.000Z | 2022-02-27T05:56:34.000Z | server/cleanIpCore/consts.py | Rexarrior/NetworkUtility | 37ebe95aa46462ab5fe2dfe83320c95fe404abd3 | [
"Apache-2.0"
] | null | null | null |
S_BASE_IP = '0.0.0.0'
S_MIN_PORT = 8001
S_MAX_PORT = 9001
S_MAX_COUNT = 1000
| 13 | 21 | 0.717949 |
c6b835dcfb7fc8904c662a0272b4ffd6884e763b | 2,850 | py | Python | Lib/site-packages/chainer/functions/array/where.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | Lib/site-packages/chainer/functions/array/where.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | 11 | 2020-01-28T22:49:05.000Z | 2022-03-11T23:50:27.000Z | Lib/site-packages/chainer/functions/array/where.py | km-t/dcpython | c0fcd5557691004d7d9d22a662d90e52ecc5f34f | [
"CNRI-Python-GPL-Compatible"
] | null | null | null | import numpy
import chainer
from chainer import backend
from chainer import function_node
from chainer.utils import type_check
class Where(function_node.FunctionNode):
"""Choose elements depending on condition."""
def __init__(self, condition):
self.condition = condition
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, y_type = in_types
condition = self.condition
type_check.expect(
condition.dtype == numpy.bool_,
x_type.dtype == y_type.dtype,
)
type_check.expect_broadcast_shapes(
condition.shape, x_type.shape, y_type.shape)
def forward(self, inputs):
# may broadcast
xp = backend.get_array_module(*inputs)
x, y = inputs
condition = self.condition
return xp.where(condition, x, y),
def backward(self, indexes, grad_outputs):
condition = self.condition
xp = backend.get_array_module(condition)
g, = grad_outputs
zero = xp.zeros((), dtype=g.dtype)
ret = []
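        # Route the upstream gradient: the input taken where condition is True receives g there
        # (and zero elsewhere), and vice versa for the other input; sum_to then folds any
        # broadcast axes back down to each input's original shape.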
if 0 in indexes:
gx, = Where(condition).apply((g, zero))
ret.append(chainer.functions.sum_to(gx, self.inputs[0].shape))
if 1 in indexes:
gy, = Where(condition).apply((zero, g))
ret.append(chainer.functions.sum_to(gy, self.inputs[1].shape))
return ret
def where(condition, x, y):
"""Choose elements depending on condition.
    This function chooses values depending on a given ``condition``.
    The shapes of ``condition``, ``x``, and ``y`` must be broadcastable against each other.
Args:
condition (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable containing the condition.
A :math:`(s_1, s_2, ..., s_N)` -shaped boolean array.
Only boolean array is permitted.
x (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable chosen when ``condition`` is ``True``.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
y (:class:`~chainer.Variable` or :ref:`ndarray`):
Input variable chosen when ``condition`` is ``False``.
A :math:`(s_1, s_2, ..., s_N)` -shaped float array.
Returns:
~chainer.Variable: Variable containing chosen values.
.. admonition:: Example
>>> cond = np.array([[1, 0], [0, 1]], dtype=np.bool)
>>> cond
array([[ True, False],
[False, True]])
>>> x = np.array([[1, 2], [3, 4]], np.float32)
>>> y = np.zeros((2, 2), np.float32)
>>> F.where(cond, x, y).data
array([[1., 0.],
[0., 4.]], dtype=float32)
"""
if isinstance(condition, chainer.Variable):
condition = condition.array
z, = Where(condition).apply((x, y))
return z
| 32.022472 | 74 | 0.581404 |
5fd6b4fd3380ddf6d390e4a47ce8bebd32e17a4f | 5,095 | py | Python | algorithm/utils/img_utils.py | danromuald/sagemaker-pytorch-neural-style | 19699d998b45c13f37820f76924bacc8ef6185a4 | [
"MIT"
] | null | null | null | algorithm/utils/img_utils.py | danromuald/sagemaker-pytorch-neural-style | 19699d998b45c13f37820f76924bacc8ef6185a4 | [
"MIT"
] | null | null | null | algorithm/utils/img_utils.py | danromuald/sagemaker-pytorch-neural-style | 19699d998b45c13f37820f76924bacc8ef6185a4 | [
"MIT"
] | null | null | null | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: [email protected]
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
import numpy as np
import torch
from PIL import Image
from torch.autograd import Variable
from io import BytesIO
import base64
def tensor_load_rgbimage(filename, size=None, scale=None, keep_asp=False):
img = Image.open(filename).convert('RGB')
if size is not None:
if keep_asp:
size2 = int(size * 1.0 / img.size[0] * img.size[1])
img = img.resize((size, size2), Image.ANTIALIAS)
else:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
img = np.array(img).transpose(2, 0, 1)
img = torch.from_numpy(img).float()
return img
def tensor_load_inference_img(img, size=None, scale=None, keep_asp=False):
if size is not None:
if keep_asp:
size2 = int(size * 1.0 / img.size[0] * img.size[1])
img = img.resize((size, size2), Image.ANTIALIAS)
else:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize(
(int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
img = np.array(img).transpose(2, 0, 1)
img = torch.from_numpy(img).float()
return img
def tensor_make_inference_img_str(tensor, cuda=True):
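    # Clamp to the displayable [0, 255] range, convert the (C, H, W) tensor to an 8-bit RGB
    # image, and return the JPEG bytes as a base64 data URI ready to embed in a web response.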
if cuda:
img = tensor.clone().cpu().clamp(0, 255).numpy()
else:
img = tensor.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype('uint8')
img = Image.fromarray(img)
buff = BytesIO()
img.save(buff, format='JPEG')
img_str = base64.b64encode(buff.getvalue())
return "data:image/jpeg;base64," + img_str.decode('utf-8')
def tensor_save_rgbimage(tensor, filename, cuda=False):
if cuda:
img = tensor.clone().cpu().clamp(0, 255).numpy()
else:
img = tensor.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype('uint8')
img = Image.fromarray(img)
img.save(filename)
def tensor_save_bgrimage(tensor, filename, cuda=False):
(b, g, r) = torch.chunk(tensor, 3)
tensor = torch.cat((r, g, b))
tensor_save_rgbimage(tensor, filename, cuda)
def gram_matrix(y):
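    # Gram matrix used for the style loss: flatten every channel into a row vector and take
    # batched channel-by-channel inner products; dividing by ch * h * w keeps the statistic
    # independent of the feature-map size.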
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
def subtract_imagenet_mean_batch(batch):
"""Subtract ImageNet mean pixel-wise from a BGR image."""
tensortype = type(batch.data)
mean = tensortype(batch.data.size())
mean[:, 0, :, :] = 103.939
mean[:, 1, :, :] = 116.779
mean[:, 2, :, :] = 123.680
return batch - Variable(mean)
def add_imagenet_mean_batch(batch):
"""Add ImageNet mean pixel-wise from a BGR image."""
tensortype = type(batch.data)
mean = tensortype(batch.data.size())
mean[:, 0, :, :] = 103.939
mean[:, 1, :, :] = 116.779
mean[:, 2, :, :] = 123.680
return batch + Variable(mean)
def imagenet_clamp_batch(batch, low, high):
batch[:, 0, :, :].data.clamp_(low - 103.939, high - 103.939)
batch[:, 1, :, :].data.clamp_(low - 116.779, high - 116.779)
batch[:, 2, :, :].data.clamp_(low - 123.680, high - 123.680)
def preprocess_batch(batch):
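    # Reorder the colour channels of an (N, 3, H, W) batch from RGB to BGR; the transposes move
    # the channel axis to the front so chunk/cat can split and reassemble it, then move it back.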
batch = batch.transpose(0, 1)
(r, g, b) = torch.chunk(batch, 3)
batch = torch.cat((b, g, r))
batch = batch.transpose(0, 1)
return batch
class StyleLoader():
def __init__(self, style_folder, style_size, cuda=True):
self.folder = style_folder
self.style_size = style_size
self.files = os.listdir(style_folder)
self.cuda = cuda
def get(self, i):
idx = i % len(self.files)
filepath = os.path.join(self.folder, self.files[idx])
style = tensor_load_rgbimage(filepath, self.style_size)
style = style.unsqueeze(0)
style = preprocess_batch(style)
if self.cuda:
style = style.cuda()
style_v = Variable(style, requires_grad=False)
return style_v
def size(self):
return len(self.files)
class InferenceStyleLoader():
def __init__(self, style_folder, style_fname, style_size, cuda=True):
self.folder = style_folder
self.style_size = style_size
self.style_fname = style_fname
self.cuda = cuda
def get(self):
filepath = os.path.join(self.folder, self.style_fname)
style = tensor_load_rgbimage(filepath, self.style_size)
style = style.unsqueeze(0)
style = preprocess_batch(style)
if self.cuda:
style = style.cuda()
style_v = Variable(style, requires_grad=False)
return style_v
| 31.067073 | 95 | 0.603337 |
55511ae80d3da305a2a40ae768ef99ea9e106a54 | 1,294 | py | Python | fixture/session.py | daryarudi/python_training | a9b85bc33a21d1fc23c4a1701e0886cdb909d9b2 | [
"Apache-2.0"
] | null | null | null | fixture/session.py | daryarudi/python_training | a9b85bc33a21d1fc23c4a1701e0886cdb909d9b2 | [
"Apache-2.0"
] | null | null | null | fixture/session.py | daryarudi/python_training | a9b85bc33a21d1fc23c4a1701e0886cdb909d9b2 | [
"Apache-2.0"
] | null | null | null | class SessionHelper:
def __init__(self, app):
self.app = app
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys(username)
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys(password)
wd.find_element_by_xpath("//input[@value='Login']").click()
def ensure_login(self, username, password):
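        # Log in only when necessary: keep the session if the requested user is already logged
        # in, otherwise log out whoever is active before authenticating with the new credentials.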
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("user")
def ensure_logout(self):
if self.is_logged_in():
self.logout()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def is_logged_in_as(self, username):
wd = self.app.wd
return self.get_logged_user() == username
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_xpath("//div/div[1]/form/b").text[1:-1]
| 30.093023 | 73 | 0.608964 |
146968526a84184862201a5d9f4788b6e828abb3 | 2,051 | py | Python | library_collection/migrations/0002_add_note_fields.py | amywieliczka/avram | 1956130fcde464acf3ed00432a597dd857647e91 | [
"Unlicense"
] | null | null | null | library_collection/migrations/0002_add_note_fields.py | amywieliczka/avram | 1956130fcde464acf3ed00432a597dd857647e91 | [
"Unlicense"
] | 5 | 2015-02-17T18:53:27.000Z | 2020-12-09T22:36:41.000Z | library_collection/migrations/0002_add_note_fields.py | amywieliczka/avram | 1956130fcde464acf3ed00432a597dd857647e91 | [
"Unlicense"
] | 1 | 2022-02-25T15:22:40.000Z | 2022-02-25T15:22:40.000Z | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library_collection', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='collection',
name='date_last_harvested',
field=models.DateField(null=True, blank=True),
),
migrations.AddField(
model_name='collection',
name='harvest_exception_notes',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='collection',
name='harvest_frequency',
field=models.DurationField(null=True, blank=True),
),
migrations.AlterField(
model_name='collection',
name='files_in_dams',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='collection',
name='files_in_hand',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='collection',
name='formats',
field=models.ManyToManyField(help_text=b'File formats for DAMS ingest', to='library_collection.Format', blank=True),
),
migrations.AlterField(
model_name='collection',
name='metadata_in_dams',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='collection',
name='qa_completed',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='collection',
name='repository',
field=models.ManyToManyField(to='library_collection.Repository', verbose_name=b'Unit', blank=True),
),
migrations.AlterField(
model_name='repository',
name='campus',
field=models.ManyToManyField(to='library_collection.Campus', blank=True),
),
]
| 32.555556 | 128 | 0.577279 |
147e084f6badd2ef58552945d5d1824c34fa803a | 2,749 | py | Python | config/rfam_local_template.py | mb1069/rfam-production | 10c76e249dc22d30862b3a873fd54f390e859ad8 | [
"Apache-2.0"
] | 1 | 2020-01-14T12:12:46.000Z | 2020-01-14T12:12:46.000Z | config/rfam_local_template.py | mb1069/rfam-production | 10c76e249dc22d30862b3a873fd54f390e859ad8 | [
"Apache-2.0"
] | null | null | null | config/rfam_local_template.py | mb1069/rfam-production | 10c76e249dc22d30862b3a873fd54f390e859ad8 | [
"Apache-2.0"
] | null | null | null | """
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---------------------------------GEN_CONFIG----------------------------------
RFAM_GPFS_LOC = ''
LOC_PATH = ''
GEN_DWLD_EXEC = ''
LSF_GROUPS_CMD = 'bgadd -L %s /rfam_gen/%s'
LSF_GEN_GROUP = '/rfam_gen'
USER_EMAIL = ''
# ------------------------------DATABASES--------------------------------------
# Databases
RFAMLIVEPUB = {
'user': '',
'pwd': '',
'host': '',
'db': '',
'port': '',
}
RFAMLIVE = {
'user': '',
'pwd': '',
'host': '',
'db': '',
'port': '',
}
RFAMLIVE_DJANGO = {
'USER': RFAMLIVE['user'],
'PASSWORD': RFAMLIVE['pwd'],
'HOST': RFAMLIVE['host'],
'NAME': RFAMLIVE['db'],
'PORT': RFAMLIVE['port'],
'ENGINE': 'django.db.backends.mysql',
}
RFAM12 = {
'user': '',
'pwd': '',
'host': '',
'db': '',
'port': '',
}
RFAMLIVELOC = {
'user': '',
'pwd': '',
'host': '',
'db': '',
'port': '',
}
# ----------------------------Django settings----------------------------------
# DATABASES
RFAMDEV = {
'ENGINE': 'django.db.backends.mysql',
'NAME': '',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': '',
}
RFAMLOC = {
'ENGINE': 'django.db.backends.mysql',
'NAME': '',
'HOST': '',
'PORT': '',
'USER': '',
'PASSWORD': '',
}
# SETTINGS
SECRET_KEY = 'change secret key in production'
# ----------------------------RFAM CONFIG PATHS--------------------------------
ESL_PATH = ''
FA_GEN = ''
RFAMSEQ_PATH = ''
FAM_VIEW_PL = ''
TMP_PATH = '/tmp'
ESL_FSEQ_PATH = ''
FSR_PATH = ''
FSR_LOCAL = ''
ENA_URL = 'http://www.ebi.ac.uk/ena/data/view/%s&display=fasta&range=%s-%s'
# Maybe delete these
TAX_NODES_DUMP = ''
TAX_NAMES_DUMP = ''
RFAM_NCBI_IDS = ''
VALID_NCBI_IDS = ''
NCBI_RANKS = ''
# -------------------------------LSF GROUPS------------------------------------
# rfamprod privileges required
FA_EXPORT_GROUP = '/rfam_fa'
RFAM_VIEW_GROUP = '/rfam_view'
# -----------------------------------------------------------------------------
if __name__ == '__main__':
pass
| 22.532787 | 80 | 0.484176 |
2db1b6c6816c58fe69da5d46f41860f5e7f3201b | 1,444 | py | Python | src/main/python/systemds/operator/algorithm/builtin/logSumExp.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/operator/algorithm/builtin/logSumExp.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/operator/algorithm/builtin/logSumExp.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | null | null | null | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/logSumExp.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def logSumExp(M: OperationNode, **kwargs: Dict[str, VALID_INPUT_TYPES]):
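    # Generated thin wrapper: collect M and any keyword arguments into one parameter dict and
    # emit a call to the SystemDS "logSumExp" DML builtin, returned as a lazy Matrix node.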
params_dict = {'M':M}
params_dict.update(kwargs)
return Matrix(M.sds_context, 'logSumExp', named_input_nodes=params_dict)
| 38 | 76 | 0.703601 |
6835c4124dadd2b3f1b93ac3f4c22d14eaf48465 | 412 | py | Python | read_trace.py | SillyTin/erays | be8768f280de174400050afc267bb5f3987042d0 | [
"MIT"
] | 79 | 2018-08-17T19:02:09.000Z | 2022-03-29T21:17:43.000Z | read_trace.py | catageek/erayscan | 141e160879e79a752c8d321af0a43f707a1106e4 | [
"MIT"
] | 3 | 2018-08-29T16:40:07.000Z | 2021-03-20T02:47:54.000Z | read_trace.py | catageek/erayscan | 141e160879e79a752c8d321af0a43f707a1106e4 | [
"MIT"
] | 20 | 2018-08-20T21:05:20.000Z | 2022-03-02T15:15:20.000Z | import sys, json
line = open(sys.argv[1]).readline()
info = json.loads(line)
trace = info['result']['structLogs']
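# structLogs appears to be the per-opcode output of an Ethereum node's debug_traceTransaction
# call; for every top-level (depth == 1) step, dump the stack slots and memory words recorded
# for that step, followed by the program counter and opcode.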
for step in trace:
if step['depth'] != 1:
continue
for i, item in enumerate(step['stack']):
print("$s%d:\t%s" % (i, hex(int(item, 16))[2:]))
for i in step['memory']:
print(i)
# print("".join(step['memory']))
print("-" * 32)
print(str(step['pc']) + "\t" + step['op'])
print("-" * 32)
| 22.888889 | 50 | 0.582524 |
d3a26d77e09f4e0bef46cb5ce6d8351db5f3d673 | 4,891 | py | Python | docs/conf.py | kwp-communications/jicket | f01325e4ac736c41962fe11c8ae5587a18c542ec | [
"MIT"
] | 6 | 2018-10-10T08:42:37.000Z | 2018-10-15T15:52:18.000Z | docs/conf.py | kwp-communications/jicket | f01325e4ac736c41962fe11c8ae5587a18c542ec | [
"MIT"
] | 1 | 2018-10-01T08:39:11.000Z | 2018-10-10T08:44:03.000Z | docs/conf.py | kwp-communications/jicket | f01325e4ac736c41962fe11c8ae5587a18c542ec | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Jicket'
copyright = '2018, KWP GmbH & Co. KG'
author = 'KWP GmbH & Co. KG'
# The full version, including alpha/beta/rc tags
with open("../VERSION", "r") as f:
release = f.read()
# The short X.Y version
version = release
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Jicketdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Jicket.tex', 'Jicket Documentation',
'KWP GmbH \\& Co. KG', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jicket', 'Jicket Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Jicket', 'Jicket Documentation',
author, 'Jicket', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration ------------------------------------------------- | 30.56875 | 79 | 0.644245 |
15a6b941ba2b4647d7dc7f1dcf9a5dddfbefa70c | 2,082 | py | Python | mri_works/NodeEditor/python/classForProbe.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 2 | 2020-08-20T21:00:53.000Z | 2021-08-16T15:28:51.000Z | mri_works/NodeEditor/python/classForProbe.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 3 | 2020-09-24T06:50:43.000Z | 2020-12-15T11:02:04.000Z | mri_works/NodeEditor/python/classForProbe.py | montigno/mri_works | 8ec6ff1500aa34d3540e44e4b0148023cf821f61 | [
"CECILL-B"
] | 1 | 2020-08-20T21:00:59.000Z | 2020-08-20T21:00:59.000Z | from prompt_toolkit import print_formatted_text, ANSI
class printProbe():
def __init__(self, unit, lab, format, label, val):
if 'int' in format:
col = '\x1b[94m'
elif 'float' in format:
col = '\x1b[33m'
elif 'tuple' in format:
col = '\x1b[98m'
elif 'str' in format:
col = '\x1b[35m'
elif 'bool' in format:
col = '\x1b[92m'
elif 'path' in format:
col = '\x1b[91m'
        elif 'dict' in format:
            col = '\x1b[93m'
        else:
            # fallback so `col` is always defined for unexpected formats
            col = '\x1b[0m'
if label == 'Type':
tmpval = val
continued = True
if isinstance(tmpval, list):
if val:
if isinstance(tmpval, list):
while continued:
if isinstance(tmpval, list):
tmpval = tmpval[0]
else:
val = 'array of ' + type(tmpval).__name__
continued = False
else:
val = 'list of ' + type(tmpval).__name__
else:
val = type(tmpval).__name__
elif label == 'Length':
if isinstance(val, list):
if val:
tmptxt = '('
tmpval = val
continued = True
if isinstance(tmpval, list):
while continued:
if isinstance(tmpval, list):
tmptxt += str(len(tmpval))
tmpval = tmpval[0]
tmptxt += ', '
else:
continued = False
tmptxt = tmptxt[0:-2]+')'
else:
tmptxt = '1'
val = tmptxt
else:
val = '1'
print_formatted_text(ANSI(col+unit+'('+lab+')' + ' : ' + label+' = ' + str(val)))
| 34.131148 | 89 | 0.370797 |
7d8066edfe1ee7f7c81ac6e7a52838d7929dca84 | 1,510 | py | Python | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/EipAddress.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/EipAddress.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | python_code/vnev/Lib/site-packages/jdcloud_sdk/services/jdfusion/models/EipAddress.py | Ureimu/weather-robot | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | [
"MIT"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class EipAddress(object):
def __init__(self, cloudID=None, ipAddress=None, id=None, status=None, instanceType=None, instanceId=None, bandwidth=None, allocationTime=None):
"""
        :param cloudID: (Optional) Cloud registration information ID
        :param ipAddress: (Optional) Public IP address
        :param id: (Optional) Public IP ID
        :param status: (Optional) Status
        :param instanceType: (Optional) Type of the instance currently bound to this EIP
        :param instanceId: (Optional) ID of the instance currently bound to this EIP
        :param bandwidth: (Optional) Peak bandwidth of the EIP, in Mbps
        :param allocationTime: (Optional) Creation time of the EIP
"""
self.cloudID = cloudID
self.ipAddress = ipAddress
self.id = id
self.status = status
self.instanceType = instanceType
self.instanceId = instanceId
self.bandwidth = bandwidth
self.allocationTime = allocationTime
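# Added usage sketch (hypothetical values):
#   eip = EipAddress(ipAddress='203.0.113.10', bandwidth=5, status='Available')
#   print(eip.ipAddress, eip.bandwidth)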
| 35.952381 | 148 | 0.696026 |
9aee5367e305ec1e733f3ae8f04c07f50adde6e6 | 451 | py | Python | contest/abc144/D.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc144/D.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | contest/abc144/D.py | mola1129/atcoder | 1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db | [
"MIT"
] | null | null | null | import math
def main():
a, b, x = map(int, input().split())
    # Compute the water level height when the tank sits level
    h = x / a ** 2
    rad = 0
    if h >= (b / 2):
        # Water level is at least half the tank height
        # Compute the vertical extent of the empty (waterless) triangular region
        u = (a * a * b - x) * 2 / a ** 2
        rad = math.atan(u / a)
    else:
        # Water level is below half the tank height
        # Compute the horizontal extent of the triangular water region
        u = x / (b * a) * 2
        rad = math.atan(b / u)
    # Convert radians to degrees
print(math.degrees(rad))
if __name__ == '__main__':
main()
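# Added geometry note: from the code above, the tank apparently has a square
# a-by-a base, height b, and holds water volume x. Tilting until the water is
# about to spill leaves either an empty triangular prism at the top (when
# x >= a*a*b/2) with vertical leg u = 2*(a*a*b - x)/a**2 and tilt angle
# atan(u/a), or a triangular prism of water (when x < a*a*b/2) with horizontal
# leg u = 2*x/(a*b) and tilt angle atan(b/u).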
| 18.04 | 40 | 0.439024 |
2ebe31b33e9645d999f468b593f5b563639d5cfb | 136 | py | Python | schema/setup.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
] | null | null | null | schema/setup.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
] | null | null | null | schema/setup.py | dafarz/base-service | 95791beac06c1ac58e0fa2050aa2cf3a3a22d8d7 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(
name='schema',
packages=find_packages(),
install_requires=['pydantic']
) | 19.428571 | 43 | 0.720588 |
81d018b255615cd7b04c8c91410ae4765d2e3bc5 | 1,120 | py | Python | Machine_Learning/Project/Bryan/scratch/1P/LR.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | 4 | 2019-10-05T03:41:20.000Z | 2020-11-04T00:39:13.000Z | Machine_Learning/Project/Bryan/scratch/1P/LR.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | null | null | null | Machine_Learning/Project/Bryan/scratch/1P/LR.py | bnonni/Python | 9ebd18caa4e2d805028b557e8b77ea65a9ee1a3d | [
"Apache-2.0"
] | 2 | 2019-10-02T14:08:51.000Z | 2019-10-03T20:49:09.000Z | #!/usr/bin/env python3
import gc
import warnings
import gc, sys, re, os, math
from time import strptime, mktime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
gc.collect()
warnings.filterwarnings('ignore')
np.random.seed(1)
# %matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, classification_report, roc_curve, roc_auc_score, auc, accuracy_score
import operator
def runLogisticRegression(e, X_train, y_train, X_test, y_test):
c = np.arange(1, e+1)
cma = {}
cra = {}
acc = {}
preds = {}
for i in c:
lr = LogisticRegression(C=i, multi_class='ovr', solver='lbfgs',random_state=0)
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
cma[i] = confusion_matrix(y_test, y_pred)
cra[i] = classification_report(y_test, y_pred)
acc[i] = (accuracy_score(y_test, y_pred))
preds[i] = y_pred
ky = max(acc.items(), key=operator.itemgetter(1))[0]
val = float(max(acc.items(), key=operator.itemgetter(1))[1])
return cma, cra, acc, preds, ky, val | 33.939394 | 114 | 0.685714 |
59d0d39e3456700c34f0ffe28ace5f59fc3a0bb2 | 15,216 | py | Python | capriqorn/postproc/filter/solvent_matching.py | bio-phys/capriqorn | 8f514deaf301643a38a4d101e3ada1ae10a1abc6 | [
"CC-BY-4.0"
] | 8 | 2018-03-29T09:48:20.000Z | 2021-04-14T10:12:49.000Z | capriqorn/postproc/filter/solvent_matching.py | bio-phys/capriqorn | 8f514deaf301643a38a4d101e3ada1ae10a1abc6 | [
"CC-BY-4.0"
] | 5 | 2018-05-28T11:29:17.000Z | 2018-06-15T04:54:10.000Z | capriqorn/postproc/filter/solvent_matching.py | bio-phys/capriqorn | 8f514deaf301643a38a4d101e3ada1ae10a1abc6 | [
"CC-BY-4.0"
] | 1 | 2021-03-10T12:25:31.000Z | 2021-03-10T12:25:31.000Z | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# Capriqorn --- CAlculation of P(R) and I(Q) Of macRomolecules in solutioN
#
# Copyright (c) Juergen Koefinger, Klaus Reuter, and contributors.
# See the file AUTHORS.rst for the full list of contributors.
#
# Released under the GNU Public Licence, v2 or any higher version, see the file LICENSE.txt.
"""Capriqorn self-consistent solvent matching filter.
"""
from __future__ import division
from past.utils import old_div
from six.moves import range
import numpy as np
import scipy.interpolate as sint
from cadishi import base
from cadishi.io import hdf5
from ...lib import selection
from ...lib import rdf
class Solvent(base.Filter):
"""A filter that performs self-consistent solvent matching."""
_depends = []
_conflicts = []
def __init__(self, source=-1,
g_ascii_file="rdf.extended.dat",
g_match='O,O',
g_scaled_file=None, # optional: HDF5 file to read g_scaled from
g_plateau_fraction=0.3, # fraction of the distance range of the rdfs, where the rdfs are flat
# noise is reduced to g_noise_fraction at largest distance over a distance determined by g_plateau_fraction.
g_noise_fraction=1.,
debug=False,
verbose=False):
self.src = source
self.g_ascii_file = g_ascii_file
self.g_match = g_match
self.g_scaled_file = g_scaled_file
self.g_plateau_fraction = g_plateau_fraction
self.g_noise_fraction = g_noise_fraction
# --- Note: The following two parameters are obtained from the pipeline log!
self.geometry = None
self.x_particle_method = None
# ---
self.debug = debug
self.verb = verbose
# ---
self._depends.extend(super(base.Filter, self)._depends)
self._conflicts.extend(super(base.Filter, self)._conflicts)
def get_meta(self):
"""
Return information on the present filter,
ready to be added to a frame object's list of
pipeline meta information.
"""
meta = {}
label = 'SolventMatching'
param = {'g_ascii_file': self.g_ascii_file,
'g_match': self.g_match,
'g_scaled_file': self.g_scaled_file,
'g_plateau_fraction': self.g_plateau_fraction,
'g_noise_fraction': self.g_noise_fraction,
# --- we keep the following two entries for log purposes
'geometry': self.geometry,
'x_particle_method': self.x_particle_method}
meta[label] = param
return meta
def __iter__(self):
return self
def __next__(self):
""" Self-consistent solvent matching, implemented as a Python
generator. Implementation follows <lsz/shell-sc-solv-match.py>.
"""
for obj in next(self.src):
if obj is not None:
assert isinstance(obj, base.Container)
if self.g_scaled_file is not None:
# --- read g_scaled and rho from previous calculation ---
reader = hdf5.H5Reader(filename=self.g_scaled_file)
for frm in reader.next():
obj_g_scaled = frm
break
del reader
g_dict = obj_g_scaled.get_data(base.loc_solv_match + '/g_scaled')
rho_dict = obj_g_scaled.get_data(base.loc_solv_match + '/rho')
else:
# --- compute g_scaled and rho ---
# obtain information from the pipeline log
dr = obj.query_meta('histograms/histogram/dr')
assert (dr is not None)
# ---
self.geometry = obj.get_geometry()
assert (self.geometry is not None)
geometry_param = obj.query_meta(self.geometry)
assert (geometry_param is not None)
# ---
virtual_param = obj.query_meta('VirtualParticles')
if (virtual_param is not None):
self.x_particle_method = virtual_param['method']
xrho = virtual_param['x_density']
# --- obtain the shell volume from the pipeline log
if (self.geometry in ['Sphere', 'Cuboid', 'Ellipsoid']):
V_shell = geometry_param['shell_volume']
VSqr_shell = V_shell ** 2
elif (self.geometry == 'ReferenceStructure' or self.geometry == 'MultiReferenceStructure'):
# V_shell is determined below
V_shell = None
else:
                        raise NotImplementedError('Geometry ' + self.geometry +
                                                  ' not implemented for self-consistent solvent matching')
# --- read and prepare g-function for matching
g_header = (rdf.readHeader(self.g_ascii_file)).rstrip('\n').split()
assert (self.g_match in g_header)
_g_el_set = set([])
for item in g_header:
if (item == '#'):
continue
pair = item.split(',')
_g_el_set.add(pair[0])
_g_el_set.add(pair[1]) # obsolete
g_elements = sorted(list(_g_el_set))
# ---
g_table_0 = np.loadtxt(self.g_ascii_file)
# TODO: Assert that histograms and rdfs have same bin size. Else, generate new rdf by interpolation.
g_dr = (g_table_0[1:, 0] - g_table_0[:-1, 0]).mean()
# print g_dr
# Tapers noise for self.g_noise_fraction<1. Determines and set ginfty in last bins.
g_table_0_smooth = rdf.smooth(g_table_0, g_dr, self.g_plateau_fraction,
self.g_noise_fraction, verb=False)
# np.savetxt("g_table_0_smooth.dat", g_table_0_smooth)
if (self.debug):
obj.put_data(base.loc_solv_match + '/g_table_0_smooth', g_table_0_smooth)
g_table_0 = g_table_0_smooth
_radii = obj.get_data(base.loc_histograms + '/radii')
# Extend rdf in distance AFTER noise tapering, where rdf values at largest distance are set to ginfty.
if _radii.shape[0] > g_table_0.shape[0]:
new_g_table = np.zeros((_radii.shape[0], g_table_0.shape[1]))
new_g_table[:g_table_0.shape[0], :] = g_table_0
new_g_table[:, 0] = _radii
tmp = g_table_0[-1, 1:]
new_g_table[g_table_0.shape[0]:, 1:] = tmp[np.newaxis, :]
g_table_0 = new_g_table
# np.savetxt("g_table_0_smooth_extended.dat", new_g_table)
if (self.debug):
obj.put_data(base.loc_solv_match + '/g_table_0_smooth_extended', new_g_table)
# if do_g_extension:
# g_dr_0 = g_table_0[-1, 0] - g_table_0[-2, 0]
# g_nr_0 = g_table_0.shape[0]
# g_nrow = g_extension_factor * g_nr_0
# g_ncol = g_table_0.shape[1]
# g_table = np.zeros((g_nrow, g_ncol))
# g_table[0:g_nr_0, :] = g_table_0[0:g_nr_0, :]
# for idx in range(g_nr_0, g_nrow):
# g_table[idx, 0] = g_table[idx - 1, 0] + g_dr_0
# g_table[idx, 1:] = g_table[idx - 1, 1:]
# else:
# g_table = g_table_0
g_table = g_table_0
# ---
assert (len(g_header) == g_table.shape[1])
g_idx = g_header.index(self.g_match)
g_org = g_table[:, [0, g_idx]]
if (self.debug):
obj.put_data(base.loc_solv_match + '/g_org', g_org)
rho_g_org = g_org[0, 1] # rho value stored at [0,1] (code by JK)
# --- split g_table into a dict holding individual arrays
g_dict = {}
g_dict['radii'] = g_table[:, 0]
for i in range(1, len(g_header)):
g_dict[g_header[i]] = g_table[:, i]
# --- calculate particle and density of the matching solvent
# get_shell also merges virtual particles X1 and X2
shell = selection.get_shell(obj)
# --- multiref: set properly (volume-weighted) averaged shell H_{xx}(r)
# if (virtual_param is not None):
if (virtual_param is not None and self.geometry == 'MultiReferenceStructure'):
shell.put_data(base.loc_histograms + "/X,X", obj.get_data(base.loc_shell_Hxx + "/X.s,X.s"))
# --- determine V_shell and VSqr_shell for the reference and multiref structure case
# JK: Can/should be moved to Average filter?
if (self.geometry == 'ReferenceStructure' or self.geometry == 'MultiReferenceStructure'):
nx = (shell.get_data(base.loc_nr_particles + '/X')).mean()
V_shell = old_div(nx, xrho)
VSqr_shell = V_shell ** 2
if self.geometry == 'MultiReferenceStructure':
nxSqr = ((shell.get_data(base.loc_nr_particles + '/X')) ** 2).mean()
VSqr_shell = old_div(nxSqr, xrho ** 2)
# ---
# print "###", self.g_match, shell.get_keys(base.loc_histograms)
assert (self.g_match in shell.get_keys(base.loc_histograms))
pair = self.g_match.split(',')
assert (pair[0] == pair[1])
assert (pair[0] in shell.get_keys(base.loc_nr_particles))
# print shell.particles[pair[0]]
n_match_avg = (shell.get_data(base.loc_nr_particles + '/' + pair[0])).mean()
rho_match = old_div(n_match_avg, V_shell)
# JK: Should we instead use <n_i/V_i> averaged over frames for multiref??
# Use SciPy interpolator object to operate on the
# reference g function. Warning: Linear interpolation!
g_int = sint.interp1d(g_org[:, 0], g_org[:, 1])
# --- solvent-matching calculation
_radii = obj.get_data(base.loc_histograms + '/radii')
pShell = np.zeros_like(_radii)
H = np.zeros_like(_radii)
gAct = np.zeros_like(_radii)
# ---
if (self.geometry == 'Sphere') and (self.x_particle_method is None):
R = geometry_param['radius']
sw = geometry_param['shell_width']
for i, r in enumerate(_radii):
pShell[i] = rdf.PSh(R - sw, R, r)
H[i] = pShell[i] * g_int(_radii[i])
else:
histgrms = shell.get_data(base.loc_histograms)
pShell = histgrms['X,X'].copy()
pShell /= pShell.sum()
pShell /= dr
for i, r in enumerate(_radii):
if (pShell[i] > 0.0):
gAct[i] /= pShell[i]
else:
gAct[i] = 0.0
if (_radii[i] < g_dict['radii'][0]) or (_radii[i] >= g_dict['radii'][-1]):
H[i] = 0.0
else:
H[i] = pShell[i] * g_int(_radii[i])
# ---
pre_factor = rho_match ** 2 * VSqr_shell * dr / 2.
# print "### pre_factor =", pre_factor
H[:] *= pre_factor
histo = shell.get_data(base.loc_histograms + '/' + self.g_match)
scale_factor = old_div(np.sum(histo[:] * H[:]), np.sum(H[:] ** 2))
# print "### scale_factor =", scale_factor
obj.put_data(base.loc_solv_match + '/scale_factor', scale_factor)
if (self.debug):
obj.put_data(base.loc_solv_match + '/pre_factor', pre_factor)
obj.put_data(base.loc_solv_match + '/scale_factor', scale_factor)
obj.put_data(base.loc_solv_match + '/histo', histo)
obj.put_data(base.loc_solv_match + '/pShell', pShell)
obj.put_data(base.loc_solv_match + '/H', H)
# ---
H *= scale_factor
gAct /= scale_factor
if (self.debug):
obj.put_data(base.loc_solv_match + '/H_scaled', H)
obj.put_data(base.loc_solv_match + '/gAct', gAct)
# ---
rho_dict = {}
for name in g_elements:
avg = (shell.get_data(base.loc_nr_particles + '/' + name)).mean()
rho_dict[name] = old_div(avg, V_shell)
if (self.debug):
obj.put_data(base.loc_solv_match + '/rho_g_org', rho_g_org)
obj.put_data(base.loc_solv_match + '/rho_match', rho_match)
# --- Patch zeroeth elements of g arrays with the density,
# --> Do we really want to keep this convention?
for key in rho_dict:
pair = key + ',' + key
assert (pair in g_dict)
(g_dict[pair])[0] = rho_dict[key]
if (self.debug):
obj.put_data(base.loc_solv_match + '/g_original', g_dict)
# --- final rescaled g functions used by delta_h
for key in g_dict:
if (key == 'radii'):
continue
else:
(g_dict[key])[1:] *= scale_factor
obj.put_data(base.loc_solv_match + '/g_scaled', g_dict)
obj.put_data(base.loc_solv_match + '/rho', rho_dict)
obj.put_meta(self.get_meta())
if self.verb:
print("Solvent.next() :", obj.i)
yield obj
else:
yield None
| 49.402597 | 125 | 0.492902 |
e59bc7b43765bd36683fc3a23743122a9018ec5c | 17,375 | py | Python | openshift/client/models/v1_build_status.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | openshift/client/models/v1_build_status.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | openshift/client/models/v1_build_status.py | TristanCacqueray/openshift-restclient-python | 7758cde7a8094acb279904f15c29e5fe3e9f7d33 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a client. By listing and beginning a watch from the returned resourceVersion, clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'metav1.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1BuildStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'cancelled': 'bool',
'completion_timestamp': 'datetime',
'config': 'V1ObjectReference',
'duration': 'int',
'log_snippet': 'str',
'message': 'str',
'output': 'V1BuildStatusOutput',
'output_docker_image_reference': 'str',
'phase': 'str',
'reason': 'str',
'stages': 'list[V1StageInfo]',
'start_timestamp': 'datetime'
}
attribute_map = {
'cancelled': 'cancelled',
'completion_timestamp': 'completionTimestamp',
'config': 'config',
'duration': 'duration',
'log_snippet': 'logSnippet',
'message': 'message',
'output': 'output',
'output_docker_image_reference': 'outputDockerImageReference',
'phase': 'phase',
'reason': 'reason',
'stages': 'stages',
'start_timestamp': 'startTimestamp'
}
def __init__(self, cancelled=None, completion_timestamp=None, config=None, duration=None, log_snippet=None, message=None, output=None, output_docker_image_reference=None, phase=None, reason=None, stages=None, start_timestamp=None):
"""
V1BuildStatus - a model defined in Swagger
"""
self._cancelled = None
self._completion_timestamp = None
self._config = None
self._duration = None
self._log_snippet = None
self._message = None
self._output = None
self._output_docker_image_reference = None
self._phase = None
self._reason = None
self._stages = None
self._start_timestamp = None
self.discriminator = None
if cancelled is not None:
self.cancelled = cancelled
if completion_timestamp is not None:
self.completion_timestamp = completion_timestamp
if config is not None:
self.config = config
if duration is not None:
self.duration = duration
if log_snippet is not None:
self.log_snippet = log_snippet
if message is not None:
self.message = message
if output is not None:
self.output = output
if output_docker_image_reference is not None:
self.output_docker_image_reference = output_docker_image_reference
self.phase = phase
if reason is not None:
self.reason = reason
if stages is not None:
self.stages = stages
if start_timestamp is not None:
self.start_timestamp = start_timestamp
@property
def cancelled(self):
"""
Gets the cancelled of this V1BuildStatus.
cancelled describes if a cancel event was triggered for the build.
:return: The cancelled of this V1BuildStatus.
:rtype: bool
"""
return self._cancelled
@cancelled.setter
def cancelled(self, cancelled):
"""
Sets the cancelled of this V1BuildStatus.
cancelled describes if a cancel event was triggered for the build.
:param cancelled: The cancelled of this V1BuildStatus.
:type: bool
"""
self._cancelled = cancelled
@property
def completion_timestamp(self):
"""
Gets the completion_timestamp of this V1BuildStatus.
completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.
:return: The completion_timestamp of this V1BuildStatus.
:rtype: datetime
"""
return self._completion_timestamp
@completion_timestamp.setter
def completion_timestamp(self, completion_timestamp):
"""
Sets the completion_timestamp of this V1BuildStatus.
completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.
:param completion_timestamp: The completion_timestamp of this V1BuildStatus.
:type: datetime
"""
self._completion_timestamp = completion_timestamp
@property
def config(self):
"""
Gets the config of this V1BuildStatus.
config is an ObjectReference to the BuildConfig this Build is based on.
:return: The config of this V1BuildStatus.
:rtype: V1ObjectReference
"""
return self._config
@config.setter
def config(self, config):
"""
Sets the config of this V1BuildStatus.
config is an ObjectReference to the BuildConfig this Build is based on.
:param config: The config of this V1BuildStatus.
:type: V1ObjectReference
"""
self._config = config
@property
def duration(self):
"""
Gets the duration of this V1BuildStatus.
duration contains time.Duration object describing build time.
:return: The duration of this V1BuildStatus.
:rtype: int
"""
return self._duration
@duration.setter
def duration(self, duration):
"""
Sets the duration of this V1BuildStatus.
duration contains time.Duration object describing build time.
:param duration: The duration of this V1BuildStatus.
:type: int
"""
self._duration = duration
@property
def log_snippet(self):
"""
Gets the log_snippet of this V1BuildStatus.
logSnippet is the last few lines of the build log. This value is only set for builds that failed.
:return: The log_snippet of this V1BuildStatus.
:rtype: str
"""
return self._log_snippet
@log_snippet.setter
def log_snippet(self, log_snippet):
"""
Sets the log_snippet of this V1BuildStatus.
logSnippet is the last few lines of the build log. This value is only set for builds that failed.
:param log_snippet: The log_snippet of this V1BuildStatus.
:type: str
"""
self._log_snippet = log_snippet
@property
def message(self):
"""
Gets the message of this V1BuildStatus.
message is a human-readable message indicating details about why the build has this status.
:return: The message of this V1BuildStatus.
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""
Sets the message of this V1BuildStatus.
message is a human-readable message indicating details about why the build has this status.
:param message: The message of this V1BuildStatus.
:type: str
"""
self._message = message
@property
def output(self):
"""
Gets the output of this V1BuildStatus.
output describes the Docker image the build has produced.
:return: The output of this V1BuildStatus.
:rtype: V1BuildStatusOutput
"""
return self._output
@output.setter
def output(self, output):
"""
Sets the output of this V1BuildStatus.
output describes the Docker image the build has produced.
:param output: The output of this V1BuildStatus.
:type: V1BuildStatusOutput
"""
self._output = output
@property
def output_docker_image_reference(self):
"""
Gets the output_docker_image_reference of this V1BuildStatus.
outputDockerImageReference contains a reference to the Docker image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.
:return: The output_docker_image_reference of this V1BuildStatus.
:rtype: str
"""
return self._output_docker_image_reference
@output_docker_image_reference.setter
def output_docker_image_reference(self, output_docker_image_reference):
"""
Sets the output_docker_image_reference of this V1BuildStatus.
outputDockerImageReference contains a reference to the Docker image that will be built by this build. Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.
:param output_docker_image_reference: The output_docker_image_reference of this V1BuildStatus.
:type: str
"""
self._output_docker_image_reference = output_docker_image_reference
@property
def phase(self):
"""
Gets the phase of this V1BuildStatus.
phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".
:return: The phase of this V1BuildStatus.
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""
Sets the phase of this V1BuildStatus.
phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".
:param phase: The phase of this V1BuildStatus.
:type: str
"""
if phase is None:
raise ValueError("Invalid value for `phase`, must not be `None`")
self._phase = phase
@property
def reason(self):
"""
Gets the reason of this V1BuildStatus.
reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
:return: The reason of this V1BuildStatus.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""
Sets the reason of this V1BuildStatus.
reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.
:param reason: The reason of this V1BuildStatus.
:type: str
"""
self._reason = reason
@property
def stages(self):
"""
Gets the stages of this V1BuildStatus.
        stages contains details about each stage that occurs during the build, including start time, duration (in milliseconds), and the steps that occurred within each stage.
:return: The stages of this V1BuildStatus.
:rtype: list[V1StageInfo]
"""
return self._stages
@stages.setter
def stages(self, stages):
"""
Sets the stages of this V1BuildStatus.
        stages contains details about each stage that occurs during the build, including start time, duration (in milliseconds), and the steps that occurred within each stage.
:param stages: The stages of this V1BuildStatus.
:type: list[V1StageInfo]
"""
self._stages = stages
@property
def start_timestamp(self):
"""
Gets the start_timestamp of this V1BuildStatus.
startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.
:return: The start_timestamp of this V1BuildStatus.
:rtype: datetime
"""
return self._start_timestamp
@start_timestamp.setter
def start_timestamp(self, start_timestamp):
"""
Sets the start_timestamp of this V1BuildStatus.
startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.
:param start_timestamp: The start_timestamp of this V1BuildStatus.
:type: datetime
"""
self._start_timestamp = start_timestamp
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1BuildStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
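# Added usage sketch: `phase` is the only attribute the constructor requires
# (its setter rejects None); every other field is optional.
#   status = V1BuildStatus(phase='Complete', duration=120)
#   print(status.to_dict())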
| 39.850917 | 3,325 | 0.660317 |
6e93bd33a400727dc8b069619071b9edf9b24116 | 6,672 | py | Python | interactive.py | edbltn/fairseq | e4d25fd96f1e38190400dbbdbc77eeda71ac50a0 | [
"BSD-3-Clause"
] | 1 | 2019-02-13T13:05:07.000Z | 2019-02-13T13:05:07.000Z | interactive.py | edbltn/fairseq | e4d25fd96f1e38190400dbbdbc77eeda71ac50a0 | [
"BSD-3-Clause"
] | null | null | null | interactive.py | edbltn/fairseq | e4d25fd96f1e38190400dbbdbc77eeda71ac50a0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import fileinput
import sys
import numpy as np
import torch
from fairseq import data, options, tasks, tokenizer, utils
from fairseq.sequence_generator import SequenceGenerator
from fairseq.utils import import_user_module
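# Added note: a typical interactive translation session looks like the sketch
# below (hypothetical paths; exact flags depend on the trained checkpoint and
# the preprocessed data-bin directory):
#   python interactive.py data-bin/iwslt14.de-en --path checkpoints/model.pt \
#       --beam 5 --buffer-size 16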
Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def buffered_read(input, buffer_size):
buffer = []
for src_str in fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")):
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, args, task, max_positions):
tokens = [
tokenizer.Tokenizer.tokenize(src_str, task.source_dictionary, add_if_not_exist=False).long()
for src_str in lines
]
lengths = np.array([t.numel() for t in tokens])
itr = task.get_batch_iterator(
dataset=data.LanguagePairDataset(tokens, lengths, task.source_dictionary),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
srcs=[lines[i] for i in batch['id']],
tokens=batch['net_input']['src_tokens'],
lengths=batch['net_input']['src_lengths'],
), batch['id']
def main(args):
import_user_module(args)
if args.buffer_size < 1:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Setup task, e.g., translation
task = tasks.setup_task(args)
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = utils.load_ensemble_for_inference(
args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides),
)
# Set dictionaries
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
# Initialize generator
translator = SequenceGenerator(
models, tgt_dict, beam_size=args.beam, minlen=args.min_len,
stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen, unk_penalty=args.unkpen,
sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature,
diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength,
match_source_len=args.match_source_len, no_repeat_ngram_size=args.no_repeat_ngram_size,
)
if use_cuda:
translator.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
def make_result(src_str, hypos):
result = Translation(
src_str='O\t{}'.format(src_str),
hypos=[],
pos_scores=[],
alignments=[],
)
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str))
result.pos_scores.append('P\t{}'.format(
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
result.alignments.append(
'A\t{}'.format(' '.join(map(lambda x: str(utils.item(x)), alignment)))
if args.print_alignment else None
)
return result
def process_batch(batch):
tokens = batch.tokens
lengths = batch.lengths
if use_cuda:
tokens = tokens.cuda()
lengths = lengths.cuda()
encoder_input = {'src_tokens': tokens, 'src_lengths': lengths}
translations = translator.generate(
encoder_input,
maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
)
return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
if args.buffer_size > 1:
print('| Sentence buffer size:', args.buffer_size)
print('| Type the input sentence and press return:')
for inputs in buffered_read(args.input, args.buffer_size):
indices = []
results = []
for batch, batch_indices in make_batches(inputs, args, task, max_positions):
indices.extend(batch_indices)
results.extend(process_batch(batch))
for i in np.argsort(indices):
result = results[i]
print(result.src_str)
for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments):
print(hypo)
print(pos_scores)
if align is not None:
print(align)
def cli_main():
parser = options.get_generation_parser(interactive=True)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| 34.391753 | 113 | 0.639988 |
0fbb4dc16718d87f9c32bdded9af49206c3dceda | 313 | py | Python | 001/two_sum.py | codeestX/re-zero-leetcode-life | 30295074af018aa25fb69010e58273f934459a5a | [
"MIT"
] | null | null | null | 001/two_sum.py | codeestX/re-zero-leetcode-life | 30295074af018aa25fb69010e58273f934459a5a | [
"MIT"
] | null | null | null | 001/two_sum.py | codeestX/re-zero-leetcode-life | 30295074af018aa25fb69010e58273f934459a5a | [
"MIT"
] | null | null | null | class Solution:
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
d = {}
for i, num in enumerate(nums):
if target - num in d:
return [d[target - num], i]
d[num] = i
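# Added example: returns the indices of the two numbers that add up to target.
#   assert Solution().twoSum([2, 7, 11, 15], 9) == [0, 1]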
| 24.076923 | 43 | 0.43131 |
e549b61db56cd3537a7d3e1e5535d22957220fb3 | 2,352 | py | Python | kornia/color/adjust.py | timaebi/kornia | 4710234128d7d830cd020c45df8eb17870d4e939 | [
"Apache-2.0"
] | 10 | 2021-01-26T05:25:01.000Z | 2022-02-08T06:10:41.000Z | kornia/color/adjust.py | sounakdey/kornia | 6a0df6dee7b213572ff3441bb6eb0e07a23f0ef3 | [
"Apache-2.0"
] | 3 | 2021-05-03T10:34:15.000Z | 2022-02-17T04:25:26.000Z | kornia/color/adjust.py | sounakdey/kornia | 6a0df6dee7b213572ff3441bb6eb0e07a23f0ef3 | [
"Apache-2.0"
] | 4 | 2021-04-30T01:51:38.000Z | 2022-01-27T05:06:04.000Z | import torch
import torch.nn as nn
class AdjustBrightness(nn.Module):
r"""Adjust Brightness of an Image
See :class:`~kornia.color.AdjustBrightness` for details.
Args:
image (torch.Tensor): Image to be adjusted.
brightness_factor (torch.Tensor): Brightness adjust factor per element
            in the batch. 0 generates a completely black image, 1 does not modify
            the input image while any other non-negative number modifies the
brightness by this factor.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self) -> None:
super(AdjustBrightness, self).__init__()
def forward(self, # type: ignore
image: torch.Tensor, # type: ignore
brightness_factor: torch.Tensor # type: ignore
) -> torch.Tensor: # type: ignore
return adjust_brightness(image, brightness_factor)
def adjust_brightness(image: torch.Tensor,
brightness_factor: torch.Tensor) -> torch.Tensor:
r"""Adjust Brightness of an Image
See :class:`~kornia.color.AdjustBrightness` for details.
Args:
image (torch.Tensor): Image to be adjusted.
brightness_factor (torch.Tensor): Brightness adjust factor per element
            in the batch. 0 generates a completely black image, 1 does not modify
            the input image while any other non-negative number modifies the
brightness by this factor.
Returns:
torch.Tensor: Adjusted image.
"""
if not torch.is_tensor(image):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3:
raise ValueError("Input size must have a shape of (*, H, W). Got {}"
.format(image.shape))
if (brightness_factor < torch.zeros(1)).any():
raise ValueError("Brightness factor must be non-negative. Got {}"
.format(brightness_factor))
if torch.is_tensor(brightness_factor):
for _ in image.shape[1:]:
brightness_factor = brightness_factor.unsqueeze(-1)
# Apply brightness factor to each channel
adjust_image: torch.Tensor = image * brightness_factor
# Truncate between pixel values
out: torch.Tensor = torch.clamp(adjust_image, 0.0, 1.0)
return out
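# Added usage sketch (assumes image values already lie in [0, 1]):
#   img = torch.rand(1, 3, 32, 32)
#   brighter = adjust_brightness(img, torch.tensor(1.5))
#   darker = AdjustBrightness()(img, torch.tensor(0.5))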
| 33.126761 | 79 | 0.641156 |
3b9a2294f4c91a109787b4f3fba38779d4363459 | 6,618 | py | Python | dojo/tools/zap/parser.py | christophe226/django-DefectDojo | 0ea0a59526ee9169dce29777288cf9dba0c44263 | [
"BSD-3-Clause"
] | null | null | null | dojo/tools/zap/parser.py | christophe226/django-DefectDojo | 0ea0a59526ee9169dce29777288cf9dba0c44263 | [
"BSD-3-Clause"
] | 562 | 2019-06-21T18:44:38.000Z | 2022-03-28T18:09:08.000Z | dojo/tools/zap/parser.py | christophe226/django-DefectDojo | 0ea0a59526ee9169dce29777288cf9dba0c44263 | [
"BSD-3-Clause"
] | null | null | null |
import re
import socket
import hyperlink
from defusedxml import ElementTree as ET
from django.utils.html import escape, strip_tags
from dojo.models import Endpoint, Finding
# NOTE (added): ETREE_VERSION is referenced below but was never defined in this
# module; it is derived here from the stdlib ElementTree version string (an
# assumption about the intended source of the version check).
from xml.etree.ElementTree import VERSION as _ETREE_VERSION
ETREE_VERSION = [int(part) for part in _ETREE_VERSION.split('.')]
class ZapParser(object):
    """Parser for xml files generated by the OWASP Zed Attack Proxy (ZAP) tool https://www.zaproxy.org/."""
def get_scan_types(self):
return ["ZAP Scan"]
def get_label_for_scan_types(self, scan_type):
return "ZAP Scan"
def get_description_for_scan_types(self, scan_type):
return "ZAP XML report format."
def get_findings(self, xml_output, test):
tree = ET.parse(xml_output)
return self.get_items(tree, test)
def get_items(self, tree, test):
"""
        @return items A list of Finding instances
"""
items = list()
for node in tree.findall('site'):
site = Site(node)
main_host = Endpoint(host=site.host, port=site.port)
for item in site.items:
severity = item.riskdesc.split(' ', 1)[0]
references = ''
for ref in item.ref:
references += ref + "\n"
find = Finding(title=item.name,
cwe=item.cwe,
description=strip_tags(item.desc),
test=test,
severity=severity,
mitigation=strip_tags(item.resolution),
references=references,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
impact="No impact provided",
)
find.unsaved_endpoints = [main_host]
for i in item.items:
endpoint = Endpoint.from_uri(i['uri'])
find.unsaved_endpoints.append(endpoint)
items.append(find)
return items
def get_attrib_from_subnode(xml_node, subnode_xpath_expr, attrib_name):
"""
    Finds a subnode in the item node and then retrieves a value from it
@return An attribute value
"""
global ETREE_VERSION
node = None
if ETREE_VERSION[0] <= 1 and ETREE_VERSION[1] < 3:
match_obj = re.search(r"([^\@]+?)\[\@([^=]*?)=\'([^\']*?)\'", subnode_xpath_expr)
if match_obj is not None:
node_to_find = match_obj.group(1)
xpath_attrib = match_obj.group(2)
xpath_value = match_obj.group(3)
for node_found in xml_node.findall(node_to_find):
if node_found.attrib[xpath_attrib] == xpath_value:
node = node_found
break
else:
node = xml_node.find(subnode_xpath_expr)
else:
node = xml_node.find(subnode_xpath_expr)
if node is not None:
return node.get(attrib_name)
return None
class Site(object):
def __init__(self, item_node):
self.node = item_node
self.name = self.node.get('name')
self.host = self.node.get('host')
self.name = self.node.get('name')
self.port = self.node.get('port')
self.items = []
for alert in self.node.findall('alerts/alertitem'):
self.items.append(Item(alert))
def get_text_from_subnode(self, subnode_xpath_expr):
"""
        Finds a subnode in the host node and then retrieves a value from it.
@return An attribute value
"""
sub_node = self.node.find(subnode_xpath_expr)
if sub_node is not None:
return sub_node.text
return None
def resolve(self, host):
try:
return socket.gethostbyname(host)
except:
pass
return host
class Item(object):
"""
    An abstract representation of an Item
    @param item_node An item_node taken from a ZAP xml tree
"""
def __init__(self, item_node):
self.node = item_node
self.id = self.get_text_from_subnode('pluginid')
self.name = self.get_text_from_subnode('alert')
self.severity = self.get_text_from_subnode('riskcode')
self.riskdesc = self.get_text_from_subnode('riskdesc')
self.desc = self.get_text_from_subnode('desc')
self.resolution = self.get_text_from_subnode('solution') if self.get_text_from_subnode('solution') else ""
self.desc += "\n\nReference: " + self.get_text_from_subnode('reference') if self.get_text_from_subnode(
'reference') else ""
self.ref = []
if self.get_text_from_subnode('cweid'):
self.ref.append("CWE-" + self.get_text_from_subnode('cweid'))
self.cwe = self.get_text_from_subnode('cweid')
else:
self.cwe = 0
description_detail = "\n"
for instance in item_node.findall('instances/instance'):
for node in instance.iter():
if node.tag == "uri":
if node.text != "":
description_detail += "URL: " + node.text
if node.tag == "method":
if node.text != "":
description_detail += "Method: " + node.text
if node.tag == "param":
if node.text != "":
description_detail += "Parameter: " + node.text
if node.tag == "evidence":
if node.text != "":
description_detail += "Evidence: " + escape(node.text)
description_detail += "\n"
self.desc += description_detail
if self.get_text_from_subnode('wascid'):
self.ref.append("WASC-" + self.get_text_from_subnode('wascid'))
self.items = []
for instance in item_node.findall('instances/instance'):
n = instance.findtext("uri")
n2 = instance.findtext("param")
url = hyperlink.parse(n)
item = {'uri': n, 'param': n2 if n2 else "", 'host': url.host, 'protocol': url.scheme, 'port': url.port}
self.items.append(item)
self.requests = "\n".join([i['uri'] for i in self.items])
def get_text_from_subnode(self, subnode_xpath_expr):
"""
        Finds a subnode in the host node and then retrieves a value from it.
@return An attribute value
"""
sub_node = self.node.find(subnode_xpath_expr)
if sub_node is not None:
return sub_node.text
return None
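# Added usage sketch (assumes a saved ZAP XML report and a dojo `Test` instance):
#   parser = ZapParser()
#   findings = parser.get_findings('zap_report.xml', test)
#   for finding in findings:
#       print(finding.title, finding.severity)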
| 33.424242 | 116 | 0.551526 |
d9fb4aa1b95166e7835358ecfff44a304633bcff | 4,179 | py | Python | model.py | lbilic/testrep | ad228b03ba9f9620d239ad446a173911b2486cbb | [
"MIT"
] | null | null | null | model.py | lbilic/testrep | ad228b03ba9f9620d239ad446a173911b2486cbb | [
"MIT"
] | null | null | null | model.py | lbilic/testrep | ad228b03ba9f9620d239ad446a173911b2486cbb | [
"MIT"
] | null | null | null |
import numpy as np
import os
import skimage.io as io
import skimage.transform as trans
import numpy as np
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras import backend as K  # aliased as K because dice_coef below uses K.flatten / K.sum
smooth = 1.
def dice_coef(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return -dice_coef(y_true, y_pred)
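# Added note: dice_coef above implements the smoothed Sorensen-Dice coefficient
#   Dice = (2 * |A ∩ B| + smooth) / (|A| + |B| + smooth)
# on the flattened masks; `smooth` avoids division by zero for empty masks, and
# the loss is simply its negative so that maximising overlap minimises the loss.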
def unet(pretrained_weights = None,input_size = (256,256,1)):
inputs = Input(input_size)
conv1 = Conv2D(64, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
conv1 = Conv2D(64, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
conv2 = Conv2D(128, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
conv3 = Conv2D(256, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
conv4 = Conv2D(512, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
conv5 = Conv2D(1024, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
merge6 = concatenate([drop4,up6], axis = 3)
conv6 = Conv2D(512, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
conv6 = Conv2D(512, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)
up7 = Conv2D(256, (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
merge7 = concatenate([conv3,up7], axis = 3)
conv7 = Conv2D(256, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
conv7 = Conv2D(256, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)
up8 = Conv2D(128, (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
merge8 = concatenate([conv2,up8], axis = 3)
conv8 = Conv2D(128, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
conv8 = Conv2D(128, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)
up9 = Conv2D(64, (2, 2), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
merge9 = concatenate([conv1,up9], axis = 3)
conv9 = Conv2D(64, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
conv9 = Conv2D(64, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv9 = Conv2D(2, (3, 3), activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
conv10 = Conv2D(1, (1, 1), name='weight_output')(conv9)
model = Model(inputs = [inputs], outputs = [conv10])
model.compile(optimizer = Adam(lr = 1e-5), loss = dice_coef_loss, metrics = [dice_coef])
#model.summary()
if(pretrained_weights):
model.load_weights(pretrained_weights)
return model | 54.272727 | 137 | 0.663795 |
8edfdfeb57d96b0b6c6bbc91dba12c28f12cbf27 | 14,634 | py | Python | collagen/data/_splitter.py | MIPT-Oulu/Collagen | 0cbc4285d60e5c9fcc89f629fcf4321e80b7452c | [
"MIT"
] | 4 | 2019-05-14T14:44:51.000Z | 2020-03-13T08:37:48.000Z | collagen/data/_splitter.py | MIPT-Oulu/Collagen | 0cbc4285d60e5c9fcc89f629fcf4321e80b7452c | [
"MIT"
] | 26 | 2019-04-21T20:35:22.000Z | 2022-03-12T00:32:57.000Z | collagen/data/_splitter.py | MIPT-Oulu/Collagen | 0cbc4285d60e5c9fcc89f629fcf4321e80b7452c | [
"MIT"
] | 1 | 2019-05-14T14:53:28.000Z | 2019-05-14T14:53:28.000Z | import dill as pickle
import pandas as pd
from sklearn import model_selection
from sklearn.utils import resample
__all__ = ["Splitter", "FoldSplit", "TrainValSplit", "SSFoldSplit"]
class Splitter(object):
def __init__(self):
self.__ds_chunks = None
self.__folds_iter = None
pass
def __next__(self):
if self.__folds_iter is None:
raise NotImplementedError
else:
            return next(self.__folds_iter)
def __iter__(self):
if self.__ds_chunks is None:
raise NotImplementedError
else:
return self
def dump(self, filename):
with open(filename, "wb") as f:
pickle.dump(self.__ds_chunks, f, pickle.HIGHEST_PROTOCOL)
def load(self, filename):
with open(filename, "rb") as f:
self.__ds_chunks = pickle.load(f)
self.__folds_iter = iter(self.__ds_chunks)
class FoldSplit(Splitter):
def __init__(self, ds: pd.DataFrame, n_folds: int = 5, target_col: str = 'target',
group_col: str or None = None, random_state: int or None = None):
super().__init__()
if group_col is None:
splitter = model_selection.StratifiedKFold(n_splits=n_folds, random_state=random_state)
split_iter = splitter.split(ds, ds[target_col])
else:
splitter = model_selection.GroupKFold(n_splits=n_folds)
split_iter = splitter.split(ds, ds[target_col], groups=ds[group_col])
self.__cv_folds_idx = [(train_idx, val_idx) for (train_idx, val_idx) in split_iter]
self.__ds_chunks = [(ds.iloc[split[0]], ds.iloc[split[1]]) for split in self.__cv_folds_idx]
self.__folds_iter = iter(self.__ds_chunks)
def __next__(self):
return next(self.__folds_iter)
def __iter__(self):
return self
def dump(self, filename):
with open(filename, "wb") as f:
pickle.dump(self.__ds_chunks, f, pickle.HIGHEST_PROTOCOL)
def fold(self, i):
return self.__ds_chunks[i]
def n_folds(self):
return len(self.__cv_folds_idx)
def fold_idx(self, i):
return self.__cv_folds_idx[i]
class SSFoldSplit(Splitter):
def __init__(self, ds: pd.DataFrame, n_ss_folds: int = 3, n_folds: int = 5, target_col: str = 'target',
random_state: int or None = None, unlabeled_target_col: str = '5means_classes', test_ratio: int = 0.25,
labeled_train_size_per_class: int = None, unlabeled_train_size_per_class: int = None,
labeled_train_size: int = None, unlabeled_train_size: int = None, group_col: str or None = None,
equal_target: bool = True, equal_unlabeled_target: bool = True, shuffle: bool = True):
super().__init__()
self._test_ratio = test_ratio
if equal_target and labeled_train_size_per_class is None:
raise ValueError("labeled_train_size_per_class must be determined when \
equal_target is True, but found None")
if not equal_target and labeled_train_size is None:
raise ValueError("labeled_train_size must be determined when \
equal_target is False, but found None")
# Master split into Label/Unlabel
if group_col is None:
master_splitter = model_selection.StratifiedKFold(n_splits=n_ss_folds, random_state=random_state)
unlabeled_idx, labeled_idx = next(master_splitter.split(ds, ds[target_col]))
else:
master_splitter = model_selection.GroupKFold(n_splits=n_ss_folds)
unlabeled_idx, labeled_idx = next(master_splitter.split(ds, ds[target_col], groups=ds[group_col]))
unlabeled_ds = ds.iloc[unlabeled_idx]
# u_groups = ds[unlabeled_target_col].iloc[unlabeled_idx]
labeled_ds = ds.iloc[labeled_idx]
l_groups = ds[target_col].iloc[labeled_idx]
if not equal_target and labeled_train_size is not None and labeled_train_size > len(labeled_idx):
raise ValueError('Input labeled train size {} is larger than actual labeled train size {}'.format(
labeled_train_size, len(labeled_idx)))
if unlabeled_train_size is not None and unlabeled_train_size > len(unlabeled_idx):
unlabeled_train_size = len(unlabeled_idx)
# raise ValueError('Input unlabeled train size {} is larger than actual unlabeled train size {}'.format(unlabeled_train_size, len(unlabeled_idx)))
# Split labeled data using GroupKFold
# Split unlabeled data using GroupKFold
self.__cv_folds_idx = []
self.__ds_chunks = []
# split of train/val data
if group_col is None:
unlabeled_splitter = model_selection.StratifiedKFold(n_splits=n_folds, random_state=random_state + 1)
unlabeled_spl_iter = unlabeled_splitter.split(unlabeled_ds, unlabeled_ds[target_col])
else:
unlabeled_splitter = model_selection.GroupKFold(n_splits=n_folds)
unlabeled_spl_iter = unlabeled_splitter.split(unlabeled_ds, unlabeled_ds[target_col],
groups=unlabeled_ds[group_col])
if group_col is None:
labeled_splitter = model_selection.StratifiedKFold(n_splits=n_folds, random_state=random_state + 2)
labeled_spl_iter = labeled_splitter.split(labeled_ds, labeled_ds[target_col])
else:
labeled_splitter = model_selection.GroupKFold(n_splits=n_folds)
labeled_spl_iter = labeled_splitter.split(labeled_ds, labeled_ds[target_col], groups=labeled_ds[group_col])
for i in range(n_folds):
u_train, u_test = next(unlabeled_spl_iter)
l_train, l_test = next(labeled_spl_iter)
l_train_target = labeled_ds.iloc[l_train][target_col]
l_train_data = labeled_ds.iloc[l_train]
l_test_target = labeled_ds.iloc[l_test][target_col]
l_test_data = labeled_ds.iloc[l_test]
# Sample labeled_train_size of labeled data
if equal_target:
filtered_l_train_idx, chosen_l_train = self._sample_labeled_data(l_train_data, l_train_target,
target_col,
labeled_train_size_per_class,
random_state)
filtered_l_test_idx, chosen_l_test = self._sample_labeled_data(l_test_data, l_test_target,
target_col,
int(
labeled_train_size_per_class * self._test_ratio),
random_state)
else:
if labeled_train_size is not None:
chosen_l_train, _ = model_selection.train_test_split(l_train, train_size=labeled_train_size,
random_state=random_state, shuffle=shuffle,
stratify=l_train_target)
                    chosen_l_test, _ = model_selection.train_test_split(
                        l_test,
                        train_size=int(labeled_train_size * self._test_ratio),
                        random_state=random_state, shuffle=shuffle,
                        stratify=l_test_target)
else:
chosen_l_train = l_train
chosen_l_test = l_test
filtered_l_train_idx = labeled_ds.iloc[chosen_l_train]
filtered_l_test_idx = labeled_ds.iloc[chosen_l_test]
# Sample unlabeled_train_size of labeled data
if equal_unlabeled_target:
u_train_target = unlabeled_ds.iloc[u_train][unlabeled_target_col]
u_test_target = unlabeled_ds.iloc[u_test][unlabeled_target_col]
filtered_u_train_idx, chosen_u_train = self._sample_unlabeled_data(unlabeled_ds, u_train,
unlabeled_target_col,
u_train_target,
unlabeled_train_size_per_class,
random_state)
filtered_u_test_idx, chosen_u_test = self._sample_unlabeled_data(unlabeled_ds, u_test,
unlabeled_target_col,
u_test_target,
int(
unlabeled_train_size_per_class * self._test_ratio),
random_state)
else:
if unlabeled_train_size is not None:
# chosen_u_train, _ = model_selection.train_test_split(u_train, train_size=unlabeled_train_size,
# random_state=random_state, shuffle=shuffle)
is_replace = unlabeled_train_size > len(u_train)
chosen_u_train = resample(u_train, n_samples=unlabeled_train_size, replace=is_replace,
random_state=random_state)
unlabeled_test_size = int(unlabeled_train_size * self._test_ratio)
is_replace = unlabeled_test_size > len(u_test)
chosen_u_test = resample(u_test, n_samples=unlabeled_test_size, replace=is_replace,
random_state=random_state)
else:
chosen_u_train = u_train
chosen_u_test = u_test
filtered_u_train_idx = unlabeled_ds.iloc[chosen_u_train]
filtered_u_test_idx = unlabeled_ds.iloc[chosen_u_test]
self.__cv_folds_idx.append((chosen_l_train, chosen_l_test, chosen_u_train, chosen_u_test))
self.__ds_chunks.append((filtered_l_train_idx, filtered_l_test_idx,
filtered_u_train_idx, filtered_u_test_idx))
self.__folds_iter = iter(self.__ds_chunks)
def _sample_labeled_data(self, data, targets, target_col, data_per_class, random_state):
labeled_targets = list(set(targets.tolist()))
chosen_data = []
for lt in labeled_targets:
filtered_rows = data[data[target_col] == lt]
filtered_rows_idx = filtered_rows.index
replace = data_per_class > len(filtered_rows_idx)
chosen_idx_by_target = resample(filtered_rows_idx, n_samples=data_per_class,
replace=replace, random_state=random_state)
chosen_data += chosen_idx_by_target.tolist()
filtered_idx = data.loc[chosen_data]
return filtered_idx, chosen_data
def _sample_unlabeled_data(self, unlabeled_ds, u_train, unlabeled_target_col, u_train_target, data_per_class,
random_state, replace=False):
u_train_target = unlabeled_ds.iloc[u_train][unlabeled_target_col]
u_train_data = unlabeled_ds.iloc[u_train]
ideal_labeled_targets = list(set(u_train_target.tolist()))
chosen_u_train = []
for lt in ideal_labeled_targets:
filtered_rows = u_train_data[u_train_data[unlabeled_target_col] == lt]
filtered_rows_idx = filtered_rows.index
replace = data_per_class > len(filtered_rows_idx)
chosen_u_train_by_target = resample(filtered_rows_idx, n_samples=data_per_class,
replace=replace, random_state=random_state)
chosen_u_train += chosen_u_train_by_target.tolist()
filtered_u_train_idx = u_train_data.loc[chosen_u_train]
return filtered_u_train_idx, chosen_u_train
def _sampling(self, l_train_data, l_train_target, target_col, labeled_train_size_per_class, random_state):
labeled_targets = list(set(l_train_target.tolist()))
chosen_l_train = []
for lt in labeled_targets:
filtered_rows = l_train_data[l_train_data[target_col] == lt]
filtered_rows_idx = filtered_rows.index
chosen_l_train_by_target = resample(filtered_rows_idx, n_samples=labeled_train_size_per_class, replace=True,
random_state=random_state)
chosen_l_train += chosen_l_train_by_target.tolist()
filtered_l_train_idx = l_train_data.loc[chosen_l_train]
return chosen_l_train, filtered_l_train_idx
def dump(self, filename):
with open(filename, "wb") as f:
pickle.dump(self.__ds_chunks, f, pickle.HIGHEST_PROTOCOL)
def __next__(self):
return next(self.__folds_iter)
def __iter__(self):
return self
def fold(self, i):
return self.__ds_chunks[i]
def n_folds(self):
return len(self.__cv_folds_idx)
def fold_idx(self, i):
return self.__cv_folds_idx[i]
class TrainValSplit(Splitter):
def __init__(self, ds: pd.DataFrame, train_size: int or float, shuffle: bool, random_state: int or None = None):
super().__init__()
        train_idx, val_idx = model_selection.train_test_split(ds.index,
                                                               train_size=train_size,
                                                               shuffle=shuffle,
                                                               random_state=random_state)
self.__ds_chunks = [ds.iloc[train_idx], ds.iloc[val_idx]]
self.__dataset_iter = iter(self.__ds_chunks)
def __next__(self):
return next(self.__dataset_iter)
def __iter__(self):
return self
def train_set(self):
return self.__ds_chunks[0]
def val_set(self):
return self.__ds_chunks[1]
| 50.116438 | 158 | 0.581522 |
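A short usage sketch for the splitters defined above; the DataFrame and its column names are illustrative, not taken from the source.

import numpy as np
import pandas as pd

# Toy metadata table: one row per sample with a class label and a subject/group id.
df = pd.DataFrame({
    "target": np.random.randint(0, 3, size=300),
    "group": np.random.randint(0, 30, size=300),
})

# Stratified 5-fold split; iterating yields (train_df, val_df) pairs.
splitter = FoldSplit(df, n_folds=5, target_col="target")
for train_df, val_df in splitter:
    print(len(train_df), len(val_df))

# Folds can also be addressed directly and persisted for reproducibility.
train_df, val_df = splitter.fold(0)
splitter.dump("folds.pkl")

# Grouped variant: rows sharing a "group" value never cross the train/val boundary.
grouped_splitter = FoldSplit(df, n_folds=5, target_col="target", group_col="group")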
1941665601464012f3615c914147755e3be12f17 | 164 | py | Python | addons/Sprytile-6b68d00/rx/core/py3/observable.py | trisadmeslek/V-Sekai-Blender-tools | 0d8747387c58584b50c69c61ba50a881319114f8 | [
"MIT"
] | 733 | 2017-08-22T09:47:54.000Z | 2022-03-27T23:56:52.000Z | rx/core/py3/observable.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | [
"MIT"
] | 74 | 2017-08-16T09:13:05.000Z | 2022-03-15T02:31:49.000Z | rx/core/py3/observable.py | asheraryam/Sprytile | c63be50d14b07192ff134ceab256f0d69b9c4c92 | [
"MIT"
] | 77 | 2017-09-14T16:56:11.000Z | 2022-03-27T13:55:16.000Z | from abc import ABCMeta, abstractmethod
class Observable(metaclass=ABCMeta):
@abstractmethod
def subscribe(self, observer):
return NotImplemented
| 20.5 | 39 | 0.75 |
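The file above only declares the abstract interface. A minimal concrete sketch, with the observer treated as a plain callable purely for illustration (RxPY observers normally expose on_next/on_error/on_completed):

class ListObservable(Observable):
    """Illustrative subclass: pushes each item of a list to the observer."""

    def __init__(self, items):
        self._items = items

    def subscribe(self, observer):
        for item in self._items:
            observer(item)   # observer assumed to be a plain callable for this sketch

ListObservable([1, 2, 3]).subscribe(print)   # prints 1, 2, 3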
84fda62c5bca3716c062db0c002a6018068640e4 | 2,351 | py | Python | chatterbox/api/instagram.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 8 | 2015-03-10T20:03:09.000Z | 2018-06-14T23:03:58.000Z | chatterbox/api/instagram.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | 3 | 2015-07-14T22:44:47.000Z | 2020-06-05T23:43:05.000Z | chatterbox/api/instagram.py | blitzagency/django-chatterbox | 7bf17444f8308aa12b6718bd62ee1344021c21aa | [
"MIT"
] | null | null | null | import logging
from six.moves.urllib.parse import urlencode
from . import OAuth2Api, SimpleProfile
log = logging.getLogger(__name__)
class Instagram(OAuth2Api):
def verify_parsed_response(self, data):
pass
def whoami(self):
log.debug("Invoking whoami")
return self.get("https://api.instagram.com/v1/users/self")
def simple_profile(self):
log.debug("Invoking simple_profile")
data = self.whoami()
data = data.get('data', {})
link = "http://instagram.com/{}".format(data.get("username", None))
result = {
"id": data.get("id", None),
"name": data.get("username", None),
"link": link,
"picture": data.get("profile_picture", None)
}
profile = SimpleProfile(**result)
return profile
def search(self, query, **kwargs):
"""http://instagram.com/developer/endpoints/tags/
PARAMETERS
count -- Count of tagged media to return.
min_tag_id -- Return media before this min_tag_id.
max_tag_id -- Return media after this max_tag_id.
"""
log.debug("Invoking search")
# need to do a check on quality of query (spaces?)
url = 'https://api.instagram.com/v1/tags/{}/media/recent'.format(query)
if kwargs:
additional = urlencode(kwargs)
url = url + '?{}'.format(additional)
return self.get(url)
def user_media(self, user_id='self', **kwargs):
"""http://instagram.com/developer/endpoints/users/#get_users_feed
PARAMETERS
count -- Count of media to return.
max_timestamp -- Return media before this UNIX timestamp.
access_token -- A valid access token.
min_timestamp -- Return media after this UNIX timestamp.
min_id -- Return media later than this min_id.
max_id -- Return media earlier than this max_id.
"""
log.debug("Invoking user_media")
# user_id can be None
if not user_id:
user_id = 'self'
# need to do a check on quality of query (spaces?)
url = 'https://api.instagram.com/v1/users/{}/media/recent'.format(user_id)
if kwargs:
additional = urlencode(kwargs)
url = url + '?{}'.format(additional)
return self.get(url)
| 30.141026 | 82 | 0.598043 |
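A usage sketch for the wrapper above. Construction and token handling are defined by OAuth2Api elsewhere in chatterbox, so the constructor arguments below are assumptions; the method calls match the class as written.

# Hypothetical wiring: the real OAuth2Api constructor is not shown in this file.
api = Instagram(access_token="<user-access-token>")

profile = api.simple_profile()            # SimpleProfile(id, name, link, picture)
recent = api.user_media(count=10)         # 10 most recent posts of the authenticated user
tagged = api.search("sunset", count=20)   # recent media tagged #sunset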
c6be5754043618abb850c65da3bed2c43529021d | 3,034 | py | Python | models/enconder_decoder.py | hitfee01/Yolo3D | e9a73f66b948311013b190ce1065e4abf595f206 | [
"MIT"
] | 9 | 2021-01-22T01:21:15.000Z | 2022-03-23T06:06:08.000Z | models/enconder_decoder.py | hitfee01/Yolo3D | e9a73f66b948311013b190ce1065e4abf595f206 | [
"MIT"
] | 7 | 2021-01-07T05:33:58.000Z | 2022-03-24T06:14:11.000Z | models/enconder_decoder.py | hitfee01/Yolo3D | e9a73f66b948311013b190ce1065e4abf595f206 | [
"MIT"
] | 5 | 2021-03-27T08:28:05.000Z | 2022-02-13T14:10:41.000Z |
import torch
import torch.nn as nn
import numpy as np
from models.MultiBin import MultiBin
class Coder(object):
def __init__(self, dim_ref):
super(Coder, self).__init__()
self.multibin = MultiBin(2, 0.1)
self.dim_ref = torch.tensor(dim_ref, dtype=torch.float32)
@torch.no_grad()
def encode_bbox(self, gt_bboxes, offsets):
gt_ij = (gt_bboxes[:, :2] - offsets).long()
gt_i, gt_j = gt_ij.T # grid xy indices
gt_bboxes[:, :2] -= gt_ij
return gt_bboxes, gt_i, gt_j
@torch.no_grad()
def decode_bbox(self, pred_bboxes):
pass
@torch.no_grad()
def encode_orient(self, gt_alphas, device=torch.device('cpu')):
self.multibin.to(device)
num = list(gt_alphas.size())[0]
# alpha is [-pi..pi], shift it to be [0..2pi]
Orientation = torch.zeros((num, self.multibin.bin_num * 2), dtype=torch.float32, device=device)
Confidence = torch.zeros((num, self.multibin.bin_num,), dtype=torch.long, device=device)
alphas = gt_alphas + np.pi
alphas = alphas.to(device)
bin_idxs = self.multibin.get_bins(alphas)
bin_ben_angles = self.multibin.get_bins_bench_angle(bin_idxs[1])
angle_diff = alphas[bin_idxs[0]] - bin_ben_angles
Confidence[bin_idxs] = 1
Orientation[bin_idxs[0], bin_idxs[1]*self.multibin.bin_num] = torch.cos(angle_diff).to(torch.float32)
Orientation[bin_idxs[0], bin_idxs[1]*self.multibin.bin_num + 1] = torch.sin(angle_diff).to(torch.float32)
return Orientation, Confidence
@torch.no_grad()
def decode_orient(self, pred_alphas, pred_bin_confs):
self.multibin.to(pred_alphas.device)
batch_size, bins = pred_bin_confs.size()
if batch_size <= 0:
return torch.zeros_like(pred_alphas[:, :1])
argmax = torch.argmax(pred_bin_confs, dim=1)
indexes_cos = (argmax * bins).long()
indexes_sin = (argmax * bins + 1).long()
batch_ids = torch.arange(batch_size).to(pred_bin_confs.device)
# extract just the important bin
alpha = torch.atan2(pred_alphas[batch_ids, indexes_sin], pred_alphas[batch_ids, indexes_cos])
alpha += self.multibin.get_bin_bench_angle(argmax)
# alpha is [0..2pi], shift it to be [-pi..pi]
alpha -= np.pi
i_pos = alpha > np.pi
i_neg = alpha < -np.pi
alpha[i_pos] -= 2*np.pi
alpha[i_neg] += 2*np.pi
return alpha
@torch.no_grad()
def encode_dimension(self, gt_dimensions, gt_classes, device=torch.device('cpu')):
self.dim_ref = self.dim_ref.to(device)
gt_dimensions = gt_dimensions.to(device)
dim_refs = self.dim_ref[gt_classes.long()]
return gt_dimensions/dim_refs - 1
@torch.no_grad()
def decode_dimension(self, pred_dimension_offsets, pred_classes):
self.dim_ref = self.dim_ref.to(pred_classes.device)
dim_refs = self.dim_ref[pred_classes.long()]
return (pred_dimension_offsets + 1) * dim_refs
| 39.402597 | 113 | 0.648649 |
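A round-trip sketch for the coder above. The per-class dim_ref values are made-up placeholders (the project would normally use dataset statistics), and models.MultiBin is assumed to be importable as referenced.

import torch

dim_ref = [[1.5, 1.6, 3.9], [1.7, 0.6, 0.8], [1.7, 0.6, 1.8]]   # placeholder (h, w, l) per class
coder = Coder(dim_ref)

# Orientation: encode observation angles into MultiBin targets, then decode them back.
alphas = torch.tensor([-1.2, 0.4, 2.9])          # angles in [-pi, pi]
orient, conf = coder.encode_orient(alphas)       # shapes (N, 4) and (N, 2) for 2 bins
decoded = coder.decode_orient(orient, conf)      # ~ alphas, up to numeric error

# Dimensions: offsets are stored relative to the per-class reference size.
classes = torch.tensor([0, 1, 2])
dims = torch.tensor([[1.4, 1.6, 4.0], [1.6, 0.5, 0.9], [1.8, 0.7, 1.7]])
offsets = coder.encode_dimension(dims, classes)
restored = coder.decode_dimension(offsets, classes)   # ~ dims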
a7b449c2f9c52ebb4e8d47deb65bb51efcc9b241 | 1,761 | py | Python | tapsdk/backends/dotnet/inputmodes.py | jnunez101/tap-python-sdk | a2481dd53db255f09d066db97c5582180ded0f6c | [
"MIT"
] | 22 | 2020-03-01T04:41:48.000Z | 2021-12-27T17:10:55.000Z | tapsdk/backends/dotnet/inputmodes.py | jnunez101/tap-python-sdk | a2481dd53db255f09d066db97c5582180ded0f6c | [
"MIT"
] | 16 | 2020-02-25T05:10:45.000Z | 2022-01-12T16:05:30.000Z | tapsdk/backends/dotnet/inputmodes.py | jnunez101/tap-python-sdk | a2481dd53db255f09d066db97c5582180ded0f6c | [
"MIT"
] | 16 | 2020-02-22T20:35:30.000Z | 2021-12-31T14:36:07.000Z | import logging
import clr
import System
clr.AddReference(r"tapsdk/backends/dotnet/TAPWin")
from TAPWin import TAPInputMode
from TAPWin import RawSensorSensitivity
class TapInputMode:
def __init__(self, mode, sensitivity: list=[0, 0, 0]):
self._modes = {
"text" : {"name": "Text Mode", "code": TAPInputMode.Text()},
"controller" : {"name": "Controller Mode", "code": TAPInputMode.Controller()},
"controller_text" : {"name": "Controller and Text Mode", "code": TAPInputMode.ControllerWithMouseHID()},
"raw" : {"name": "Raw sensors Mode", "code": TAPInputMode.RawSensor(RawSensorSensitivity(System.Byte(0), System.Byte(0), System.Byte(0)))}
}
self.sensitivity = sensitivity
if mode in self._modes.keys():
self.mode = mode
if mode == "raw":
self._register_sensitivity(sensitivity)
else:
logging.warning("Invalid mode \"%s\". Set to \"text\"" % mode)
self.mode = "text"
def _register_sensitivity(self, sensitivity):
if isinstance(sensitivity, list) and len(sensitivity) == 3:
sensitivity[0] = max(0, min(4,sensitivity[0])) # fingers accelerometers
sensitivity[1] = max(0, min(5,sensitivity[1])) # imu gyro
sensitivity[2] = max(0, min(4,sensitivity[2])) # imu accelerometer
self.sensitivity = sensitivity
self._modes["raw"]["code"] = TAPInputMode.RawSensor(RawSensorSensitivity(System.Byte(sensitivity[0]), System.Byte(sensitivity[1]), System.Byte(sensitivity[2])))
def get_object(self):
return self._modes[self.mode]["code"]
def get_name(self):
return self._modes[self.mode]["name"]
| 44.025 | 172 | 0.62067 |
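A small usage sketch for the mode wrapper above. It needs pythonnet and the bundled TAPWin assembly at runtime, so treat it as illustrative.

# "raw" streams sensor data; the class clamps the three sensitivity values to
# fingers-accelerometer [0..4], IMU gyro [0..5] and IMU accelerometer [0..4].
mode = TapInputMode("raw", sensitivity=[2, 5, 1])
print(mode.get_name())           # "Raw sensors Mode"
native_mode = mode.get_object()  # TAPWin TAPInputMode instance handed to the device layer

# An unknown mode string logs a warning and falls back to plain text input.
fallback = TapInputMode("typo")
print(fallback.get_name())       # "Text Mode"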
b93f87e5bc0586cd04ec367024e7c36594767618 | 858 | py | Python | src/setup.py | mingrammer/pyshark | 30923f08c347c7f63510b9c0db24ac2cd7e932ee | [
"MIT"
] | 1 | 2021-01-06T21:22:35.000Z | 2021-01-06T21:22:35.000Z | src/setup.py | mingrammer/pyshark | 30923f08c347c7f63510b9c0db24ac2cd7e932ee | [
"MIT"
] | null | null | null | src/setup.py | mingrammer/pyshark | 30923f08c347c7f63510b9c0db24ac2cd7e932ee | [
"MIT"
] | null | null | null | import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.txt')) as f:
long_description = f.read()
setup(
name="pyshark",
version="0.4.2.3",
packages=find_packages(),
package_data={'': ['*.ini', '*.pcapng']},
install_requires=['lxml', 'py', 'logbook'],
tests_require=['pytest'],
url="https://github.com/KimiNewt/pyshark",
long_description=long_description,
author="KimiNewt",
description="Python wrapper for tshark, allowing python packet parsing using wireshark dissectors",
keywords="wireshark capture packets parsing packet",
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| 31.777778 | 103 | 0.653846 |
7ffa50cb3374767d0d55680432bc803a9a2abd6c | 693 | py | Python | flask_center/app.py | iamjeerge/vision-platform | aac82798c392b4fc55592020fec59dd53a326d37 | [
"MIT"
] | null | null | null | flask_center/app.py | iamjeerge/vision-platform | aac82798c392b4fc55592020fec59dd53a326d37 | [
"MIT"
] | 1 | 2021-04-04T12:59:24.000Z | 2021-04-04T12:59:24.000Z | flask_center/app.py | iamjeerge/vision-platform | aac82798c392b4fc55592020fec59dd53a326d37 | [
"MIT"
] | null | null | null | import os
import sys
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'some-secret-string'
db = SQLAlchemy(app)
@app.before_first_request
def create_tables():
db.create_all()
app.config['JWT_SECRET_KEY'] = 'jwt-secret-string'
jwt = JWTManager(app)
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
| 22.354839 | 64 | 0.76912 |
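The module above only wires up the Flask extensions; it defines no routes or entry point. A typical companion sketch (none of these names appear in the file above) would register a resource and run the development server:

from flask_restful import Resource

class Ping(Resource):
    """Illustrative resource, not part of the original app.py."""
    def get(self):
        return {"status": "ok"}

api.add_resource(Ping, "/ping")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000, debug=True)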