| max_stars_repo_path (stringlengths 4 to 305) | max_stars_repo_name (stringlengths 4 to 130) | max_stars_count (int64 0 to 191k) | id (stringlengths 1 to 8) | content (stringlengths 6 to 1.02M) | score (float64 -1.16 to 4.16) | int_score (int64 0 to 4) |
---|---|---|---|---|---|---|
pypagai/models/model_lstm.py | gcouti/pypagAI | 1 | 7 | <gh_stars>1-10
from keras import Model, Input
from keras.layers import Dense, concatenate, LSTM, Reshape, Permute, Embedding, Dropout, Convolution1D, Flatten
from keras.optimizers import Adam
from pypagai.models.base import KerasModel
class SimpleLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, cfg):
super().__init__(cfg)
self._cfg_ = cfg
def _create_network_(self):
hidden = self._cfg_['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
conc = concatenate([story, question])
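# Reshape to (batch, 1, len) then permute to (batch, len, 1) so the LSTM
# treats each concatenated token index as a timestep with a single feature.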
conc = Reshape((1, int(conc.shape[1])))(conc)
conc = Permute((2, 1))(conc)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
class EmbedLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, cfg):
super().__init__(cfg)
self._cfg_ = cfg
def _create_network_(self):
hidden = self._cfg_['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
eb_story = Embedding(self._vocab_size, 64)(story)
eb_story = Dropout(0.3)(eb_story)
eb_question = Embedding(self._vocab_size, 64)(question)
eb_question = Dropout(0.3)(eb_question)
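# Concatenate the embedded story and question along the sequence (time) axis.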
conc = concatenate([eb_story, eb_question], axis=1)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
class ConvLSTM(KerasModel):
"""
Use a simple lstm neural network
"""
@staticmethod
def default_config():
config = KerasModel.default_config()
config['hidden'] = 32
return config
def __init__(self, model_cfg):
super().__init__(model_cfg)
self._cfg = model_cfg
def _create_network_(self):
hidden = self._cfg['hidden']
story = Input((self._story_maxlen, ), name='story')
question = Input((self._query_maxlen, ), name='question')
eb_story = Embedding(self._vocab_size, 64)(story)
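# Stacked 1-D convolutions narrow the story's channel depth 64 -> 32 -> 16.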
eb_story = Convolution1D(64, 3, padding='same')(eb_story)
eb_story = Convolution1D(32, 3, padding='same')(eb_story)
eb_story = Convolution1D(16, 3, padding='same')(eb_story)
# eb_story = Flatten()(eb_story)
eb_question = Embedding(self._vocab_size, 64)(question)
eb_question = Convolution1D(64, 3, padding='same')(eb_question)
eb_question = Convolution1D(32, 3, padding='same')(eb_question)
eb_question = Convolution1D(16, 3, padding='same')(eb_question)
# eb_question = Flatten()(eb_question)
conc = concatenate([eb_story, eb_question], axis=1)
response = LSTM(hidden, dropout=0.2, recurrent_dropout=0.2)(conc)
response = Dense(self._vocab_size, activation='softmax')(response)
self._model = Model(inputs=[story, question], outputs=response)
self._model.compile(optimizer=Adam(lr=2e-4), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
| 2.53125 | 3 |
tests/__init__.py | eloo/sensor.sbahn_munich | 0 | 15 | """Tests for the sbahn_munich integration"""
line_dict = {
"name": "S3",
"color": "#333333",
"text_color": "#444444",
}
| 0.242188 | 0 |
Object_detection_image.py | hiperus0988/pyao | 1 | 23 | <gh_stars>1-10
######## Image Object Detection Using Tensorflow-trained Classifier #########
#
# Author: <NAME>
# Date: 1/15/18
# Description:
# This program uses a TensorFlow-trained classifier to perform object detection.
# It loads the classifier and uses it to perform object detection on an image.
# It draws boxes and scores around the objects of interest in the image.
## Some of the code is copied from Google's example at
## https://github.com/tensorflow/models/blob/master/research/object_detection/object_detection_tutorial.ipynb
## and some is copied from Dat Tran's example at
## https://github.com/datitran/object_detector_app/blob/master/object_detection_app.py
## but I changed it to make it more understandable to me.
# Import packages
import os
import cv2
import numpy as np
import tensorflow as tf
import sys
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# Import utilites
from utils import label_map_util
from utils import visualization_utils as vis_util
# Name of the directory containing the object detection module we're using
MODEL_NAME = 'inference_graph'
IMAGE_NAME = 'test1.jpg'
# Grab path to current working directory
CWD_PATH = os.getcwd()
# Path to frozen detection graph .pb file, which contains the model that is used
# for object detection.
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,'frozen_inference_graph.pb')
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,'training','labelmap.pbtxt')
# Path to image
PATH_TO_IMAGE = os.path.join(CWD_PATH,IMAGE_NAME)
# Number of classes the object detector can identify
NUM_CLASSES = 6
# Load the label map.
# Label maps map indices to category names, so that when our convolution
# network predicts `5`, we know that this corresponds to `king`.
# Here we use internal utility functions, but anything that returns a
# dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# Load the Tensorflow model into memory.
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
sess = tf.Session(graph=detection_graph)
# Define input and output tensors (i.e. data) for the object detection classifier
# Input tensor is the image
image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
# Output tensors are the detection boxes, scores, and classes
# Each box represents a part of the image where a particular object was detected
detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
# Each score represents level of confidence for each of the objects.
# The score is shown on the result image, together with the class label.
detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
# Number of objects detected
num_detections = detection_graph.get_tensor_by_name('num_detections:0')
# Load image using OpenCV and
# expand image dimensions to have shape: [1, None, None, 3]
# i.e. a single-column array, where each item in the column has the pixel RGB value
image = cv2.imread(PATH_TO_IMAGE)
image_expanded = np.expand_dims(image, axis=0)
# Perform the actual detection by running the model with the image as input
(boxes, scores, classes, num) = sess.run(
[detection_boxes, detection_scores, detection_classes, num_detections],
feed_dict={image_tensor: image_expanded})
# Draw the results of the detection (aka 'visualize the results')
vis_util.visualize_boxes_and_labels_on_image_array(
image,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8,
min_score_thresh=0.60)
# All the results have been drawn on image. Now display the image.
cv2.imshow('Object detector', image)
# Press any key to close the image
cv2.waitKey(0)
# Clean up
cv2.destroyAllWindows()
| 2.921875 | 3 |
setup.py | giggslam/python-messengerbot-sdk | 23 | 31 | <reponame>giggslam/python-messengerbot-sdk<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
__version__ = ''
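# Extract __version__ from facebookbot/__about__.py without importing the package.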
with open('facebookbot/__about__.py', 'r') as fd:
reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
for line in fd:
m = reg.match(line)
if m:
__version__ = m.group(1)
break
def _requirements():
with open('requirements.txt', 'r') as fd:
return [name.strip() for name in fd.readlines()]
with open('README.rst', 'r') as fd:
long_description = fd.read()
setup(
name="fbsdk",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/boompieman/fbsdk",
description="Facebook Messaging API SDK for Python",
long_description=long_description,
license='Apache License 2.0',
packages=[
"facebookbot", "facebookbot.models"
],
install_requires=_requirements(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Topic :: Software Development"
]
)
| 1.289063 | 1 |
Courses/1 month/2 week/day 6/Formula.py | emir-naiz/first_git_lesson | 0 | 39 | summary = 0
i = 0
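# Accumulate and print the running sum of 0..4; output: 0, 1, 3, 6, 10.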
while i < 5:
summary = summary + i
print(summary)
i = i + 1
| 1.335938 | 1 |
examples/mouse.py | ginkage/trackball-python | 22 | 47 | <filename>examples/mouse.py
#!/usr/bin/env python
import time
import os
import math
from trackball import TrackBall
print("""Trackball: Mouse
Use the trackball as a mouse in Raspbian, with right-click
when the switch is pressed.
Press Ctrl+C to exit!
""")
trackball = TrackBall(interrupt_pin=4)
trackball.set_rgbw(0, 0, 0, 0)
# Check for xte (used to control mouse)
use_xte = os.system('which xte') == 0
if not use_xte:
raise RuntimeError("xte not found. Did you sudo apt install xautomation?")
while True:
up, down, left, right, switch, state = trackball.read()
# Send movements and clicks to xte
if switch:
cmd = 'xte "mouseclick 1"'
os.system(cmd)
elif right or up or left or down:
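# Square each movement delta for pointer acceleration; copysign keeps direction.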
x = right - left
x = math.copysign(x**2, x)
y = down - up
y = math.copysign(y**2, y)
cmd = 'xte "mousermove {} {}"'.format(int(x), int(y))
os.system(cmd)
time.sleep(0.0001)
| 2.6875 | 3 |
Task2C.py | StanleyHou117/group66_LentTermProject | 0 | 55 | from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_highest_rel_level
def run():
stations = build_station_list()
warning_stations = stations_highest_rel_level(stations,10)
for entry in warning_stations:
print(entry[0].name,entry[1])
if __name__ == "__main__":
print("*** Task 2C: CUED Part IA Flood Warning System ***")
run()
| 1.601563 | 2 |
python/ray/ml/tests/test_torch_trainer.py | mgelbart/ray | 22 | 63 | <reponame>mgelbart/ray<gh_stars>10-100
import pytest
import torch
import ray
from ray.ml.predictors.integrations.torch import TorchPredictor
from ray.ml.train.integrations.torch import TorchTrainer
from ray import train
from ray.ml.examples.pytorch.torch_linear_example import train_func as linear_train_func
@pytest.fixture
def ray_start_4_cpus():
address_info = ray.init(num_cpus=4)
yield address_info
# The code after the yield will run as teardown code.
ray.shutdown()
@pytest.mark.parametrize("num_workers", [1, 2])
def test_torch_linear(ray_start_4_cpus, num_workers):
def train_func(config):
result = linear_train_func(config)
assert len(result) == epochs
assert result[-1]["loss"] < result[0]["loss"]
num_workers = num_workers
epochs = 3
scaling_config = {"num_workers": num_workers}
config = {"lr": 1e-2, "hidden_size": 1, "batch_size": 4, "epochs": epochs}
trainer = TorchTrainer(
train_loop_per_worker=train_func,
train_loop_config=config,
scaling_config=scaling_config,
)
trainer.fit()
def test_torch_e2e(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1)
train.save_checkpoint(model=model)
scaling_config = {"num_workers": 2}
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
predict_dataset = ray.data.range(3)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(result.checkpoint)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
def test_torch_e2e_state_dict(ray_start_4_cpus):
def train_func():
model = torch.nn.Linear(1, 1).state_dict()
train.save_checkpoint(model=model)
scaling_config = {"num_workers": 2}
trainer = TorchTrainer(
train_loop_per_worker=train_func, scaling_config=scaling_config
)
result = trainer.fit()
# If loading from a state dict, a model definition must be passed in.
with pytest.raises(ValueError):
TorchPredictor.from_checkpoint(result.checkpoint)
class TorchScorer:
def __init__(self):
self.pred = TorchPredictor.from_checkpoint(
result.checkpoint, model=torch.nn.Linear(1, 1)
)
def __call__(self, x):
return self.pred.predict(x, dtype=torch.float)
predict_dataset = ray.data.range(3)
predictions = predict_dataset.map_batches(
TorchScorer, batch_format="pandas", compute="actors"
)
assert predictions.count() == 3
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", "-x", __file__]))
| 2.015625 | 2 |
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py | vatervonacht/dagster | 3 | 71 | # pylint: disable=protected-access
import os
import re
import pytest
from dagster import file_relative_path
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance, InstanceRef
from dagster.utils.test import restore_directory
# test that we can load runs and events from an old instance
def test_0_6_4():
test_dir = file_relative_path(__file__, 'snapshot_0_6_4')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
for run in runs:
instance.all_logs(run.run_id)
def test_0_6_6_sqlite_exc():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
# Note that this is a deliberate choice -- old runs are simply invisible, and their
# presence won't raise DagsterInstanceMigrationRequired. This is a reasonable choice since
# the runs.db has moved and otherwise we would have to do a check for the existence of an
# old runs.db every time we accessed the runs. Instead, we'll do this only in the upgrade
# method.
assert len(runs) == 0
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'89296095-892d-4a15-aa0d-9018d1580945). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
def test_0_6_6_sqlite_migrate():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
instance.upgrade()
runs = instance.get_runs()
assert len(runs) == 1
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
| 1.492188 | 1 |
eoxserver/services/ows/wps/v10/encoders/parameters.py | constantinius/eoxserver_combined | 1 | 79 | <reponame>constantinius/eoxserver_combined<filename>eoxserver/services/ows/wps/v10/encoders/parameters.py
#-------------------------------------------------------------------------------
#
# WPS 1.0 parameters' XML encoders
#
# Project: EOxServer <http://eoxserver.org>
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from eoxserver.services.ows.wps.parameters import (
LiteralData, ComplexData, BoundingBoxData,
AllowedAny, AllowedEnum, AllowedRange, AllowedRangeCollection,
AllowedByReference,
)
from eoxserver.services.ows.wps.v10.util import (
OWS, WPS, NIL, ns_ows,
)
#-------------------------------------------------------------------------------
def encode_input_descr(prm):
""" Encode process description input."""
elem = NIL("Input", *_encode_param_common(prm))
elem.attrib["minOccurs"] = ("1", "0")[bool(prm.is_optional)]
elem.attrib["maxOccurs"] = "1"
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, True))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, True))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, True))
return elem
def encode_output_descr(prm):
""" Encode process description output."""
elem = NIL("Output", *_encode_param_common(prm))
if isinstance(prm, LiteralData):
elem.append(_encode_literal(prm, False))
elif isinstance(prm, ComplexData):
elem.append(_encode_complex(prm, False))
elif isinstance(prm, BoundingBoxData):
elem.append(_encode_bbox(prm, False))
return elem
def encode_input_exec(prm):
""" Encode common part of the execure response data input."""
return WPS("Input", *_encode_param_common(prm, False))
def encode_output_exec(prm):
""" Encode common part of the execure response data output."""
return WPS("Output", *_encode_param_common(prm))
def encode_output_def(outdef):
""" Encode the execure response output definition."""
attrib = {}
if outdef.uom is not None:
attrib['uom'] = outdef.uom
if outdef.crs is not None:
attrib['crs'] = outdef.crs
if outdef.mime_type is not None:
attrib['mimeType'] = outdef.mime_type
if outdef.encoding is not None:
attrib['encoding'] = outdef.encoding
if outdef.schema is not None:
attrib['schema'] = outdef.schema
if outdef.as_reference is not None:
attrib['asReference'] = 'true' if outdef.as_reference else 'false'
return WPS("Output", *_encode_param_common(outdef, False), **attrib)
def _encode_param_common(prm, title_required=True):
""" Encode common sub-elements of all XML parameters."""
elist = [OWS("Identifier", prm.identifier)]
if prm.title or title_required:
elist.append(OWS("Title", prm.title or prm.identifier))
if prm.abstract:
elist.append(OWS("Abstract", prm.abstract))
return elist
#-------------------------------------------------------------------------------
def _encode_literal(prm, is_input):
dtype = prm.dtype
elem = NIL("LiteralData" if is_input else "LiteralOutput")
elem.append(OWS("DataType", dtype.name, **{
ns_ows("reference"): "http://www.w3.org/TR/xmlschema-2/#%s"%dtype.name,
}))
if prm.uoms:
elem.append(NIL("UOMs",
NIL("Default", OWS("UOM", prm.uoms[0])),
NIL("Supported", *[OWS("UOM", u) for u in prm.uoms])
))
if is_input:
elem.append(_encode_allowed_value(prm.allowed_values))
if prm.default is not None:
elem.append(NIL("DefaultValue", str(prm.default)))
return elem
def _encode_allowed_value(avobj):
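# Dispatch on the allowed-value type: enumerations become ows:Value elements;
# ranges become ows:Range elements with min/max/spacing and a closure attribute.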
enum, ranges, elist = None, [], []
if isinstance(avobj, AllowedAny):
return OWS("AnyValue")
elif isinstance(avobj, AllowedByReference):
return WPS("ValuesReference", **{
ns_ows("reference"): avobj.url,
"valuesForm": avobj.url,
})
elif isinstance(avobj, AllowedEnum):
enum = avobj
elif isinstance(avobj, AllowedRange):
ranges = [avobj]
elif isinstance(avobj, AllowedRangeCollection):
enum, ranges = avobj.enum, avobj.ranges
else:
raise TypeError("Invalid allowed value object! OBJ=%r"%avobj)
dtype = avobj.dtype
ddtype = dtype.get_diff_dtype()
if enum is not None:
elist.extend(OWS("Value", dtype.encode(v)) for v in enum.values)
for range_ in ranges:
attr, elms = {}, []
if range_.closure != 'closed':
attr = {ns_ows("rangeClosure"): range_.closure}
if range_.minval is not None:
elms.append(OWS("MinimumValue", dtype.encode(range_.minval)))
if range_.maxval is not None:
elms.append(OWS("MaximumValue", dtype.encode(range_.maxval)))
if range_.spacing is not None:
elms.append(OWS("Spacing", ddtype.encode(range_.spacing)))
elist.append(OWS("Range", *elms, **attr))
return OWS("AllowedValues", *elist)
#-------------------------------------------------------------------------------
def _encode_complex(prm, is_input):
return NIL("ComplexData" if is_input else "ComplexOutput",
NIL("Default", _encode_format(prm.default_format)),
NIL("Supported", *[_encode_format(f) for f in prm.formats.itervalues()])
)
def _encode_format(frmt):
elem = NIL("Format", NIL("MimeType", frmt.mime_type))
if frmt.encoding is not None:
elem.append(NIL("Encoding", frmt.encoding))
if frmt.schema is not None:
elem.append(NIL("Schema", frmt.schema))
return elem
#-------------------------------------------------------------------------------
def _encode_bbox(prm, is_input):
return NIL("BoundingBoxData" if is_input else "BoundingBoxOutput",
NIL("Default", NIL("CRS", prm.encode_crs(prm.default_crs))),
NIL("Supported", *[NIL("CRS", prm.encode_crs(crs)) for crs in prm.crss])
)
| 1.15625 | 1 |
Complab assignment.py | peteboi/Python-Scripts | 0 | 87 | <reponame>peteboi/Python-Scripts<gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
def orbit(u):
x,y,v_x,v_y = u
r=np.hypot(x,y)
#r= 1.521e+06
#M,G=1.989e+30,6.7e-11
M,G=20,110
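# Inverse-square gravity: acceleration components are -G*M/r**3 * (x, y).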
f=G*M/r**3
return np.array([v_x,v_y,-f*x,-f*y])
def RK4(f,u,dt):
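# Classical fourth-order Runge-Kutta step:
# u_{n+1} = u_n + (k1 + 2*k2 + 2*k3 + k4)/6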
k1=f(u)*dt
k2=f(u+0.5*k1)*dt
k3=f(u+0.5*k2)*dt
k4=f(u+k3)*dt
return u+(k1+2*k2+2*k3+k4)/6
def RK4_int(f,y0,tspan):
y=np.zeros([len(tspan),len(y0)])
y[0,:] =y0
for k in range (1,len(tspan)):
y[k,:] = RK4(f,y[k-1],tspan[k]-tspan[k-1])
return y
dt=0.1
t = np.arange(0,10,dt)
y0=np.array([10, 0.0, 10, 10])
sol_rk4=RK4_int(orbit,y0,t)
x,y,v_x,v_y = sol_rk4.T
plt.grid()
plt.plot(x,y)
plt.show()
| 2.296875 | 2 |
pipelines/trackml.py | texasmichelle/kubeflow-cern | 4 | 95 | #!/usr/bin/env python3
import kfp.dsl as dsl
import kfp.gcp as gcp
# Pipeline input variables.
KUBECTL_IMAGE = "gcr.io/mcas-195423/trackml_master_kfp_kubectl"
KUBECTL_IMAGE_VERSION = "1"
TRACKML_IMAGE = "gcr.io/mcas-195423/trackml_master_trackml"
TRACKML_IMAGE_VERSION = "1"
def train_op():
return dsl.ContainerOp(
name='train',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["train.py"],
).apply(gcp.use_gcp_secret())  # .set_gpu_limit(1)
def serve_op():
return dsl.ContainerOp(
name='serve',
image="{}:{}".format(KUBECTL_IMAGE, KUBECTL_IMAGE_VERSION),
arguments=[
"/src/set_kubectl.sh",
"--namespace", "kubeflow",
"--command", "apply -f /src/k8s/serve.yaml",
]
).apply(gcp.use_gcp_secret())
def resultsgen_op():
return dsl.ContainerOp(
name='resultsgen',
image="{}:{}".format(TRACKML_IMAGE, TRACKML_IMAGE_VERSION),
command=["python"],
arguments=["resultsgen.py"],
).apply(gcp.use_gcp_secret())
@dsl.pipeline(
name='trackml',
description='A pipeline that predicts particle tracks'
)
def trackml():
train = train_op()
serve = serve_op()
serve.after(train)
resultsgen = resultsgen_op()
resultsgen.after(serve)
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(trackml, __file__ + '.tar.gz')
| 1.367188 | 1 |
multithreaded_webcrawler.py | the-muses-ltd/Multithreaded-Webcrawler-Cassandra- | 0 | 103 | # This is a reusable webcraawler architecture that can be adapted to scrape any webstie.
# RESULTS:
# Roughly 24 seconds per thousand courses scraped for ThreadPoolExecutor vs 63s for unthreaded script.
# This is a very basic implementation of multithreading in order to show the proof of concept, but is a good base to build off of.
import requests
from bs4 import BeautifulSoup
import csv
from concurrent.futures import ProcessPoolExecutor, as_completed, ThreadPoolExecutor
import time
import logging
from mitopencourseware_crawler_worker import mit_crawler
def courses_spider(max_pages):
data_to_csv = [] #holds all data to send to csv
print("Webcrawler workers have started, please wait while we finish crawling...")
# remove max pages loop (unnecessary)
page = 1
while page <= max_pages:
url = 'https://ocw.mit.edu/courses/'
source_code = requests.get(url)
plain_text = source_code.text
soup = BeautifulSoup(plain_text, 'html.parser')
# Multithread only the work:
# Tuning is required to find the most efficient number of workers in the thread pool.
with ThreadPoolExecutor(max_workers=30) as executor:
start = time.time()
futures = [ executor.submit(work, link) for link in soup.findAll('h4', {'class': 'course_title'}, limit=100) ]
for result in as_completed(futures):
data_to_csv.append(result.result())
end = time.time()
print("Time Taken to complete: {:.6f}s".format(end-start))
print("Courses extracted: ", len(data_to_csv))
page += 1
export_to_csv(data_to_csv)
def work(link):
# replace this function with the specific crawler you want to use:
return mit_crawler(link)
# Exports data to a formatted csv file; this will be replaced with multithreaded API calls to the Cassandra Prisma Database
# or, on the cloud in production, it will be sent to the S3 temporary database to be picked up by the AWS Lambda function which will push it to the Cassandra Database
def export_to_csv(csv_data):
with open('web_crawl_data.csv',mode='w') as csv_file:
field_names = ['Title','URL extension','External Website Logo','URL(href)','Description','Course logo URL']
csv_writer = csv.DictWriter(csv_file, fieldnames=field_names)#delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writeheader()
for course in csv_data:
course_data = {
'Title':course[0],
'URL extension':course[1],
'External Website Logo':course[2],
'URL(href)':course[3],
'Description':course[4],
'Course logo URL':course[5],
}
csv_writer.writerow(course_data)
| 2.59375 | 3 |
python_utilities/plotting/util.py | sdaxen/python_utilities | 2 | 111 | <reponame>sdaxen/python_utilities
"""Utility functions for plotting.
Author: <NAME>
E-mail: <EMAIL>"""
from collections import deque
import numpy as np
def rgb_to_hsv(rgb):
"""Convert RGB colors to HSV colors."""
r, g, b = tuple(map(float, rgb))
if any([r > 1, g > 1, b > 1]):
r /= 255.
g /= 255.
b /= 255.
mmax = max(r, g, b)
mmin = min(r, g, b)
c = mmax - mmin
if (c == 0.):
hp = 0.
elif (mmax == r):
hp = ((g - b) / c) % 6
elif (mmax == g):
hp = ((b - r) / c) + 2
elif (mmax == b):
hp = ((r - g) / c) + 4
h = 60 * hp
v = mmax
if (c == 0):
s = 0
else:
s = c / v
return (h, s, v)
def hsv_to_rgb(hsv):
"""Convert HSV colors to RGB colors."""
h, s, v = tuple(map(float, hsv))
c = v * s
m = v - c
hp = h / 60.
x = c * (1. - abs((hp % 2) - 1.))
hp = int(hp)
rgb = deque((c + m, x + m, m))
if (hp % 2):
rgb.reverse()
rgb.rotate((hp - 3) // 2)  # integer rotation; deque.rotate() rejects floats in Python 3
else:
rgb.rotate(hp // 2)
return tuple(rgb)
def rgb_to_yuv(rgb):
"""Convert RGB colors to Y'UV colors, useful for comparison."""
rgbv = np.array(rgb).reshape(3, 1)
if np.any(rgbv > 1.):
rgbv = rgbv / 255.
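# BT.601-based RGB -> Y'UV transform matrix (luma weights .299/.587/.114).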
yuv = np.dot(np.array([[ .299, .587, .114],
[-.14713, -.28886, .436],
[ .615, -.51499, -.10001]], dtype=np.double),
rgbv)
return list(yuv)
def yuv_to_rgb(yuv):
"""Convert Y'UV colors to RGB colors."""
yuvv = np.array(yuv).reshape(3, 1)
rgb = np.dot(np.array([[1., 0., 1.13983],
[1., -.39465, -.58060],
[1., 2.03211, 0.]], dtype=np.double),
yuvv)
return list(rgb)
def compute_yuv_dist(rgb1, rgb2):
"""Compute Euclidean Y'UV distance between RGB colors."""
yuv1 = rgb_to_yuv(rgb1)
yuv2 = rgb_to_yuv(rgb2)
return float(sum((np.array(yuv1) - np.array(yuv2))**2)**.5)
def lighten_rgb(rgb, p=0.):
"""Lighten RGB colors by percentage p of total."""
h, s, v = rgb_to_hsv(rgb)
hsv = (h, s, min(1, v + p))
return hsv_to_rgb(hsv)
| 2.421875 | 2 |
qiskit_metal/qlibrary/qubits/Transmon_Interdigitated.py | PatrickSJacobs/qiskit-metal | 0 | 119 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
#from math import *
from math import sin, cos
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core.base import QComponent
import numpy as np
#from ... import config
#if not config.is_building_docs():
# from qiskit_metal import is_true
class TransmonInterdigitated(QComponent):
"""
The base "TransmonInterdigitated" inherits the "QComponent" class.
This creates a transmon pocket with two large pads connected by a Josephson
junction. Both pads have four interdigitated "fingers" which increase the
capacitance of the structure. There are three coupling capacitor pads with qpins
defined; these can be connected to other structures in a design using CPWs.
Default Options:
* pad_width: '1000um' -- width of the large rectangular pads on either side
of the junction
* pad_height: '300um' -- height of the large rectangular pads on either side
of the junction
* finger_width: '50um' -- width of the "finger" on either side of the junction
* finger_height: '100um' -- height of the "finger" on the side of the junction
* finger_space: '50um' -- height of the Josephson Junction (equivalently; space
between two fingers)
* pad_pos_x: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* pad_pos_y: '0um' -- the internal coordinate defining the center of the bottom
rectangular pad
* comb_width: '50um' -- the width of the four interdigitated combs connected to
either pad
* comb_space_vert: '50um' -- the space between the edge of a comb and the edge of
the opposite rectangular pad
* comb_space_hor: '50um' -- the space between adjacent interdigitated comb structures
* jj_width: '20um' -- the width of the Josephson Junction located between the two
fingers of the device
* cc_space: '50um' -- the space between the lower rectangular pad and the coupling
capacitor below it
* cc_width: '100um' -- the width of the coupling capacitor located below the bottom
rectangular pad
* cc_height: '100um' -- the height of the coupling capacitor located below the bottom
rectangular pad
* cc_topleft_space: '50um' -- the space between the upper rectangular pad and the top
left coupling capacitor
* cc_topleft_width: '100um' -- the width of the top left coupling capacitor pad
* cc_topleft_height: '100um' -- the height of the top left coupling capacitor pad
* cc_topright_space: '50um' -- the space between the upper rectangular pad and the
top right coupling capacitor
* cc_topright_width: '100um' -- the width of the top right coupling capacitor pad
* cc_topright_height: '100um' -- the height of the top right coupling capacitor pad
* position_x: '0um' -- the x-coordinate defining the center of the transmon pocket
on the chip
* position_y: '0um' -- the y-coordinate defining the center of the transmon pocket
on the chip
* rotation: '0.0' -- the angle at which the entire structure is rotated
* rotation_top_pad: '180' -- internal coordinate defining the angle of rotation
between top and bottom pads
* layer: '1' -- all objects are drawn assuming they are part of the same layer on
the chip
"""
# Default drawing options
default_options = Dict(pad_width='1000um',
pad_height='300um',
finger_width='50um',
finger_height='100um',
finger_space='50um',
pad_pos_x='0um',
pad_pos_y='0um',
comb_width='50um',
comb_space_vert='50um',
comb_space_hor='50um',
jj_width='20um',
cc_space='50um',
cc_width='100um',
cc_height='100um',
cc_topleft_space='50um',
cc_topleft_width='100um',
cc_topleft_height='100um',
cc_topright_space='50um',
cc_topright_width='100um',
cc_topright_height='100um',
position_x='0um',
position_y='0um',
rotation='0.0',
rotation_top_pad='180',
layer='1')
"""Default drawing options"""
# Name prefix of component, if user doesn't provide name
component_metadata = Dict(short_name='component')
"""Component metadata"""
def make(self):
"""Convert self.options into QGeometry."""
p = self.parse_options() # Parse the string options into numbers
# draw the lower pad as a rectangle
pad_lower = draw.rectangle(p.pad_width, p.pad_height, p.pad_pos_x,
p.pad_pos_y)
# draw the lower finger as a rectangle
finger_lower = draw.rectangle(
p.finger_width, p.finger_height, p.pad_pos_x, p.pad_pos_y +
0.49999 * (p.pad_height) + 0.49999 * (p.finger_height))
# draw the Josephson Junction
rect_jj = draw.rectangle(
p.jj_width, p.finger_space, p.pad_pos_x,
0.5 * (p.pad_height) + p.finger_height + 0.5 * (p.finger_space))
# draw the first comb to the right of the lower finger as a rectangle
comb1_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(0.5 * p.finger_width + p.comb_space_hor + 0.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the right of the lower finger by translating the first comb
comb2_lower = draw.translate(comb1_lower,
2.0 * (p.comb_space_hor + p.comb_width),
0.0)
# draw the first comb to the left of the lower finger
comb3_lower = draw.rectangle(
p.comb_width,
(2 * p.finger_height + p.finger_space - p.comb_space_vert),
(-0.5 * p.finger_width - 2.0 * p.comb_space_hor -
1.5 * p.comb_width),
(0.5 * p.pad_height + 0.5 *
(p.pad_pos_y + 0.5 * (p.pad_height) + 0.5 * (p.finger_height))))
# draw the second comb to the left of the lower finger
comb4_lower = draw.translate(comb3_lower,
-2.0 * (p.comb_space_hor + p.comb_width),
0.0)
coupling_capacitor = draw.rectangle(
p.cc_width, p.cc_height, p.pad_pos_x,
p.pad_pos_y - 0.5 * (p.pad_height) - p.cc_space - 0.5 * p.cc_height)
cc_topleft = draw.rectangle(
p.cc_topleft_width, p.cc_topleft_height,
p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space + 0.5 * p.cc_topleft_height)
cc_topright = draw.translate(
cc_topleft,
p.pad_width - 0.5 * p.cc_topleft_width - 0.5 * p.cc_topright_width,
0.0)
# merge the bottom elements
bottom = draw.union(pad_lower, finger_lower, comb1_lower, comb2_lower,
comb3_lower, comb4_lower)
# create the top portion of the comb by translating and rotating
# the bottom portion of the comb
top = draw.translate(bottom, 0.0, p.pad_height + p.finger_space)
top = draw.rotate(top, p.rotation_top_pad)
# merge everything into a single design
design = draw.union(bottom, top, rect_jj, coupling_capacitor,
cc_topleft, cc_topright)
# draw the transmon pocket bounding box
pocket = draw.rectangle(1.5 * p.pad_width, 5.0 * p.pad_height)
# the origin is originally set to the middle of the lower pad.
# Let's move it to the center of the JJ.
design = draw.translate(
design, 0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# now translate the final structure according to the user input
design = draw.rotate(design, p.rotation, origin=(0, 0))
design = draw.translate(design, p.position_x, p.position_y)
pocket = draw.rotate(pocket, p.rotation, origin=(0, 0))
pocket = draw.translate(pocket, p.position_x, p.position_y)
geom = {'design': design}
geom_pocket = {'pocket': pocket}
self.add_qgeometry('poly', geom, layer=p.layer, subtract=False)
self.add_qgeometry('poly', geom_pocket, layer=p.layer, subtract=True)
###################################################################
# Add Qpin connections for coupling capacitors
# define a function that both rotates and translates the
# qpin coordinates
def qpin_rotate_translate(x):
""" This function rotates the coordinates of the three qpins
according to the user inputs for "position_x", "position_y"
and "rotation".
"""
y = list(x)
z = [0.0, 0.0]
z[0] = y[0] * cos(p.rotation * 3.14159 / 180) - y[1] * sin(
p.rotation * 3.14159 / 180)
z[1] = y[0] * sin(p.rotation * 3.14159 / 180) + y[1] * cos(
p.rotation * 3.14159 / 180)
z[0] = z[0] + p.position_x
z[1] = z[1] + p.position_y
x = (z[0], z[1])
return x
# Add Qpin connections for the bottom coupling capacitor
qp1a = (0.0,
-0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp1b = (0.0, -0.5 * p.pad_height - p.cc_space - p.cc_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
# rotate and translate the qpin coordinates
qp1a = qpin_rotate_translate(qp1a)
qp1b = qpin_rotate_translate(qp1b)
self.add_pin('pin1',
points=np.array([qp1a, qp1b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top left coupling capacitor
qp2a = (p.pad_pos_x - 0.5 * p.pad_width + 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp2b = (p.pad_pos_x - 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp2a = qpin_rotate_translate(qp2a)
qp2b = qpin_rotate_translate(qp2b)
self.add_pin('pin2',
points=np.array([qp2a, qp2b]),
width=0.01,
input_as_norm=True)
# Add Qpin connections for top right coupling capacitor
qp3a = (p.pad_pos_x + 0.5 * p.pad_width - 0.5 * p.cc_topleft_width,
p.pad_pos_y + 1.5 * p.pad_height + 2.0 * p.finger_height +
p.finger_space + p.cc_topleft_space +
0.5 * p.cc_topleft_height - 0.5 * p.pad_height -
p.finger_height - 0.5 * p.finger_space)
qp3b = (p.pad_pos_x + 0.5 * p.pad_width, p.pad_pos_y +
1.5 * p.pad_height + 2.0 * p.finger_height + p.finger_space +
p.cc_topleft_space + 0.5 * p.cc_topleft_height -
0.5 * p.pad_height - p.finger_height - 0.5 * p.finger_space)
qp3a = qpin_rotate_translate(qp3a)
qp3b = qpin_rotate_translate(qp3b)
self.add_pin('pin3',
points=np.array([qp3a, qp3b]),
width=0.01,
input_as_norm=True)
| 1.859375 | 2 |
pandas/core/apply.py | AakankshaAshok/pandas | 0 | 127 | import inspect
import numpy as np
from pandas._libs import reduction as libreduction
from pandas.util._decorators import cache_readonly
from pandas.core.dtypes.common import (
is_dict_like,
is_extension_array_dtype,
is_list_like,
is_sequence,
)
from pandas.core.dtypes.generic import ABCSeries
def frame_apply(
obj,
func,
axis=0,
raw=False,
result_type=None,
ignore_failures=False,
args=None,
kwds=None,
):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(
obj,
func,
raw=raw,
result_type=result_type,
ignore_failures=ignore_failures,
args=args,
kwds=kwds,
)
class FrameApply:
def __init__(self, obj, func, raw, result_type, ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, "reduce", "broadcast", "expand"]:
raise ValueError(
"invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}"
)
self.result_type = result_type
# curry if needed
if (kwds or args) and not isinstance(func, (np.ufunc, str)):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis, *self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, str):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = inspect.getfullargspec(func)
if "axis" in sig.args:
self.kwds["axis"] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all="ignore"):
results = self.obj._data.apply("apply", func=self.f)
return self.obj._constructor(
data=results, index=self.index, columns=self.columns, copy=False
)
# broadcasting
if self.result_type == "broadcast":
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ["reduce", None]:
return self.obj.copy()
# we may need to infer
should_reduce = self.result_type == "reduce"
from pandas import Series
if not should_reduce:
try:
r = self.f(Series([]))
except Exception:
pass
else:
should_reduce = not isinstance(r, Series)
if should_reduce:
if len(self.agg_axis):
r = self.f(Series([]))
else:
r = np.nan
return self.obj._constructor_sliced(r, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = libreduction.compute_reduction(self.values, self.f, axis=self.axis)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result, index=self.index, columns=self.columns)
else:
return self.obj._constructor_sliced(result, index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(
result_values, index=target.index, columns=target.columns
)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (
self.result_type in ["reduce", None]
and not self.dtypes.apply(is_extension_array_dtype).any()
# Disallow complex_internals since libreduction shortcut
# cannot handle MultiIndex
and not self.agg_axis._has_complex_internals
):
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
# Preserve subclass for e.g. test_subclassed_apply
dummy = self.obj._constructor_sliced(
empty_arr, index=index, dtype=values.dtype
)
try:
result = libreduction.compute_reduction(
values, self.f, axis=self.axis, dummy=dummy, labels=labels
)
except ValueError as err:
if "Function does not reduce" not in str(err):
# catch only ValueError raised intentionally in libreduction
raise
except TypeError:
# e.g. test_apply_ignore_failures we just ignore
if not self.ignore_failures:
raise
except ZeroDivisionError:
# reached via numexpr; fall back to python implementation
pass
else:
return self.obj._constructor_sliced(result, index=labels)
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
except Exception:
pass
else:
keys.append(v.name)
successes.append(i)
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and 0 in results and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super().apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1) for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
if len(result.index) == len(self.res_columns):
result.index = self.res_columns
if len(result.columns) == len(self.res_index):
result.columns = self.res_index
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super().apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (
constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values, self.index))
)
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == "expand":
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
| 1.796875 | 2 |
mypy/server/aststrip.py | mmaryada27/mypy | 0 | 135 | <gh_stars>0
"""Strip/reset AST in-place to match state after semantic analysis pass 1.
Fine-grained incremental mode reruns semantic analysis (passes 2 and 3)
and type checking for *existing* AST nodes (targets) when changes are
propagated using fine-grained dependencies. AST nodes attributes are
often changed during semantic analysis passes 2 and 3, and running
semantic analysis again on those nodes would produce incorrect
results, since these passes aren't idempotent. This pass resets AST
nodes to reflect the state after semantic analysis pass 1, so that we
can rerun semantic analysis.
(The above is in contrast to behavior with modules that have source code
changes, for which we reparse the entire module and reconstruct a fresh
AST. No stripping is required in this case. Both modes of operation should
have the same outcome.)
Notes:
* This is currently pretty fragile, as we must carefully undo whatever
changes can be made in semantic analysis passes 2 and 3, including changes
to symbol tables.
* We reuse existing AST nodes because it makes it relatively straightforward
to reprocess only a single target within a module efficiently. If there
was a way to parse a single target within a file, in time proportional to
the size of the target, we'd rather create fresh AST nodes than strip them.
Alas, no such facility exists and building it is non-trivial.
* Currently we don't actually reset all changes, but only those known to affect
non-idempotent semantic analysis behavior.
TODO: It would be more principled and less fragile to reset everything
changed in semantic analysis pass 2 and later.
* Reprocessing may recreate AST nodes (such as Var nodes, and TypeInfo nodes
created with assignment statements) that will get different identities from
the original AST. Thus running an AST merge is necessary after stripping,
even though some identities are preserved.
"""
import contextlib
from typing import Union, Iterator, Optional
from mypy.nodes import (
Node, FuncDef, NameExpr, MemberExpr, RefExpr, MypyFile, FuncItem, ClassDef, AssignmentStmt,
ImportFrom, Import, TypeInfo, SymbolTable, Var, CallExpr, Decorator, OverloadedFuncDef,
SuperExpr, UNBOUND_IMPORTED, GDEF, MDEF, IndexExpr
)
from mypy.traverser import TraverserVisitor
def strip_target(node: Union[MypyFile, FuncItem, OverloadedFuncDef]) -> None:
"""Reset a fine-grained incremental target to state after semantic analysis pass 1.
NOTE: Currently we opportunistically only reset changes that are known to otherwise
cause trouble.
"""
visitor = NodeStripVisitor()
if isinstance(node, MypyFile):
visitor.strip_file_top_level(node)
else:
node.accept(visitor)
class NodeStripVisitor(TraverserVisitor):
def __init__(self) -> None:
self.type = None # type: Optional[TypeInfo]
self.names = None # type: Optional[SymbolTable]
self.is_class_body = False
# By default, process function definitions. If False, don't -- this is used for
# processing module top levels.
self.recurse_into_functions = True
def strip_file_top_level(self, file_node: MypyFile) -> None:
"""Strip a module top-level (don't recursive into functions)."""
self.names = file_node.names
self.recurse_into_functions = False
file_node.accept(self)
def visit_class_def(self, node: ClassDef) -> None:
"""Strip class body and type info, but don't strip methods."""
node.info.type_vars = []
node.info.bases = []
node.info.abstract_attributes = []
node.info.mro = []
node.info.add_type_vars()
node.info.tuple_type = None
node.info.typeddict_type = None
node.info._cache = set()
node.info._cache_proper = set()
node.base_type_exprs.extend(node.removed_base_type_exprs)
node.removed_base_type_exprs = []
with self.enter_class(node.info):
super().visit_class_def(node)
def visit_func_def(self, node: FuncDef) -> None:
if not self.recurse_into_functions:
return
node.expanded = []
node.type = node.unanalyzed_type
with self.enter_method(node.info) if node.info else nothing():
super().visit_func_def(node)
def visit_decorator(self, node: Decorator) -> None:
node.var.type = None
for expr in node.decorators:
expr.accept(self)
if self.recurse_into_functions:
node.func.accept(self)
def visit_overloaded_func_def(self, node: OverloadedFuncDef) -> None:
if not self.recurse_into_functions:
return
if node.impl:
# Revert change made during semantic analysis pass 2.
assert node.items[-1] is not node.impl
node.items.append(node.impl)
super().visit_overloaded_func_def(node)
@contextlib.contextmanager
def enter_class(self, info: TypeInfo) -> Iterator[None]:
# TODO: Update and restore self.names
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = True
yield
self.type = old_type
self.is_class_body = old_is_class_body
@contextlib.contextmanager
def enter_method(self, info: TypeInfo) -> Iterator[None]:
# TODO: Update and restore self.names
old_type = self.type
old_is_class_body = self.is_class_body
self.type = info
self.is_class_body = False
yield
self.type = old_type
self.is_class_body = old_is_class_body
def visit_assignment_stmt(self, node: AssignmentStmt) -> None:
node.type = node.unanalyzed_type
if self.type and not self.is_class_body:
# TODO: Handle multiple assignment
if len(node.lvalues) == 1:
lvalue = node.lvalues[0]
if isinstance(lvalue, MemberExpr) and lvalue.is_new_def:
# Remove defined attribute from the class symbol table. If is_new_def is
# true for a MemberExpr, we know that it must be an assignment through
# self, since only those can define new attributes.
del self.type.names[lvalue.name]
super().visit_assignment_stmt(node)
def visit_import_from(self, node: ImportFrom) -> None:
if node.assignments:
node.assignments = []
else:
if self.names:
# Reset entries in the symbol table. This is necessary since
# otherwise the semantic analyzer will think that the import
# assigns to an existing name instead of defining a new one.
for name, as_name in node.names:
imported_name = as_name or name
symnode = self.names[imported_name]
symnode.kind = UNBOUND_IMPORTED
symnode.node = None
def visit_import(self, node: Import) -> None:
if node.assignments:
node.assignments = []
else:
if self.names:
# Reset entries in the symbol table. This is necessary since
# otherwise the semantic analyzer will think that the import
# assigns to an existing name instead of defining a new one.
for name, as_name in node.ids:
imported_name = as_name or name
initial = imported_name.split('.')[0]
symnode = self.names[initial]
symnode.kind = UNBOUND_IMPORTED
symnode.node = None
def visit_name_expr(self, node: NameExpr) -> None:
# Global assignments are processed in semantic analysis pass 1, and we
# only want to strip changes made in passes 2 or later.
if not (node.kind == GDEF and node.is_new_def):
# Remove defined attributes so that they can recreated during semantic analysis.
if node.kind == MDEF and node.is_new_def:
self.strip_class_attr(node.name)
self.strip_ref_expr(node)
def visit_member_expr(self, node: MemberExpr) -> None:
self.strip_ref_expr(node)
# These need to cleared for member expressions but not for other RefExprs since
# these can change based on changed in a base class.
node.is_new_def = False
node.is_inferred_def = False
if self.is_duplicate_attribute_def(node):
# This is marked as an instance variable definition but a base class
# defines an attribute with the same name, and we can't have
# multiple definitions for an attribute. Defer to the base class
# definition.
self.strip_class_attr(node.name)
node.def_var = None
super().visit_member_expr(node)
def visit_index_expr(self, node: IndexExpr) -> None:
node.analyzed = None # was a type alias
super().visit_index_expr(node)
def strip_class_attr(self, name: str) -> None:
if self.type is not None:
del self.type.names[name]
def is_duplicate_attribute_def(self, node: MemberExpr) -> bool:
if not node.is_inferred_def:
return False
assert self.type is not None, "Internal error: Member defined outside class"
if node.name not in self.type.names:
return False
return any(info.get(node.name) is not None for info in self.type.mro[1:])
def strip_ref_expr(self, node: RefExpr) -> None:
node.kind = None
node.node = None
node.fullname = None
node.is_new_def = False
node.is_inferred_def = False
def visit_call_expr(self, node: CallExpr) -> None:
node.analyzed = None
super().visit_call_expr(node)
def visit_super_expr(self, node: SuperExpr) -> None:
node.info = None
super().visit_super_expr(node)
# TODO: handle more node types
def is_self_member_ref(memberexpr: MemberExpr) -> bool:
"""Does memberexpr refer to an attribute of self?"""
# TODO: Merge with is_self_member_ref in semanal.py.
if not isinstance(memberexpr.expr, NameExpr):
return False
node = memberexpr.expr.node
return isinstance(node, Var) and node.is_self
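# Illustrative note (added): for a method body containing "self.x = 1", the
# MemberExpr for "self.x" satisfies is_self_member_ref above, whereas "obj.x"
# (whose base is a non-self NameExpr) does not.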
@contextlib.contextmanager
def nothing() -> Iterator[None]:
yield
| 1.757813 | 2 |
src/main/python/taf/foundation/api/ui/aut.py | WesleyPeng/uiXautomation | 6 | 143 | # Copyright (c) 2017-2018 {Flair Inc.} <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from taf.foundation.utils import ConnectionCache
class AUT(object):
cache = None
current = None
def __init__(
self,
name=None,
identifier=None,
**kwargs
):
if not AUT.cache:
AUT.cache = ConnectionCache(identifier)
self.id = self.cache.register(
self._create_instance(name, **kwargs),
identifier
)
AUT.current = self
@staticmethod
def launch(app_location, **kwargs):
raise NotImplementedError(
'Launch application'
)
def activate(self):
if self.id != self.cache.current_key:
self.cache.current_key = self.id
AUT.current = self
def take_screenshot(self):
self.activate()
return self.get_screenshot_data()
def close(self):
self.cache.close(self.id)
if not self.cache.current:
AUT.cache = None
AUT.current = None
def get_screenshot_data(self):
raise NotImplementedError(
'Get screenshot data from AUT'
)
def _create_instance(self, name, **kwargs):
raise NotImplementedError(
'Create instance of AUT'
)
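# --- illustrative sketch (added; not part of the original module) ---
# A minimal subclass showing how the abstract hooks above are meant to be
# filled in; DummyApp and its trivial return values are assumptions for
# demonstration only.
class DummyApp(AUT):
    def _create_instance(self, name, **kwargs):
        # A real implementation would launch the application under test and
        # return a handle to it; any object satisfies the connection cache.
        return object()
    def get_screenshot_data(self):
        # A real implementation would return raw image bytes.
        return b''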
| 1.171875 | 1 |
tests/test_prep_read.py | taruma/hidrokit | 5 | 151 | """Test for .prep.read module
"""
from hidrokit.prep import read
import numpy as np
import pandas as pd
A = pd.DataFrame(
data=[
[1, 3, 4, np.nan, 2, np.nan],
[np.nan, 2, 3, np.nan, 1, 4],
[2, np.nan, 1, 3, 4, np.nan]
],
columns=['A', 'B', 'C', 'D', 'E', 'F']
)
A_date = A.set_index(pd.date_range("20190617", "20190619"))
res_A_number = {'A': [1], 'B': [2], 'C': [], 'D': [0, 1], 'E': [], 'F': [0, 2]}
res_A_date = {'A': ['0618'], 'B': ['0619'], 'C': [],
'D': ['0617', '0618'], 'E': [], 'F': ['0617', '0619']}
def test_read_number():
test = read.missing_row(A, date_index=False)
assert test.items() == res_A_number.items()
def test_read_date():
test = read.missing_row(A_date, date_format="%m%d")
assert test.items() == res_A_date.items()
| 1.414063 | 1 |
superneurons/tools/img_val/main.py | Phaeton-lang/baselines | 0 | 159 | <reponame>Phaeton-lang/baselines<filename>superneurons/tools/img_val/main.py<gh_stars>0
# Created by ay27 at 17/4/9
import os
import matplotlib.pyplot as plt
import struct
import numpy as np
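# File layout (comment added; inferred from the reads below): four
# little-endian uint64 header fields (n, c, h, w) followed by the raw
# payload -- per-image channel-major pixel bytes for image files, or
# little-endian int32 values for label files.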
def trans(row):
return list(map(lambda x: np.uint8(x), row))
def read_image(filename):
with open(filename, mode='rb') as file:
n = file.read(8)
n = struct.unpack("<Q", n)[0]
c = file.read(8)
c = struct.unpack("<Q", c)[0]
h = file.read(8)
h = struct.unpack("<Q", h)[0]
w = file.read(8)
w = struct.unpack("<Q", w)[0]
print(n, c, h, w)
for ii in range(n):
r = trans(file.read(h*w))
g = trans(file.read(h*w))
b = trans(file.read(h*w))
if ii == 100:
break
print(file.tell() == os.fstat(file.fileno()).st_size)
img = np.array([r,g,b]).transpose(1,0).reshape(h,w,c)
print(img.shape)
plt.imshow(img)
plt.show()
def read_label(path, ground_truth=None):
with open(path, 'rb') as file:
n = file.read(8)
n = struct.unpack("<Q", n)[0]
c = file.read(8)
c = struct.unpack("<Q", c)[0]
h = file.read(8)
h = struct.unpack("<Q", h)[0]
w = file.read(8)
w = struct.unpack("<Q", w)[0]
print(n, c, h, w)
label = []
sets = set()
while not (file.tell() == os.fstat(file.fileno()).st_size):
ch = file.read(4)
num = struct.unpack("<l", ch)[0]
label.append(num)
sets.add(num)
# print(file.tell() == os.fstat(file.fileno()).st_size)
print(label)
print(len(label))
# print(label[900],label[901], label[902], label[903], label[904])
return label
# if ground_truth:
# g = []
# with open(ground_truth) as file:
# for line in file:
# g.append(int(line.split(' ')[1]))
# np.testing.assert_array_equal(g, label)
if __name__ == '__main__':
# read_image('../../data/ilsvrc2012/img.bin')
# read_label('../../data/ilsvrc2012/label.bin', '../../data/ilsvrc2012/val.txt')
# read_image('../../build/cifar100_train_image.bin')
# read_label('../../build/cifar100_train_label.bin')
read_image('../../build/val_data_8.bin')
for i in range(10):
read_label('../../build/val_label_%d.bin' % i)
# labels = []
# for i in range(10):
# labels.append(read_label('../../build/val_label_%d.bin' % i))
#
# ground = []
# with open('../../build/shuffled_list') as file:
    # ground.append()
| 2.0625 | 2 |
napari/_qt/dialogs/qt_plugin_dialog.py | kne42/napari | 0 | 167 | <filename>napari/_qt/dialogs/qt_plugin_dialog.py
import os
import sys
from pathlib import Path
from typing import Sequence
from napari_plugin_engine.dist import standard_metadata
from napari_plugin_engine.exceptions import PluginError
from qtpy.QtCore import QEvent, QProcess, QProcessEnvironment, QSize, Qt, Slot
from qtpy.QtGui import QFont, QMovie
from qtpy.QtWidgets import (
QCheckBox,
QDialog,
QFrame,
QHBoxLayout,
QLabel,
QLineEdit,
QListWidget,
QListWidgetItem,
QPushButton,
QSizePolicy,
QSplitter,
QTextEdit,
QVBoxLayout,
QWidget,
)
import napari.resources
from ...plugins import plugin_manager
from ...plugins.pypi import (
ProjectInfo,
iter_napari_plugin_info,
normalized_name,
)
from ...utils._appdirs import user_plugin_dir, user_site_packages
from ...utils.misc import parse_version, running_as_bundled_app
from ...utils.translations import trans
from ..qthreading import create_worker
from ..widgets.qt_eliding_label import ElidingLabel
from ..widgets.qt_plugin_sorter import QtPluginSorter
from .qt_plugin_report import QtPluginErrReporter
# TODO: add error icon and handle pip install errors
# TODO: add queue to handle clicks when already processing
class Installer:
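    """Run pip in a QProcess to install/uninstall napari plugin packages.
    (Docstring added for clarity; inferred from the implementation below:
    output streams into an optional QTextEdit and plugins are re-discovered
    and pruned when the subprocess finishes.)
    """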
def __init__(self, output_widget: QTextEdit = None):
from ...plugins import plugin_manager
# create install process
self._output_widget = None
self.process = QProcess()
self.process.setProgram(sys.executable)
self.process.setProcessChannelMode(QProcess.MergedChannels)
self.process.readyReadStandardOutput.connect(self._on_stdout_ready)
# setup process path
env = QProcessEnvironment()
combined_paths = os.pathsep.join(
[user_site_packages(), env.systemEnvironment().value("PYTHONPATH")]
)
env.insert("PYTHONPATH", combined_paths)
# use path of parent process
env.insert(
"PATH", QProcessEnvironment.systemEnvironment().value("PATH")
)
self.process.setProcessEnvironment(env)
self.process.finished.connect(lambda: plugin_manager.discover())
self.process.finished.connect(lambda: plugin_manager.prune())
self.set_output_widget(output_widget)
def set_output_widget(self, output_widget: QTextEdit):
if output_widget:
self._output_widget = output_widget
self.process.setParent(output_widget)
def _on_stdout_ready(self):
if self._output_widget:
text = self.process.readAllStandardOutput().data().decode()
self._output_widget.append(text)
def install(self, pkg_list: Sequence[str]):
cmd = ['-m', 'pip', 'install', '--upgrade']
if running_as_bundled_app() and sys.platform.startswith('linux'):
cmd += [
'--no-warn-script-location',
'--prefix',
user_plugin_dir(),
]
self.process.setArguments(cmd + list(pkg_list))
if self._output_widget:
self._output_widget.clear()
self.process.start()
def uninstall(self, pkg_list: Sequence[str]):
args = ['-m', 'pip', 'uninstall', '-y']
self.process.setArguments(args + list(pkg_list))
if self._output_widget:
self._output_widget.clear()
self.process.start()
for pkg in pkg_list:
plugin_manager.unregister(pkg)
class PluginListItem(QFrame):
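    """One row of a plugin list: name, version, summary, author, and an
    install/uninstall action button.
    (Docstring added for clarity; inferred from setup_ui below.)
    """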
def __init__(
self,
package_name: str,
version: str = '',
url: str = '',
summary: str = '',
author: str = '',
license: str = "UNKNOWN",
*,
plugin_name: str = None,
parent: QWidget = None,
enabled: bool = True,
):
super().__init__(parent)
self.setup_ui(enabled)
if plugin_name:
self.plugin_name.setText(plugin_name)
self.package_name.setText(f"{package_name} {version}")
self.summary.setText(summary)
self.package_author.setText(author)
self.action_button.setText(trans._("uninstall"))
self.action_button.setObjectName("remove_button")
self.enabled_checkbox.setChecked(enabled)
if PluginError.get(plugin_name=plugin_name):
def _show_error():
rep = QtPluginErrReporter(
parent=self._get_dialog(), initial_plugin=plugin_name
)
rep.setWindowFlags(Qt.Sheet)
close = QPushButton(trans._("close"), rep)
rep.layout.addWidget(close)
rep.plugin_combo.hide()
close.clicked.connect(rep.close)
rep.open()
self.error_indicator.clicked.connect(_show_error)
self.error_indicator.show()
self.summary.setIndent(18)
else:
self.summary.setIndent(38)
else:
self.plugin_name.setText(package_name)
self.package_name.setText(version)
self.summary.setText(summary)
self.package_author.setText(author)
self.action_button.setText(trans._("install"))
self.enabled_checkbox.hide()
def _get_dialog(self) -> QDialog:
p = self.parent()
while not isinstance(p, QDialog) and p.parent():
p = p.parent()
return p
def setup_ui(self, enabled=True):
self.v_lay = QVBoxLayout(self)
self.v_lay.setContentsMargins(-1, 6, -1, 6)
self.v_lay.setSpacing(0)
self.row1 = QHBoxLayout()
self.row1.setSpacing(6)
self.enabled_checkbox = QCheckBox(self)
self.enabled_checkbox.setChecked(enabled)
self.enabled_checkbox.stateChanged.connect(self._on_enabled_checkbox)
self.enabled_checkbox.setToolTip(trans._("enable/disable"))
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.enabled_checkbox.sizePolicy().hasHeightForWidth()
)
self.enabled_checkbox.setSizePolicy(sizePolicy)
self.enabled_checkbox.setMinimumSize(QSize(20, 0))
self.enabled_checkbox.setText("")
self.row1.addWidget(self.enabled_checkbox)
self.plugin_name = QLabel(self)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Minimum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.plugin_name.sizePolicy().hasHeightForWidth()
)
self.plugin_name.setSizePolicy(sizePolicy)
font15 = QFont()
font15.setPointSize(15)
self.plugin_name.setFont(font15)
self.row1.addWidget(self.plugin_name)
self.package_name = QLabel(self)
self.package_name.setAlignment(
Qt.AlignRight | Qt.AlignTrailing | Qt.AlignVCenter
)
self.row1.addWidget(self.package_name)
self.action_button = QPushButton(self)
sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.action_button.sizePolicy().hasHeightForWidth()
)
self.action_button.setSizePolicy(sizePolicy)
self.row1.addWidget(self.action_button)
self.v_lay.addLayout(self.row1)
self.row2 = QHBoxLayout()
self.error_indicator = QPushButton()
self.error_indicator.setObjectName("warning_icon")
self.error_indicator.setCursor(Qt.PointingHandCursor)
self.error_indicator.hide()
self.row2.addWidget(self.error_indicator)
self.row2.setContentsMargins(-1, 4, 0, -1)
self.summary = ElidingLabel(parent=self)
sizePolicy = QSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Preferred
)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.summary.sizePolicy().hasHeightForWidth()
)
self.summary.setSizePolicy(sizePolicy)
self.summary.setObjectName("small_text")
self.row2.addWidget(self.summary)
self.package_author = QLabel(self)
sizePolicy = QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(
self.package_author.sizePolicy().hasHeightForWidth()
)
self.package_author.setSizePolicy(sizePolicy)
self.package_author.setObjectName("small_text")
self.row2.addWidget(self.package_author)
self.v_lay.addLayout(self.row2)
def _on_enabled_checkbox(self, state: int):
"""Called with `state` when checkbox is clicked."""
plugin_manager.set_blocked(self.plugin_name.text(), not state)
class QPluginList(QListWidget):
def __init__(self, parent: QWidget, installer: Installer):
super().__init__(parent)
self.installer = installer
self.setSortingEnabled(True)
@Slot(ProjectInfo)
def addItem(
self, project_info: ProjectInfo, plugin_name=None, enabled=True
):
# don't add duplicates
if (
self.findItems(project_info.name, Qt.MatchFixedString)
and not plugin_name
):
return
# including summary here for sake of filtering below.
searchable_text = project_info.name + " " + project_info.summary
item = QListWidgetItem(searchable_text, parent=self)
item.version = project_info.version
super().addItem(item)
widg = PluginListItem(
*project_info,
parent=self,
plugin_name=plugin_name,
enabled=enabled,
)
method = getattr(
self.installer, 'uninstall' if plugin_name else 'install'
)
widg.action_button.clicked.connect(lambda: method([project_info.name]))
item.setSizeHint(widg.sizeHint())
self.setItemWidget(item, widg)
@Slot(ProjectInfo)
def tag_outdated(self, project_info: ProjectInfo):
for item in self.findItems(project_info.name, Qt.MatchFixedString):
current = item.version
latest = project_info.version
if parse_version(current) >= parse_version(latest):
continue
if hasattr(item, 'outdated'):
# already tagged it
continue
item.outdated = True
widg = self.itemWidget(item)
update_btn = QPushButton(
trans._("update (v{latest})", latest=latest), widg
)
update_btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
update_btn.clicked.connect(
lambda: self.installer.install([item.text()])
)
widg.row1.insertWidget(3, update_btn)
def filter(self, text: str):
"""Filter items to those containing `text`."""
shown = self.findItems(text, Qt.MatchContains)
for i in range(self.count()):
item = self.item(i)
item.setHidden(item not in shown)
class QtPluginDialog(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.installer = Installer()
self.setup_ui()
self.installer.set_output_widget(self.stdout_text)
self.installer.process.started.connect(self._on_installer_start)
self.installer.process.finished.connect(self._on_installer_done)
self.refresh()
def _on_installer_start(self):
self.show_status_btn.setChecked(True)
self.working_indicator.show()
self.process_error_indicator.hide()
def _on_installer_done(self, exit_code, exit_status):
self.working_indicator.hide()
if exit_code:
self.process_error_indicator.show()
else:
self.show_status_btn.setChecked(False)
self.refresh()
self.plugin_sorter.refresh()
def refresh(self):
self.installed_list.clear()
self.available_list.clear()
# fetch installed
from ...plugins import plugin_manager
plugin_manager.discover() # since they might not be loaded yet
already_installed = set()
for plugin_name, mod_name, distname in plugin_manager.iter_available():
# not showing these in the plugin dialog
if plugin_name in ('napari_plugin_engine',):
continue
if distname:
already_installed.add(distname)
meta = standard_metadata(distname)
else:
meta = {}
self.installed_list.addItem(
ProjectInfo(
normalized_name(distname or ''),
meta.get('version', ''),
meta.get('url', ''),
meta.get('summary', ''),
meta.get('author', ''),
meta.get('license', ''),
),
plugin_name=plugin_name,
enabled=plugin_name in plugin_manager.plugins,
)
# self.v_splitter.setSizes([70 * self.installed_list.count(), 10, 10])
# fetch available plugins
self.worker = create_worker(iter_napari_plugin_info)
def _handle_yield(project_info):
if project_info.name in already_installed:
self.installed_list.tag_outdated(project_info)
else:
self.available_list.addItem(project_info)
self.worker.yielded.connect(_handle_yield)
self.worker.finished.connect(self.working_indicator.hide)
self.worker.finished.connect(self._update_count_in_label)
self.worker.start()
def setup_ui(self):
self.resize(1080, 640)
vlay_1 = QVBoxLayout(self)
self.h_splitter = QSplitter(self)
vlay_1.addWidget(self.h_splitter)
self.h_splitter.setOrientation(Qt.Horizontal)
self.v_splitter = QSplitter(self.h_splitter)
self.v_splitter.setOrientation(Qt.Vertical)
self.v_splitter.setMinimumWidth(500)
self.plugin_sorter = QtPluginSorter(parent=self.h_splitter)
self.plugin_sorter.layout().setContentsMargins(2, 0, 0, 0)
self.plugin_sorter.hide()
installed = QWidget(self.v_splitter)
lay = QVBoxLayout(installed)
lay.setContentsMargins(0, 2, 0, 2)
self.installed_label = QLabel(trans._("Installed Plugins"))
self.installed_filter = QLineEdit()
self.installed_filter.setPlaceholderText("search...")
self.installed_filter.setMaximumWidth(350)
self.installed_filter.setClearButtonEnabled(True)
mid_layout = QHBoxLayout()
mid_layout.addWidget(self.installed_label)
mid_layout.addWidget(self.installed_filter)
mid_layout.addStretch()
lay.addLayout(mid_layout)
self.installed_list = QPluginList(installed, self.installer)
self.installed_filter.textChanged.connect(self.installed_list.filter)
lay.addWidget(self.installed_list)
uninstalled = QWidget(self.v_splitter)
lay = QVBoxLayout(uninstalled)
lay.setContentsMargins(0, 2, 0, 2)
self.avail_label = QLabel(trans._("Available Plugins"))
self.avail_filter = QLineEdit()
self.avail_filter.setPlaceholderText("search...")
self.avail_filter.setMaximumWidth(350)
self.avail_filter.setClearButtonEnabled(True)
mid_layout = QHBoxLayout()
mid_layout.addWidget(self.avail_label)
mid_layout.addWidget(self.avail_filter)
mid_layout.addStretch()
lay.addLayout(mid_layout)
self.available_list = QPluginList(uninstalled, self.installer)
self.avail_filter.textChanged.connect(self.available_list.filter)
lay.addWidget(self.available_list)
self.stdout_text = QTextEdit(self.v_splitter)
self.stdout_text.setReadOnly(True)
self.stdout_text.setObjectName("pip_install_status")
self.stdout_text.hide()
buttonBox = QHBoxLayout()
self.working_indicator = QLabel(trans._("loading ..."), self)
sp = self.working_indicator.sizePolicy()
sp.setRetainSizeWhenHidden(True)
self.working_indicator.setSizePolicy(sp)
self.process_error_indicator = QLabel(self)
self.process_error_indicator.setObjectName("error_label")
self.process_error_indicator.hide()
load_gif = str(Path(napari.resources.__file__).parent / "loading.gif")
mov = QMovie(load_gif)
mov.setScaledSize(QSize(18, 18))
self.working_indicator.setMovie(mov)
mov.start()
self.direct_entry_edit = QLineEdit(self)
self.direct_entry_edit.installEventFilter(self)
self.direct_entry_edit.setPlaceholderText(
trans._('install by name/url, or drop file...')
)
self.direct_entry_btn = QPushButton(trans._("Install"), self)
self.direct_entry_btn.clicked.connect(self._install_packages)
self.show_status_btn = QPushButton(trans._("Show Status"), self)
self.show_status_btn.setFixedWidth(100)
self.show_sorter_btn = QPushButton(trans._("<< Show Sorter"), self)
self.close_btn = QPushButton(trans._("Close"), self)
self.close_btn.clicked.connect(self.accept)
buttonBox.addWidget(self.show_status_btn)
buttonBox.addWidget(self.working_indicator)
buttonBox.addWidget(self.direct_entry_edit)
buttonBox.addWidget(self.direct_entry_btn)
buttonBox.addWidget(self.process_error_indicator)
buttonBox.addSpacing(60)
buttonBox.addWidget(self.show_sorter_btn)
buttonBox.addWidget(self.close_btn)
buttonBox.setContentsMargins(0, 0, 4, 0)
vlay_1.addLayout(buttonBox)
self.show_status_btn.setCheckable(True)
self.show_status_btn.setChecked(False)
self.show_status_btn.toggled.connect(self._toggle_status)
self.show_sorter_btn.setCheckable(True)
self.show_sorter_btn.setChecked(False)
self.show_sorter_btn.toggled.connect(self._toggle_sorter)
self.v_splitter.setStretchFactor(1, 2)
self.h_splitter.setStretchFactor(0, 2)
self.avail_filter.setFocus()
def _update_count_in_label(self):
count = self.available_list.count()
self.avail_label.setText(
trans._("Available Plugins ({count})", count=count)
)
def eventFilter(self, watched, event):
if event.type() == QEvent.DragEnter:
# we need to accept this event explicitly to be able
# to receive QDropEvents!
event.accept()
if event.type() == QEvent.Drop:
md = event.mimeData()
if md.hasUrls():
files = [url.toLocalFile() for url in md.urls()]
self.direct_entry_edit.setText(files[0])
return True
return super().eventFilter(watched, event)
def _toggle_sorter(self, show):
if show:
self.show_sorter_btn.setText(trans._(">> Hide Sorter"))
self.plugin_sorter.show()
else:
self.show_sorter_btn.setText(trans._("<< Show Sorter"))
self.plugin_sorter.hide()
def _toggle_status(self, show):
if show:
self.show_status_btn.setText(trans._("Hide Status"))
self.stdout_text.show()
else:
self.show_status_btn.setText(trans._("Show Status"))
self.stdout_text.hide()
def _install_packages(self, packages: Sequence[str] = ()):
if not packages:
_packages = self.direct_entry_edit.text()
if os.path.exists(_packages):
packages = [_packages]
else:
packages = _packages.split()
self.direct_entry_edit.clear()
if packages:
self.installer.install(packages)
if __name__ == "__main__":
from qtpy.QtWidgets import QApplication
app = QApplication([])
w = QtPluginDialog()
w.show()
app.exec_()
| 1.40625 | 1 |
src/api/datamanage/pro/lifecycle/data_trace/data_set_create.py | Chromico/bk-base | 84 | 175 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from copy import deepcopy
from datamanage.pro import exceptions as dm_pro_errors
from datamanage.utils.api import MetaApi
from datamanage.pro.utils.time import utc_to_local, str_to_datetime
from datamanage.pro.lifecycle.models_dict import (
DATASET_CREATE_MAPPINGS,
DATASET_CREATE_EVENT_INFO_DICT,
DataTraceShowType,
ComplexSearchBackendType,
DataTraceFinishStatus,
)
def get_dataset_create_info(dataset_id, dataset_type):
"""获取数据足迹中和数据创建相关信息
:param dataset_id: 数据id
:param dataset_type: 数据类型
:return: 数据创建相关信息
:rtype: list
"""
    # 1) fetch the dataset creation info from dgraph
data_set_create_info_statement = """
{
get_dataset_create_info(func: eq(%s, "%s")){created_by created_at}
}
""" % (
DATASET_CREATE_MAPPINGS[dataset_type]['data_set_pk'],
dataset_id,
)
query_result = MetaApi.complex_search(
{"backend_type": ComplexSearchBackendType.DGRAPH.value, "statement": data_set_create_info_statement}, raw=True
)
create_info_ret = query_result['data']['data']['get_dataset_create_info']
if not (isinstance(create_info_ret, list) and create_info_ret):
raise dm_pro_errors.GetDataSetCreateInfoError(message_kv={'dataset_id': dataset_id})
    # 2) build the formatted creation info
create_trace_dict = deepcopy(DATASET_CREATE_EVENT_INFO_DICT)
create_trace_dict.update(
{
"sub_type": dataset_type,
"sub_type_alias": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"description": DATASET_CREATE_MAPPINGS[dataset_type]['data_set_create_alias'],
"created_at": utc_to_local(create_info_ret[0]['created_at']),
"created_by": create_info_ret[0]['created_by'],
"show_type": DataTraceShowType.DISPLAY.value,
"datetime": str_to_datetime(utc_to_local(create_info_ret[0]['created_at'])),
"status": DataTraceFinishStatus.STATUS,
"status_alias": DataTraceFinishStatus.STATUS_ALIAS,
}
)
return [create_trace_dict]
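# Illustrative usage sketch (added; not part of the original module). The
# dataset id/type values below are assumptions for demonstration only:
#
#     create_events = get_dataset_create_info('591_example_table', 'result_table')
#     print(create_events[0]['created_by'], create_events[0]['created_at'])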
| 1.375 | 1 |
base/admin.py | ExpertOfNone/expert_of_none | 0 | 183 | from django.contrib import admin
from base.models import Topic, Photo
class EONBaseAdmin(admin.ModelAdmin):
def get_changeform_initial_data(self, request):
initial = super().get_changeform_initial_data(request)
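        # Pre-populate the audit fields only on the "add" form, not when editing.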
if 'add' in request.META['PATH_INFO']:
initial['created_by'] = request.user
initial['modified_by'] = request.user
return initial
def save_model(self, request, obj, form, change):
if not obj.created_by:
obj.created_by = request.user
return super().save_model(request, obj, form, change)
class TopicAdmin(EONBaseAdmin):
list_display = [
'name', 'parent_topic', 'top_level', 'modified_by', 'modified', 'created_by', 'created',
]
class PhotoAdmin(EONBaseAdmin):
# TODO Add Proper List Display
pass
admin.site.register(Topic, TopicAdmin)
admin.site.register(Photo, PhotoAdmin)
| 1.546875 | 2 |
main.py | vkumarma/Complete-Interpreter | 0 | 191 | <gh_stars>0
import re
import sys
class Lexer:
def __init__(self, inp_str):
self.index = 0
self.s = inp_str
def get_char(self):
if self.index < len(self.s):
var = self.s[self.index]
self.index += 1
return var
input_file = open(str(sys.argv[1]), 'r') # Open file for reading
line = input_file.read()
# "if z then while x * 4 - 2 do skip endwhile else x := 7 endif; y := 1"
input_string = line.strip("\n")
lexer = Lexer(input_string)
hashtable = {}
tokens_list = []
def token_check(input):
if re.fullmatch("if|then|else|endif|while|do|endwhile|skip", input):
hashtable[input] = "KEYWORD"
tokens_list.append(input)
elif re.search("([a-z]|[A-Z])([a-z]|[A-Z]|[0-9])*", input):
hashtable[input] = "IDENTIFIER"
tokens_list.append(input)
elif re.search("[0-9]+", input):
hashtable[input] = "NUMBER"
tokens_list.append(input)
elif re.fullmatch("\+|\-|\*|/|\(|\)|:=|;", input):
hashtable[input] = "SYMBOL"
tokens_list.append(input)
else:
hashtable[input] = "ERROR READING"
def digit(curr_char, lexer):
sub = ""
while (curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def longest_sub_string(curr_char, lexer):
sub = ""
while (curr_char.isalpha() or curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def symbol(curr_char, lexer):
# print(curr_char)
sym = curr_char
curr_char = lexer.get_char()
new.append(curr_char)
return sym
def assignment(curr_char, lexer):
sub = curr_char
next_char = lexer.get_char()
if next_char == "=":
sub += next_char
new.append(next_char)
return sub
new.append(lexer.get_char())
return sub
new = []  # holds the look-ahead character handed back by the helper functions
curr_char = lexer.get_char()
while curr_char is not None:
    while curr_char == ' ' or curr_char == '':
        curr_char = lexer.get_char()
    if curr_char is None:
        break  # input ended while skipping whitespace
if (curr_char.isdigit()):
token_check(digit(curr_char, lexer))
curr_char = new.pop()
elif (curr_char.isalpha()):
token_check(longest_sub_string(curr_char, lexer))
curr_char = new.pop()
elif curr_char in "+-/*();":
token_check(symbol(curr_char, lexer))
curr_char = new.pop()
elif curr_char == ":":
token_check(assignment(curr_char, lexer))
curr_char = new.pop()
if curr_char == "=":
curr_char = lexer.get_char()
else:
token_check(curr_char)
curr_char = lexer.get_char()
def tokens():
return hashtable
# print(tokens_list)
# print(tokens())
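# Illustrative example (added): running `python main.py program.txt` on a file
# containing `x := 3 + 4` should record `x` as IDENTIFIER, `:=` and `+` as
# SYMBOL, and `3`, `4` as NUMBER in the hashtable above.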
| 2.71875 | 3 |
aws-regions.py | groorj/cloud-regions | 0 | 199 | <reponame>groorj/cloud-regions
import json
import logging
import os
import inspect
import urllib
import urllib.request
from urllib.error import HTTPError
# logger
logger = logging.getLogger()
logger_level = logging.getLevelName(os.environ['LOGGER_LEVEL'])
logger.setLevel(logger_level)
# validate access
def validate_access(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
logger.debug("RESTRICTED_ACCESS_ENABLED: [%s]", os.environ['RESTRICTED_ACCESS_ENABLED'])
error_message = "You are not allowed, get out!"
if os.environ['RESTRICTED_ACCESS_ENABLED'] == 'true':
logger.info("Restricted access is enabled")
logger.info("Value for header [%s] is: [%s]", os.environ['RESTRICTED_ACCESS_HTTP_HEADER'], event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']])
if event["headers"][os.environ['RESTRICTED_ACCESS_HTTP_HEADER']] != os.environ['RESTRICTED_ACCESS_SECRET']:
logger.info("Key provided is not valid")
logger.debug("Error: [%s]", error_message)
http_code = 403
raise ValueError(http_code, error_message)
else:
logger.info("Key provided is valid")
else:
logger.info("Restricted access is NOT enabled")
# create response
def create_response_new(status_code, message_body):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return {
'statusCode': str(status_code),
'body': json.dumps(message_body),
'headers': {
'Content-Type': 'application/json',
'Access-Control-Allow-Origin': '*'
},
}
# download json file
def get_json():
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
try:
response = urllib.request.urlopen(os.environ['AWS_REGIONS_JSON_URL'])
except HTTPError as err:
# catch HTTP error
logger.debug("HTTP error: [%s]", err)
raise
json_data = json.loads(response.read())
return json_data
# entry point -> return region info
def get_region_info(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return_info_final = {}
# validate the access to this resource
try:
validate_access(event, context)
except ValueError as err:
return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] }
return create_response_new(err.args[0], return_info_final)
# get region info
region_code = event['pathParameters']['region_code']
logger.debug("region_code: [%s]", region_code)
try:
json_data = get_json()
except HTTPError as err:
# http_code = err.code
http_code = 500
return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code }
return create_response_new(http_code, return_info_final)
# logger.debug("json_data: [%s]", json_data)
# logger.debug("type(json_data): [%s]", type(json_data))
for element in json_data['data']:
# logger.debug("code: [%s] && region_code: [%s]", element['code'], region_code)
if element['code'] == region_code:
logger.info("region_code found")
http_code = 200
return_info_final['request'] = { "request_status": "Success" }
return_info_final['info'] = json_data['info']
return_info_final['data'] = element
break
else:
logger.info("region_code NOT found")
return_info = "Region code NOT found."
http_code = 404
return_info_final['request'] = { "request_status": "Fail", "error_message": "Region code NOT found.", "http_error_code": http_code }
return create_response_new(http_code, return_info_final)
# entry point -> return region info
def get_all_regions_info(event, context):
logger.debug("Inside function: [%s]", inspect.currentframe().f_code.co_name)
return_info_final = {}
# validate the access to this resource
try:
validate_access(event, context)
except ValueError as err:
return_info_final['request'] = { "request_status": "Fail", "error_message": err.args[1], "http_error_code": err.args[0] }
return create_response_new(err.args[0], return_info_final)
# get regions info
try:
json_data = get_json()
except HTTPError as err:
# http_code = err.code
http_code = 500
return_info_final['request'] = { "request_status": "Fail", "error_message": "Error getting Regions information.", "http_error_code": err.code }
return create_response_new(http_code, return_info_final)
logger.debug("json_data: [%s]", json_data)
http_code = 200
return_info_final['request'] = { "request_status": "Success" }
return_info_final['info'] = json_data['info']
return_info_final['data'] = json_data['data']
return create_response_new(http_code, return_info_final)
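# Illustrative local test harness (added; not part of the deployed Lambda).
# The event shape below is an assumption matching an API Gateway proxy
# integration, and the module expects LOGGER_LEVEL, RESTRICTED_ACCESS_* and
# AWS_REGIONS_JSON_URL environment variables to be set before import.
if __name__ == '__main__':
    fake_event = {
        'headers': {},
        'pathParameters': {'region_code': 'us-east-1'},
    }
    print(get_region_info(fake_event, None))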
# End;
| 1.484375 | 1 |
tcex/playbooks/playbooks_base.py | RichieB2B/tcex | 0 | 207 | """TcEx Framework Playbook module"""
# standard library
import base64
import json
import re
from collections import OrderedDict
from collections.abc import Iterable
class PlaybooksBase:
"""TcEx Playbook Module Base Class
Args:
tcex (TcEx): Instance of TcEx class.
context (str): The Redis context (hash).
output_variables (list): The requested output variables.
"""
def __init__(self, tcex, context, output_variables):
"""Initialize the Class properties."""
self.tcex = tcex
self._context = context
self._output_variables = output_variables or []
# properties
self._output_variables_by_name = None
self._output_variables_by_type = None
self.log = tcex.log
# match full variable
self._variable_match = re.compile(fr'^{self._variable_pattern}$')
# capture variable parts (exactly a variable)
self._variable_parse = re.compile(self._variable_pattern)
# match embedded variables without quotes (#App:7979:variable_name!StringArray)
self._vars_keyvalue_embedded = re.compile(fr'(?:\"\:\s?)[^\"]?{self._variable_pattern}')
def _coerce_string_value(self, value):
"""Return a string value from an bool or int."""
# coerce bool before int as python says a bool is an int
if isinstance(value, bool):
# coerce bool to str type
self.log.warning(f'Coercing bool value ({value}) to a string ("{str(value).lower()}").')
value = str(value).lower()
# coerce int to str type
if isinstance(value, (float, int)):
self.log.warning(f'Coercing float/int value ({value}) to a string ("{str(value)}").')
value = str(value)
return value
def _create(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
if variable_type == 'Binary':
# if not isinstance(value, bytes):
# value = value.encode('utf-8')
if validate and not isinstance(value, bytes):
raise RuntimeError('Invalid data provided for Binary.')
value = base64.b64encode(value).decode('utf-8')
elif variable_type == 'KeyValue':
if validate and (not isinstance(value, dict) or not self._is_key_value(value)):
raise RuntimeError('Invalid data provided for KeyValue.')
elif variable_type == 'String':
# coerce string values
value = self._coerce_string_value(value)
if validate and not isinstance(value, str):
raise RuntimeError('Invalid data provided for String.')
elif variable_type == 'TCEntity':
if validate and (not isinstance(value, dict) or not self._is_tc_entity(value)):
raise RuntimeError('Invalid data provided for TcEntity.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
def _create_array(self, key, value, validate=True):
"""Create the value in Redis if applicable."""
if key is None or value is None:
self.log.warning('The key or value field is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
# Enhanced entity array is the wild-wild west, don't validate it
if variable_type != 'TCEnhancedEntityArray':
if validate and (not isinstance(value, Iterable) or isinstance(value, (str, dict))):
raise RuntimeError(f'Invalid data provided for {variable_type}.')
value = [
*value
] # spread the value so that we know it's a list (as opposed to an iterable)
if variable_type == 'BinaryArray':
value_encoded = []
for v in value:
if v is not None:
if validate and not isinstance(v, bytes):
raise RuntimeError('Invalid data provided for Binary.')
# if not isinstance(v, bytes):
# v = v.encode('utf-8')
v = base64.b64encode(v).decode('utf-8')
value_encoded.append(v)
value = value_encoded
elif variable_type == 'KeyValueArray':
if validate and not self._is_key_value_array(value):
raise RuntimeError('Invalid data provided for KeyValueArray.')
elif variable_type == 'StringArray':
value_coerced = []
for v in value:
# coerce string values
v = self._coerce_string_value(v)
if validate and not isinstance(v, (type(None), str)):
raise RuntimeError('Invalid data provided for StringArray.')
value_coerced.append(v)
value = value_coerced
elif variable_type == 'TCEntityArray':
if validate and not self._is_tc_entity_array(value):
raise RuntimeError('Invalid data provided for TcEntityArray.')
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
try:
value = json.dumps(value)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to serialize value ({e}).')
try:
return self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
return None
@staticmethod
def _decode_binary(data):
"""Return decoded bytes data handling data written by java apps."""
try:
data = data.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
# for data written an upstream java App
data = data.decode('latin-1')
return data
@staticmethod
def _is_key_value(data):
"""Return True if provided data has proper structure for Key Value."""
if data is None:
return False
return all(x in data for x in ['key', 'value'])
def _is_key_value_array(self, data):
"""Return True if provided data has proper structure for Key Value Array."""
for d in data:
if not self._is_key_value(d):
return False
return True
@staticmethod
def _is_tc_entity(data):
"""Return True if provided data has proper structure for TC Entity."""
if data is None:
return False
return all(x in data for x in ['id', 'value', 'type'])
def _is_tc_entity_array(self, data):
"""Return True if provided data has proper structure for TC Entity Array."""
for d in data:
if not self._is_tc_entity(d):
return False
return True
@staticmethod
def _load_value(value):
"""Return the loaded JSON value or raise an error.
Args:
value (str): The data from key/value store.
Raises:
RuntimeError: Raise error when data can't be loaded as JSON data.
Returns:
any: The de-serialized value from the key/value store.
"""
try:
return json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed to JSON load data "{value}" ({e}).')
def _parse_output_variables(self):
"""Parse the output variables provided to Playbook Class.
**Example Variable Format**::
['#App:1234:status!String', '#App:1234:status_code!String']
"""
self._output_variables_by_name = {}
self._output_variables_by_type = {}
for ov in self._output_variables:
# parse the variable to get individual parts
parsed_variable = self.parse_variable(ov)
variable_name = parsed_variable.get('name')
variable_type = parsed_variable.get('type')
# store the variables in dict by name (e.g. "status_code")
self._output_variables_by_name[variable_name] = {'variable': ov}
# store the variables in dict by name-type (e.g. "status_code-String")
self._output_variables_by_type[f'{variable_name}-{variable_type}'] = {'variable': ov}
def _read(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None:
self.log.warning('The key is None.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'Binary':
value = self._load_value(value)
if b64decode:
value = base64.b64decode(value)
if decode:
value = self._decode_binary(value)
elif variable_type == 'KeyValue':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
value = self._load_value(value)
elif variable_type == 'String':
if embedded:
value = self._read_embedded(value)
# coerce string values
value = self._coerce_string_value(self._load_value(value))
elif variable_type == 'TCEntity':
value = self._load_value(value)
return value
def _read_array(self, key, embedded=True, b64decode=True, decode=False):
"""Create the value in Redis if applicable."""
if key is None: # pragma: no cover
self.log.warning('The null value for key was provided.')
return None
# get variable type from variable value
variable_type = self.variable_type(key)
try:
value = self.tcex.key_value_store.read(self._context, key.strip())
except RuntimeError as e:
self.log.error(e)
return None
if value is None:
return value
if variable_type == 'BinaryArray':
value = json.loads(value, object_pairs_hook=OrderedDict)
values = []
for v in value:
if v is not None and b64decode:
v = base64.b64decode(v)
if decode:
v = self._decode_binary(v)
values.append(v)
value = values
elif variable_type == 'KeyValueArray':
# embedded variable can be unquoted, which breaks JSON.
value = self._wrap_embedded_keyvalue(value)
if embedded:
value = self._read_embedded(value)
try:
value = json.loads(value, object_pairs_hook=OrderedDict)
except ValueError as e: # pragma: no cover
raise RuntimeError(f'Failed loading JSON data ({value}). Error: ({e})')
elif variable_type == 'StringArray':
if embedded:
value = self._read_embedded(value)
# convert int to str
value_coerced = []
for v in self._load_value(value):
# coerce string values
value_coerced.append(self._coerce_string_value(v))
value = value_coerced
elif variable_type in ['TCEntityArray', 'TCEnhancedEntity', 'TCEnhancedEntityArray']:
value = self._load_value(value)
# self.log.trace(f'pb create - context: {self._context}, key: {key}, value: {value}')
return value
def _read_embedded(self, value):
"""Read method for "embedded" variables.
.. Note:: The ``read()`` method will automatically determine if the input is a variable or
needs to be searched for embedded variables.
Embedded variable rules:
* Only user input can have embedded variables.
* Only String and KeyValueArray variables can have embedded variables.
* Variables can only be embedded one level deep.
        This method will automatically convert variables embedded in a string to the value
        retrieved from the DB. If there are no keys/variables, the raw string will be returned.
Examples::
DB Values
#App:7979:variable_name!String:
"embedded \\"variable\\""
#App:7979:two!String:
"two"
#App:7979:variable_name!StringArray:
["one", "two", "three"]
Examples 1:
Input: "This input has a embedded #App:7979:variable_name!String"
Examples 2:
Input: ["one", #App:7979:two!String, "three"]
Examples 3:
Input: [{
"key": "embedded string",
"value": "This input has a embedded #App:7979:variable_name!String"
}, {
"key": "string array",
"value": #App:7979:variable_name!StringArray
}, {
"key": "string",
"value": #App:7979:variable_name!String
}]
Args:
value (str): The value to parsed and updated from the DB.
Returns:
(str): Results retrieved from DB
"""
if value is None: # pragma: no cover
return value
for variable in (v.group(0) for v in re.finditer(self._variable_parse, str(value))):
v = self.read(variable)
self.log.trace(f'embedded variable: {variable}, value: {v}')
if isinstance(v, (dict, list)):
v = json.dumps(v)
# for KeyValueArray with nested dict/list type replace the
# quoted value to ensure the resulting data is loadable JSON
value = re.sub(f'"{variable}"', v, value)
if v is not None:
# only replace variable if a non-null value is returned from kv store
# APP-1030 need to revisit this to handle variable references in kv/kvarrays that
# are None. Would like to be able to say if value is just the variable reference,
# sub None value, else insert '' in string. That would require a kv-specific
# version of this method that gets the entire list/dict instead of just the string.
value = re.sub(variable, v, value)
return value
@property
def _variable_pattern(self):
"""Regex pattern to match and parse a playbook variable."""
variable_pattern = r'#([A-Za-z]+)' # match literal (#App,#Trigger) at beginning of String
variable_pattern += r':([\d]+)' # app id (:7979)
variable_pattern += r':([A-Za-z0-9_\.\-\[\]]+)' # variable name (:variable_name)
variable_pattern += r'!(StringArray|BinaryArray|KeyValueArray' # variable type (array)
variable_pattern += r'|TCEntityArray|TCEnhancedEntityArray' # variable type (array)
variable_pattern += r'|String|Binary|KeyValue|TCEntity|TCEnhancedEntity' # variable type
variable_pattern += r'|(?:(?!String)(?!Binary)(?!KeyValue)' # non matching for custom
variable_pattern += r'(?!TCEntity)(?!TCEnhancedEntity)' # non matching for custom
variable_pattern += r'[A-Za-z0-9_-]+))' # variable type (custom)
return variable_pattern
@property
def _variable_array_types(self):
"""Return list of standard playbook array variable types."""
return [
'BinaryArray',
'KeyValueArray',
'StringArray',
'TCEntityArray',
'TCEnhancedEntityArray',
]
@property
def _variable_single_types(self):
"""Return list of standard playbook single variable types."""
return [
'Binary',
'KeyValue',
'String',
'TCEntity',
'TCEnhancedEntity',
]
@property
def _variable_types(self):
"""Return list of standard playbook variable typesd."""
return self._variable_single_types + self._variable_array_types
def _wrap_embedded_keyvalue(self, data):
"""Wrap keyvalue embedded variable in double quotes.
Args:
data (str): The data with embedded variables.
Returns:
(str): Results retrieved from DB
"""
# TODO: need to verify if core still sends improper JSON for KeyValueArrays
if data is not None: # pragma: no cover
variables = []
for v in re.finditer(self._vars_keyvalue_embedded, data):
variables.append(v.group(0))
for var in set(variables): # recursion over set to handle duplicates
# pull (#App:1441:embedded_string!String) from (": #App:1441:embedded_string!String)
variable_string = re.search(self._variable_parse, var).group(0)
# reformat to replace the correct instance only, handling the case where a variable
# is embedded multiple times in the same key value array.
data = data.replace(var, f'": "{variable_string}"')
return data
def create_raw(self, key, value):
"""Create method of CRUD operation for raw data.
        .. important:: Raw data can only be bytes, str, or int. Other data structures
            (dict, list, etc.) must be serialized.
Args:
key (str): The variable to write to the DB.
value (bytes|int|string): The data to write to the DB.
Returns:
(str): Result of DB write.
"""
data = None
if key is not None and value is not None:
try:
data = self.tcex.key_value_store.create(self._context, key.strip(), value)
except RuntimeError as e:
self.log.error(e)
else:
self.log.warning('The key or value field was None.')
return data
def read_raw(self, key):
"""Read method of CRUD operation for raw data.
        .. important:: Bytes input will be returned as a string, as there is
            no way to determine whether data from redis originated as bytes or string.
Args:
key (str): The variable to read from the DB.
Returns:
(str): Results retrieved from DB.
"""
value = None
if key is not None:
value = self.tcex.key_value_store.read(self._context, key.strip())
else:
self.log.warning('The key field was None.')
return value
def parse_variable(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def read(self, key, array=False, embedded=True): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
def variable_type(self, variable): # pragma: no cover
"""Set placeholder for child method."""
raise NotImplementedError('Implemented in child class')
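# Note (added): a concrete subclass supplies parse_variable/read/variable_type;
# the regex built in _variable_pattern matches playbook variables of the form
# "#App:1234:my_output!String".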
| 1.804688 | 2 |
StaticProcess/apriori.py | NIL-zhuang/NJU-Data-Integration | 0 | 215 | import pandas as pd
import os
from tqdm import tqdm
from collections import defaultdict
from mlxtend.preprocessing import TransactionEncoder
from mlxtend.frequent_patterns import apriori
dataPath = "data/static"
itemSetList = []
def loadDataSet():
with open(os.path.join(dataPath, "aprioriData.csv"), 'r') as f:
for line in f.readlines():
line = line.replace('\n', '')
cates = line.split(' ')
itemSetList.append(list(map(int, cates)))
def myApriori():
te = TransactionEncoder()
te_ary = te.fit(itemSetList).transform(itemSetList)
df = pd.DataFrame(te_ary, columns=te.columns_)
return df
def dataInit():
if os.path.exists(os.path.join(dataPath, "aprioriData.csv")):
return
df = pd.read_csv("data/static/static.csv")
user_category = defaultdict(set)
for idx, row in tqdm(df.iterrows(), total=df.shape[0], desc="category data generate"):
user_category[row['USER_ID']].add(row['CATEGORY_ID'])
with open(os.path.join(dataPath, "aprioriData.csv"), 'w+') as f:
for k, v in tqdm(user_category.items()):
f.write(' '.join(sorted(list(map(str, v))))+'\n')
if __name__ == '__main__':
dataInit()
loadDataSet()
df = myApriori()
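    # Mine itemsets appearing in at least ~0.35% of the users' category baskets.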
frequent_itemsets = apriori(df, min_support=0.0035, use_colnames=True)
frequent_itemsets['length'] = frequent_itemsets['itemsets'].apply(lambda x: len(x))
print(frequent_itemsets[(frequent_itemsets['length'] >= 2)])
| 1.984375 | 2 |
Deep_Q_Network/DQN_for_FrozenLake_Discrete_Domain.py | quangnguyendang/Reinforcement_Learning | 0 | 223 | # Credit to https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import gym
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
# NEURAL NETWORK IMPLEMENTATION
tf.reset_default_graph()
# Feature vector for current state representation
input1 = tf.placeholder(shape=[1, env.observation_space.n], dtype=tf.float32)
# tf.Variable(<initial-value>, name=<optional-name>)
# tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None, name=None)
# Weighting W vector in range 0 - 0.01 (like the way Andrew Ng did with *0.01
W = tf.Variable(tf.random_uniform([env.observation_space.n, env.action_space.n], 0, 0.01))
# Qout with shape [1, env.action_space.n] - Action state value for Q[s, a] with every a available at a state
Qout = tf.matmul(input1, W)
# Greedy action at a state
predict = tf.argmax(Qout, axis=1)
# Feature vector for next state representation
nextQ = tf.placeholder(shape=[1, env.action_space.n], dtype=tf.float32)
# Entropy loss
loss = tf.reduce_sum(tf.square(Qout - nextQ))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
# TRAIN THE NETWORK
init = tf.global_variables_initializer()
# Set learning parameters
y = 0.99
e = 0.1
number_episodes = 2000
# List to store total rewards and steps per episode
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(number_episodes):
print("Episode #{} is running!".format(i))
# First state
s = env.reset()
rAll = 0
d = False
j = 0
# Q network
while j < 200: # or While not d:
j += 1
# Choose action by epsilon (e) greedy
# print("s = ", s," --> Identity s:s+1: ", np.identity(env.observation_space.n)[s:s+1])
# s = 0 --> Identity s: s + 1: [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# s = 1 --> Identity s: s + 1: [[0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]]
# Identity [s:s+1] is a one-hot vector
# Therefore W is the actual Q value
a, allQ = sess.run([predict, Qout], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1, r, d, _ = env.step(a[0])
# Obtain next state Q value by feeding the new state throughout the network
Q1 = sess.run(Qout, feed_dict={input1: np.identity(env.observation_space.n)[s1:s1+1]})
maxQ1 = np.max(Q1)
targetQ = allQ
targetQ[0, a[0]] = r + y * maxQ1
# Train our network using target and predicted Q values
_, W1 = sess.run([updateModel, W], feed_dict={input1: np.identity(env.observation_space.n)[s:s+1], nextQ: targetQ})
rAll += r
s = s1
if d:
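                # Decay the exploration rate as training progresses, then end the episode.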
e = 1./((i/50) + 10)
break
jList.append(j)
rList.append(rAll)
env.close()
plt.figure()
plt.plot(rList, label="Return - Q Learning")
plt.show()
plt.figure()
plt.plot(jList, label="Steps - Q Learning")
plt.show()
# -------------------------------------------------------------------------
# TABULAR IMPLEMENTATION
#
# # Set learning parameters
# lr = 0.8
# y = 0.95
# number_episodes = 20000
#
# # Initial table with all zeros
# Q = np.zeros([env.observation_space.n, env.action_space.n])
#
# # List of reward and steps per episode
# rList = []
# for i in range (number_episodes):
# print("Episode #{} is running!".format(i))
# s = env.reset()
# rAll = 0
# d = False
# j = 0
# while j < 99:
# j += 1
# # Choose an action by greedily (with noise) picking from Q table
# # Because of the noise, it is epsilon-greedy with epsilon decreasing over time
# a = np.argmax(Q[s, :] + np.random.rand(1, env.action_space.n)*(1./(i + 1)))
# s1, r, d, _ = env.step(a)
# # env.render()
#
# # Update Q table with new knowledge
# Q[s, a] = Q[s, a] + lr * (r + y * np.max(Q[s1, :]) - Q[s, a])
# rAll += r
# s = s1
# if d:
# break
# rList.append(rAll)
| 3.046875 | 3 |
palm_tree/coconut_1/models.py | m-hintz-42/a-palm-tree | 0 | 231 | from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
| 1.40625 | 1 |
tables/migrations/0004_auto_20200901_2004.py | jarnoln/exposures | 0 | 239 | # Generated by Django 3.1.1 on 2020-09-01 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tables', '0003_exposure_category'),
]
operations = [
migrations.AlterField(
model_name='exposure',
name='location',
field=models.CharField(blank=True, default='', max_length=200),
),
]
| 0.742188 | 1 |
footmark/ram/regioninfo.py | rockzhu/footmark | 0 | 247 | from footmark.regioninfo import RegionInfo
class RAMRegionInfo(RegionInfo):
"""
    Represents a RAM region.
"""
def __init__(self, connection=None, name=None, id=None,
connection_cls=None):
from footmark.ram.connection import RAMConnection
super(RAMRegionInfo, self).__init__(connection, name, id,
RAMConnection)
| 0.867188 | 1 |
logger/__init__.py | remmyzen/nqs-tensorflow2 | 4 | 255 | from .logger import Logger
from .logger_supervised import LoggerSupervised
| 0.157227 | 0 |
apps/core/migrations/0001_initial.py | Visualway/Vitary | 4 | 263 | <gh_stars>1-10
# Generated by Django 4.0.2 on 2022-03-02 03:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('vit', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('color', models.CharField(choices=[('success', 'Green'), ('info', 'Blue'), ('link', 'Purple'), ('primary', 'Turquoise'), ('warning', 'Yellow'), ('danger', 'Red'), ('dark', 'Black'), ('white', 'White')], max_length=50)),
('special', models.BooleanField(default=False)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Requirments',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('description', models.TextField()),
('badge', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.badge')),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Abuse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('abuse_type', models.CharField(choices=[('ABUSE', 'Abuse'), ('INAPPROPRIATE', 'Inappropriate'), ('SPAM', 'Spam'), ('BULLYING', 'Bullying'), ('SEXUAL_CONTENT', 'Sexual Content'), ('OTHER', 'Other')], max_length=50)),
('description', models.TextField()),
('date', models.DateTimeField(auto_now_add=True)),
('to_vit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='vit.vit')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'Abuses',
'ordering': ['-date'],
},
),
]
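
# The Badge table above corresponds roughly to this model (reconstructed from
# the CreateModel operation, with the color choices abbreviated):
#
#   class Badge(models.Model):
#       name = models.CharField(max_length=50)
#       description = models.TextField()
#       color = models.CharField(max_length=50, choices=COLOR_CHOICES)
#       special = models.BooleanField(default=False)
#
#       class Meta:
#           ordering = ['name']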
| 0.992188 | 1 |
dddppp/settings.py | tysonclugg/dddppp | 0 | 271 | """
Django settings for dddppp project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import pkg_resources
import pwd
PROJECT_NAME = 'dddppp'
# Enforce a valid POSIX environment
# Get missing environment variables via call to pwd.getpwuid(...)
_PW_CACHE = None
_PW_MAP = {
'LOGNAME': 'pw_name',
'USER': 'pw_name',
'USERNAME': 'pw_name',
'UID': 'pw_uid',
'GID': 'pw_gid',
'HOME': 'pw_dir',
'SHELL': 'pw_shell',
}
for _missing_env in set(_PW_MAP).difference(os.environ):
if _PW_CACHE is None:
_PW_CACHE = pwd.getpwuid(os.getuid())
os.environ[_missing_env] = str(getattr(_PW_CACHE, _PW_MAP[_missing_env]))
del _PW_CACHE, _PW_MAP, pwd
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'dddp',
'dddp.server',
'dddp.accounts',
'dddppp.slides',
]
for (requirement, pth) in [
('django-extensions', 'django_extensions'),
]:
try:
pkg_resources.get_distribution(requirement)
except (
pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict,
):
continue
INSTALLED_APPS.append(pth)
MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#'django.middleware.security.SecurityMiddleware',
]
ROOT_URLCONF = 'dddppp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'dddppp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ.get('PGDATABASE', PROJECT_NAME),
'USER': os.environ.get('PGUSER', os.environ['LOGNAME']),
'PASSWORD': os.environ.get('DJANGO_DATABASE_PASSWORD', ''),
'HOST': os.environ.get('PGHOST', ''),
'PORT': os.environ.get('PGPORT', ''),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-au'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# django-secure
# see: https://github.com/carljm/django-secure/ for more options
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
#SECURE_SSL_REDIRECT = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_FRAME_DENY = True
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
DDDPPP_CONTENT_TYPES = []
PROJ_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
| 1.164063 | 1 |
app/main/config.py | nhattvm11/flask-restful-boilerplate | 0 | 279 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.getenv('SECRET_KEY', '')
DEBUG = False
class DevelopmentConfig(Config):
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
SQLALCHEMY_TRACK_MODIFICATIONS = False
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_main.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
DEBUG = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
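
# A minimal sketch of how config_by_name is typically consumed in an app
# factory; 'dev' is one of the three keys defined above:
def create_app(config_name='dev'):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config_by_name[config_name])
    return app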
| 1.375 | 1 |
tests/unittests/test_zoo.py | SaizhuoWang/carefree-learn | 0 | 287 | import os
import cflearn
import platform
import unittest
from cfdata.tabular import TabularDataset
num_jobs = 0 if platform.system() == "Linux" else 2
logging_folder = "__test_zoo__"
class TestZoo(unittest.TestCase):
@staticmethod
def _test_zoo_core(model: str) -> None:
x, y = TabularDataset.iris().xy
zoo_folder = os.path.join(logging_folder, f"__{model}__")
zoo = cflearn.Zoo(model)
for key, config in zoo.benchmarks.items():
local_logging_folder = os.path.join(zoo_folder, key)
config["logging_folder"] = local_logging_folder
m = cflearn.make(model, **config).fit(x, y)
cflearn.evaluate(x, y, pipelines=m)
cflearn._rmtree(logging_folder)
def test_fcnn_zoo(self) -> None:
self._test_zoo_core("fcnn")
def test_tree_dnn_zoo(self) -> None:
self._test_zoo_core("tree_dnn")
if __name__ == "__main__":
unittest.main()
| 1.210938 | 1 |
rpython/annotator/annrpython.py | microvm/pypy-mu | 0 | 295 | <filename>rpython/annotator/annrpython.py
from __future__ import absolute_import
import types
from collections import defaultdict
from rpython.tool.ansi_print import AnsiLogger
from rpython.tool.pairtype import pair
from rpython.tool.error import (format_blocked_annotation_error,
gather_error, source_lines)
from rpython.flowspace.model import Variable, Constant, checkgraph
from rpython.translator import simplify, transform
from rpython.annotator import model as annmodel, signature
from rpython.annotator.model import (
typeof, s_ImpossibleValue, SomeInstance, intersection, difference)
from rpython.annotator.bookkeeper import Bookkeeper
from rpython.rtyper.normalizecalls import perform_normalizations
log = AnsiLogger("annrpython")
class RPythonAnnotator(object):
"""Block annotator for RPython.
See description in doc/translation.txt."""
def __init__(self, translator=None, policy=None, bookkeeper=None):
import rpython.rtyper.extfuncregistry # has side effects
if translator is None:
# interface for tests
from rpython.translator.translator import TranslationContext
translator = TranslationContext()
translator.annotator = self
self.translator = translator
self.pendingblocks = {} # map {block: graph-containing-it}
self.annotated = {} # set of blocks already seen
self.added_blocks = None # see processblock() below
self.links_followed = {} # set of links that have ever been followed
self.notify = {} # {block: {positions-to-reflow-from-when-done}}
self.fixed_graphs = {} # set of graphs not to annotate again
self.blocked_blocks = {} # set of {blocked_block: (graph, index)}
# --- the following information is recorded for debugging ---
self.blocked_graphs = {} # set of graphs that have blocked blocks
# --- end of debugging information ---
self.frozen = False
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
self.policy = AnnotatorPolicy()
else:
self.policy = policy
if bookkeeper is None:
bookkeeper = Bookkeeper(self)
self.bookkeeper = bookkeeper
def __getstate__(self):
attrs = """translator pendingblocks annotated links_followed
notify bookkeeper frozen policy added_blocks""".split()
ret = self.__dict__.copy()
for key, value in ret.items():
if key not in attrs:
assert type(value) is dict, (
"%r is not dict. please update %s.__getstate__" %
(key, self.__class__.__name__))
ret[key] = {}
return ret
#___ convenience high-level interface __________________
def build_types(self, function, input_arg_types, complete_now=True,
main_entry_point=False):
"""Recursively build annotations about the specific entry point."""
assert isinstance(function, types.FunctionType), "fix that!"
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# make input arguments and set their type
args_s = [self.typeannotation(t) for t in input_arg_types]
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
flowgraph, inputs_s = self.get_call_parameters(function, args_s, policy)
if main_entry_point:
self.translator.entry_point_graph = flowgraph
return self.build_graph_types(flowgraph, inputs_s, complete_now=complete_now)
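
    # A typical driver for build_types(), sketched as it is used in tests
    # (assumes the usual TranslationContext helper):
    #
    #   from rpython.translator.translator import TranslationContext
    #
    #   def f(x):
    #       return x + 1
    #
    #   t = TranslationContext()
    #   annotator = t.buildannotator()
    #   s_result = annotator.build_types(f, [int])  # -> SomeInteger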
def get_call_parameters(self, function, args_s, policy):
desc = self.bookkeeper.getdesc(function)
prevpolicy = self.policy
self.policy = policy
self.bookkeeper.enter(None)
try:
return desc.get_call_parameters(args_s)
finally:
self.bookkeeper.leave()
self.policy = prevpolicy
def annotate_helper(self, function, args_s, policy=None):
if policy is None:
from rpython.annotator.policy import AnnotatorPolicy
policy = AnnotatorPolicy()
# XXX hack
annmodel.TLS.check_str_without_nul = (
self.translator.config.translation.check_str_without_nul)
graph, inputcells = self.get_call_parameters(function, args_s, policy)
self.build_graph_types(graph, inputcells, complete_now=False)
self.complete_helpers(policy)
return graph
def complete_helpers(self, policy):
saved = self.policy, self.added_blocks
self.policy = policy
try:
self.added_blocks = {}
self.complete()
# invoke annotation simplifications for the new blocks
self.simplify(block_subset=self.added_blocks)
finally:
self.policy, self.added_blocks = saved
def build_graph_types(self, flowgraph, inputcells, complete_now=True):
checkgraph(flowgraph)
nbarg = len(flowgraph.getargs())
assert len(inputcells) == nbarg # wrong number of args
# register the entry point
self.addpendinggraph(flowgraph, inputcells)
# recursively proceed until no more pending block is left
if complete_now:
self.complete()
return self.annotation(flowgraph.getreturnvar())
def gettype(self, variable):
"""Return the known type of a control flow graph variable,
defaulting to 'object'."""
if isinstance(variable, Constant):
return type(variable.value)
elif isinstance(variable, Variable):
s_variable = variable.annotation
if s_variable:
return s_variable.knowntype
else:
return object
else:
raise TypeError("Variable or Constant instance expected, "
"got %r" % (variable,))
def getuserclassdefinitions(self):
"""Return a list of ClassDefs."""
return self.bookkeeper.classdefs
#___ medium-level interface ____________________________
def addpendinggraph(self, flowgraph, inputcells):
self.addpendingblock(flowgraph, flowgraph.startblock, inputcells)
def addpendingblock(self, graph, block, cells):
"""Register an entry point into block with the given input cells."""
if graph in self.fixed_graphs:
# special case for annotating/rtyping in several phases: calling
# a graph that has already been rtyped. Safety-check the new
# annotations that are passed in, and don't annotate the old
# graph -- it's already low-level operations!
for a, s_newarg in zip(block.inputargs, cells):
s_oldarg = self.binding(a)
assert annmodel.unionof(s_oldarg, s_newarg) == s_oldarg
else:
assert not self.frozen
if block not in self.annotated:
self.bindinputargs(graph, block, cells)
else:
self.mergeinputargs(graph, block, cells)
if not self.annotated[block]:
self.pendingblocks[block] = graph
def complete_pending_blocks(self):
while self.pendingblocks:
block, graph = self.pendingblocks.popitem()
self.processblock(graph, block)
def complete(self):
"""Process pending blocks until none is left."""
while True:
self.complete_pending_blocks()
self.policy.no_more_blocks_to_annotate(self)
if not self.pendingblocks:
break # finished
        # make sure that the return variables of all graphs are annotated
if self.added_blocks is not None:
newgraphs = [self.annotated[block] for block in self.added_blocks]
newgraphs = dict.fromkeys(newgraphs)
got_blocked_blocks = False in newgraphs
else:
newgraphs = self.translator.graphs #all of them
got_blocked_blocks = False in self.annotated.values()
if got_blocked_blocks:
for graph in self.blocked_graphs.values():
self.blocked_graphs[graph] = True
blocked_blocks = [block for block, done in self.annotated.items()
if done is False]
assert len(blocked_blocks) == len(self.blocked_blocks)
text = format_blocked_annotation_error(self, self.blocked_blocks)
#raise SystemExit()
raise annmodel.AnnotatorError(text)
for graph in newgraphs:
v = graph.getreturnvar()
if v.annotation is None:
self.setbinding(v, s_ImpossibleValue)
def validate(self):
"""Check that the annotation results are valid"""
self.bookkeeper.check_no_flags_on_instances()
def annotation(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
if isinstance(arg, Variable):
return arg.annotation
elif isinstance(arg, Constant):
return self.bookkeeper.immutablevalue(arg.value)
else:
raise TypeError('Variable or Constant expected, got %r' % (arg,))
def binding(self, arg):
"Gives the SomeValue corresponding to the given Variable or Constant."
s_arg = self.annotation(arg)
if s_arg is None:
raise KeyError
return s_arg
def typeannotation(self, t):
return signature.annotation(t, self.bookkeeper)
def setbinding(self, arg, s_value):
s_old = arg.annotation
if s_old is not None:
if not s_value.contains(s_old):
log.WARNING("%s does not contain %s" % (s_value, s_old))
log.WARNING("%s" % annmodel.unionof(s_value, s_old))
assert False
arg.annotation = s_value
def warning(self, msg, pos=None):
if pos is None:
try:
pos = self.bookkeeper.position_key
except AttributeError:
pos = '?'
if pos != '?':
pos = self.whereami(pos)
log.WARNING("%s/ %s" % (pos, msg))
#___ interface for annotator.bookkeeper _______
def recursivecall(self, graph, whence, inputcells):
if isinstance(whence, tuple):
parent_graph, parent_block, parent_index = whence
tag = parent_block, parent_index
self.translator.update_call_graph(parent_graph, graph, tag)
# self.notify[graph.returnblock] is a dictionary of call
# points to this func which triggers a reflow whenever the
# return block of this graph has been analysed.
callpositions = self.notify.setdefault(graph.returnblock, {})
if whence is not None:
if callable(whence):
def callback():
whence(self, graph)
else:
callback = whence
callpositions[callback] = True
# generalize the function's input arguments
self.addpendingblock(graph, graph.startblock, inputcells)
# get the (current) return value
v = graph.getreturnvar()
try:
return self.binding(v)
except KeyError:
# the function didn't reach any return statement so far.
# (some functions actually never do, they always raise exceptions)
return s_ImpossibleValue
def reflowfromposition(self, position_key):
graph, block, index = position_key
self.reflowpendingblock(graph, block)
def call_sites(self):
newblocks = self.added_blocks
if newblocks is None:
newblocks = self.annotated # all of them
for block in newblocks:
for op in block.operations:
if op.opname in ('simple_call', 'call_args'):
yield op
# some blocks are partially annotated
if op.result.annotation is None:
break # ignore the unannotated part
#___ simplification (should be moved elsewhere?) _______
def simplify(self, block_subset=None, extra_passes=None):
# Generic simplifications
transform.transform_graph(self, block_subset=block_subset,
extra_passes=extra_passes)
if block_subset is None:
graphs = self.translator.graphs
else:
graphs = {}
for block in block_subset:
graph = self.annotated.get(block)
if graph:
graphs[graph] = True
for graph in graphs:
simplify.eliminate_empty_blocks(graph)
self.bookkeeper.compute_at_fixpoint()
if block_subset is None:
perform_normalizations(self)
#___ flowing annotations in blocks _____________________
def processblock(self, graph, block):
# Important: this is not called recursively.
# self.flowin() can only issue calls to self.addpendingblock().
# The analysis of a block can be in three states:
# * block not in self.annotated:
# never seen the block.
# * self.annotated[block] == False:
# the input variables of the block have bindings but we
# still have to consider all the operations in the block.
# * self.annotated[block] == graph-containing-block:
# analysis done (at least until we find we must generalize the
# input variables).
#print '* processblock', block, cells
self.annotated[block] = graph
if block in self.blocked_blocks:
del self.blocked_blocks[block]
try:
self.flowin(graph, block)
except BlockedInference as e:
self.annotated[block] = False # failed, hopefully temporarily
self.blocked_blocks[block] = (graph, e.opindex)
except Exception as e:
# hack for debug tools only
if not hasattr(e, '__annotator_block'):
setattr(e, '__annotator_block', block)
raise
# The dict 'added_blocks' is used by rpython.annlowlevel to
# detect which are the new blocks that annotating an additional
# small helper creates.
if self.added_blocks is not None:
self.added_blocks[block] = True
def reflowpendingblock(self, graph, block):
assert not self.frozen
assert graph not in self.fixed_graphs
self.pendingblocks[block] = graph
assert block in self.annotated
self.annotated[block] = False # must re-flow
self.blocked_blocks[block] = (graph, None)
def bindinputargs(self, graph, block, inputcells):
# Create the initial bindings for the input args of a block.
assert len(block.inputargs) == len(inputcells)
for a, cell in zip(block.inputargs, inputcells):
self.setbinding(a, cell)
self.annotated[block] = False # must flowin.
self.blocked_blocks[block] = (graph, None)
def mergeinputargs(self, graph, block, inputcells):
# Merge the new 'cells' with each of the block's existing input
# variables.
oldcells = [self.binding(a) for a in block.inputargs]
try:
unions = [annmodel.unionof(c1,c2) for c1, c2 in zip(oldcells,inputcells)]
except annmodel.UnionError as e:
# Add source code to the UnionError
e.source = '\n'.join(source_lines(graph, block, None, long=True))
raise
# if the merged cells changed, we must redo the analysis
if unions != oldcells:
self.bindinputargs(graph, block, unions)
def apply_renaming(self, s_out, renaming):
if hasattr(s_out, 'is_type_of'):
renamed_is_type_of = []
for v in s_out.is_type_of:
renamed_is_type_of += renaming[v]
assert s_out.knowntype is type
newcell = typeof(renamed_is_type_of)
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
if hasattr(s_out, 'knowntypedata'):
renamed_knowntypedata = {}
for value, constraints in s_out.knowntypedata.items():
renamed_knowntypedata[value] = {}
for v, s in constraints.items():
new_vs = renaming.get(v, [])
for new_v in new_vs:
renamed_knowntypedata[value][new_v] = s
assert isinstance(s_out, annmodel.SomeBool)
newcell = annmodel.SomeBool()
if s_out.is_constant():
newcell.const = s_out.const
s_out = newcell
s_out.set_knowntypedata(renamed_knowntypedata)
return s_out
def whereami(self, position_key):
graph, block, i = position_key
blk = ""
if block:
at = block.at()
if at:
blk = " block"+at
opid=""
if i is not None:
opid = " op=%d" % i
return repr(graph) + blk + opid
def flowin(self, graph, block):
try:
i = 0
while i < len(block.operations):
op = block.operations[i]
with self.bookkeeper.at_position((graph, block, i)):
new_ops = op.transform(self)
if new_ops is not None:
block.operations[i:i+1] = new_ops
if not new_ops:
continue
new_ops[-1].result = op.result
op = new_ops[0]
self.consider_op(op)
i += 1
except BlockedInference as e:
if e.op is block.raising_op:
# this is the case where the last operation of the block will
# always raise an exception which is immediately caught by
# an exception handler. We then only follow the exceptional
# branches.
exits = [link for link in block.exits
if link.exitcase is not None]
elif e.op.opname in ('simple_call', 'call_args', 'next'):
# XXX warning, keep the name of the call operations in sync
# with the flow object space. These are the operations for
# which it is fine to always raise an exception. We then
# swallow the BlockedInference and that's it.
# About 'next': see test_annotate_iter_empty_container().
return
else:
# other cases are problematic (but will hopefully be solved
# later by reflowing). Throw the BlockedInference up to
# processblock().
e.opindex = i
raise
except annmodel.HarmlesslyBlocked:
return
except annmodel.AnnotatorError as e: # note that UnionError is a subclass
e.source = gather_error(self, graph, block, i)
raise
else:
# dead code removal: don't follow all exits if the exitswitch
# is known
exits = block.exits
if isinstance(block.exitswitch, Variable):
s_exitswitch = self.binding(block.exitswitch)
if s_exitswitch.is_constant():
exits = [link for link in exits
if link.exitcase == s_exitswitch.const]
if block.canraise:
op = block.raising_op
s_exception = self.get_exception(op)
for link in exits:
case = link.exitcase
if case is None:
self.follow_link(graph, link, {})
continue
if s_exception == s_ImpossibleValue:
break
s_case = SomeInstance(self.bookkeeper.getuniqueclassdef(case))
s_matching_exc = intersection(s_exception, s_case)
if s_matching_exc != s_ImpossibleValue:
self.follow_raise_link(graph, link, s_matching_exc)
s_exception = difference(s_exception, s_case)
else:
if isinstance(block.exitswitch, Variable):
knowntypedata = getattr(
block.exitswitch.annotation, "knowntypedata", {})
else:
knowntypedata = {}
for link in exits:
constraints = knowntypedata.get(link.exitcase, {})
self.follow_link(graph, link, constraints)
if block in self.notify:
# reflow from certain positions when this block is done
for callback in self.notify[block]:
if isinstance(callback, tuple):
self.reflowfromposition(callback) # callback is a position
else:
callback()
def follow_link(self, graph, link, constraints):
assert not (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
ignore_link = False
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out in link.args:
s_out = self.annotation(v_out)
if v_out in constraints:
s_constraint = constraints[v_out]
s_out = pair(s_out, s_constraint).improve()
# ignore links that try to pass impossible values
if s_out == s_ImpossibleValue:
ignore_link = True
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
if ignore_link:
return
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
def follow_raise_link(self, graph, link, s_last_exc_value):
v_last_exc_type = link.last_exception
v_last_exc_value = link.last_exc_value
assert (isinstance(link.exitcase, (types.ClassType, type)) and
issubclass(link.exitcase, BaseException))
assert v_last_exc_type and v_last_exc_value
if isinstance(v_last_exc_value, Variable):
self.setbinding(v_last_exc_value, s_last_exc_value)
if isinstance(v_last_exc_type, Variable):
self.setbinding(v_last_exc_type, typeof([v_last_exc_value]))
inputs_s = []
renaming = defaultdict(list)
for v_out, v_input in zip(link.args, link.target.inputargs):
renaming[v_out].append(v_input)
for v_out, v_input in zip(link.args, link.target.inputargs):
if v_out == v_last_exc_type:
s_out = typeof(renaming[v_last_exc_value])
if isinstance(v_last_exc_type, Constant):
s_out.const = v_last_exc_type.value
elif v_last_exc_type.annotation.is_constant():
s_out.const = v_last_exc_type.annotation.const
inputs_s.append(s_out)
else:
s_out = self.annotation(v_out)
s_out = self.apply_renaming(s_out, renaming)
inputs_s.append(s_out)
self.links_followed[link] = True
self.addpendingblock(graph, link.target, inputs_s)
#___ creating the annotations based on operations ______
def consider_op(self, op):
# let's be careful about avoiding propagated SomeImpossibleValues
# to enter an op; the latter can result in violations of the
# more general results invariant: e.g. if SomeImpossibleValue enters is_
# is_(SomeImpossibleValue, None) -> SomeBool
# is_(SomeInstance(not None), None) -> SomeBool(const=False) ...
# boom -- in the assert of setbinding()
for arg in op.args:
if isinstance(self.annotation(arg), annmodel.SomeImpossibleValue):
raise BlockedInference(self, op, -1)
resultcell = op.consider(self)
if resultcell is None:
resultcell = s_ImpossibleValue
elif resultcell == s_ImpossibleValue:
raise BlockedInference(self, op, -1) # the operation cannot succeed
assert isinstance(resultcell, annmodel.SomeObject)
assert isinstance(op.result, Variable)
self.setbinding(op.result, resultcell) # bind resultcell to op.result
def get_exception(self, operation):
"""
Return the annotation for all exceptions that `operation` may raise.
"""
can_only_throw = operation.get_can_only_throw(self)
if can_only_throw is None:
return SomeInstance(self.bookkeeper.getuniqueclassdef(Exception))
else:
return self.bookkeeper.new_exception(can_only_throw)
class BlockedInference(Exception):
"""This exception signals the type inference engine that the situation
is currently blocked, and that it should try to progress elsewhere."""
def __init__(self, annotator, op, opindex):
self.annotator = annotator
try:
self.break_at = annotator.bookkeeper.position_key
except AttributeError:
self.break_at = None
self.op = op
self.opindex = opindex
def __repr__(self):
if not self.break_at:
break_at = "?"
else:
break_at = self.annotator.whereami(self.break_at)
return "<BlockedInference break_at %s [%s]>" %(break_at, self.op)
__str__ = __repr__
| 1.789063 | 2 |
png/imageRecognition_Simple.py | tanthanadon/senior | 0 | 303 | from math import sqrt
from skimage import data
from skimage.feature import blob_dog, blob_log, blob_doh
from skimage.color import rgb2gray
from skimage import io
import matplotlib.pyplot as plt
image = io.imread("star.jpg")
image_gray = rgb2gray(image)
blobs_log = blob_log(image_gray, max_sigma=30, num_sigma=10, threshold=.1)
# Convert sigma (3rd column) into an approximate blob radius: r = sqrt(2) * sigma for 2-D blobs.
blobs_log[:, 2] = blobs_log[:, 2] * sqrt(2)
blobs_dog = blob_dog(image_gray, max_sigma=30, threshold=.1)
blobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)
blobs_doh = blob_doh(image_gray, max_sigma=30, threshold=.01)
blobs_list = [blobs_log, blobs_dog, blobs_doh]
colors = ['yellow', 'lime', 'red']
titles = ['Laplacian of Gaussian', 'Difference of Gaussian',
'Determinant of Hessian']
sequence = zip(blobs_list, colors, titles)
fig, axes = plt.subplots(1, 3, figsize=(9, 3), sharex=True, sharey=True)
ax = axes.ravel()
for idx, (blobs, color, title) in enumerate(sequence):
ax[idx].set_title(title)
ax[idx].imshow(image)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=2, fill=False)
ax[idx].add_patch(c)
ax[idx].set_axis_off()
plt.tight_layout()
plt.show() | 2.109375 | 2 |
app/logic/httpcommon/Page.py | imvu/bluesteel | 10 | 311 | <filename>app/logic/httpcommon/Page.py<gh_stars>1-10
""" Page object file """
class Page():
""" Page object, it contains information about the pare we are refering, index, items per page, etc. """
page_index = 0
items_per_page = 0
def __init__(self, items_per_page, page_index):
""" Creates the page """
self.page_index = int(page_index)
self.items_per_page = int(items_per_page)
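
# A minimal usage sketch, e.g. for paginating a database query:
if __name__ == '__main__':
    page = Page(items_per_page=25, page_index=3)
    offset = page.page_index * page.items_per_page  # index of the first item
    print(offset, page.items_per_page)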
| 1.320313 | 1 |
app/api/v1/views/auth_views.py | emdeechege/Questionaire-API | 0 | 319 | <filename>app/api/v1/views/auth_views.py
from flask import jsonify, Blueprint, request, json, make_response
from werkzeug.security import generate_password_hash, check_password_hash
from datetime import datetime
from ..utils.validators import Validation
from ..models.auth_models import Users
v1_auth_blueprint = Blueprint('auth', __name__, url_prefix='/api/v1')
USER = Users()
VALIDATOR = Validation()
@v1_auth_blueprint.route('/signup', methods=['POST'])
def signup():
"""View that controls creation of new users"""
try:
data = request.get_json()
    except Exception:
return jsonify({
"status": 400,
"message": "Invalid input"
}), 400
firstname = data.get('firstname')
lastname = data.get('lastname')
othername = data.get('othername')
email = data.get('email')
phone_number = data.get('phone_number')
username = data.get('username')
is_admin = data.get('is_admin')
password = data.get('password')
if not firstname or not firstname.split():
return make_response(jsonify({
"status": 400,
"message": "Firstname is required"
})), 400
if not lastname or not lastname.split():
return make_response(jsonify({
"status": 400,
"message": "Lastname is required"
})), 400
if not email or not email.split():
return make_response(jsonify({
"status": 400,
"message": "Email is required"
})), 400
if not phone_number:
return make_response(jsonify({
"status": 400,
"message": "Phone number is required"
})), 400
if not username or not username.split():
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password or not password.split():
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.validate_phone_number(phone_number):
return jsonify({
"status": 400,
"message": "Please input valid phone number"
}), 400
if VALIDATOR.validate_password(password):
return jsonify({
"status": 400,
"message": "Password not valid"
}), 400
if not VALIDATOR.validate_email(email):
return jsonify({
"status": 400,
"message": "Invalid email"
}), 400
if VALIDATOR.username_exists(username):
return jsonify({
"status": 400,
"message": "Username exists"
}), 400
if VALIDATOR.email_exists(email):
return jsonify({
"status": 400,
"message": "Email exists"
}), 400
password = generate_password_hash(
password, method='pbkdf2:sha256', salt_length=8)
res = USER.signup(
firstname, lastname, othername, email, phone_number, username, is_admin, password)
return jsonify({
"status": 201,
"data": [{
"firstname": firstname,
"lastname": lastname,
"othername": othername,
"email": email,
"phone_number": phone_number,
"username": username,
"is_admin": is_admin
}]
}), 201
@v1_auth_blueprint.route('/login', methods=['POST'])
def login():
""" A view to control users login """
try:
data = request.get_json()
    except Exception:
return make_response(jsonify({
"status": 400,
"message": "Wrong input"
})), 400
username = data.get('username')
password = data.get('password')
if not username:
return make_response(jsonify({
"status": 400,
"message": "Username is required"
})), 400
if not password:
return make_response(jsonify({
"status": 400,
"message": "Password is required"
})), 400
if not VALIDATOR.username_exists(username):
return jsonify({
"status": 404,
"message": "User does not exist"
}), 404
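    # Note: the password is never compared against the stored hash; a complete
    # login flow would call check_password_hash(stored_hash, password) here.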
    auth_token = USER.generate_auth_token(username)
return make_response(jsonify({
"status": 200,
"message": 'Logged in successfuly',
"token": auth_token
})), 200
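
# A client-side sketch of the two endpoints above (hypothetical local server;
# the email and password values are placeholders):
if __name__ == '__main__':
    import requests
    base = 'http://localhost:5000/api/v1'
    requests.post(base + '/signup', json={
        'firstname': 'Ada', 'lastname': 'Lovelace', 'othername': '',
        'email': '<EMAIL>', 'phone_number': '0712345678',
        'username': 'ada', 'is_admin': False, 'password': '<PASSWORD>'})
    resp = requests.post(base + '/login',
                         json={'username': 'ada', 'password': '<PASSWORD>'})
    print(resp.json())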
| 1.695313 | 2 |
iap/validate_jwt.py | spitfire55/python-docs-samples | 4 | 327 | <gh_stars>1-10
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample showing how to validate the Identity-Aware Proxy (IAP) JWT.
This code should be used by applications in Google Compute Engine-based
environments (such as Google App Engine flexible environment, Google
Compute Engine, or Google Container Engine) to provide an extra layer
of assurance that a request was authorized by IAP.
For applications running in the App Engine standard environment, use
App Engine's Users API instead.
"""
# [START iap_validate_jwt]
import jwt
import requests
def validate_iap_jwt_from_app_engine(iap_jwt, cloud_project_number,
cloud_project_id):
"""Validate a JWT passed to your App Engine app by Identity-Aware Proxy.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
cloud_project_id: The project *ID* for your Google Cloud project.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/apps/{}'.format(
cloud_project_number, cloud_project_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def validate_iap_jwt_from_compute_engine(iap_jwt, cloud_project_number,
backend_service_id):
"""Validate an IAP JWT for your (Compute|Container) Engine service.
Args:
iap_jwt: The contents of the X-Goog-IAP-JWT-Assertion header.
cloud_project_number: The project *number* for your Google Cloud project.
This is returned by 'gcloud projects describe $PROJECT_ID', or
in the Project Info card in Cloud Console.
backend_service_id: The ID of the backend service used to access the
application. See
https://cloud.google.com/iap/docs/signed-headers-howto
for details on how to get this value.
Returns:
(user_id, user_email, error_str).
"""
expected_audience = '/projects/{}/global/backendServices/{}'.format(
cloud_project_number, backend_service_id)
return _validate_iap_jwt(iap_jwt, expected_audience)
def _validate_iap_jwt(iap_jwt, expected_audience):
try:
key_id = jwt.get_unverified_header(iap_jwt).get('kid')
if not key_id:
return (None, None, '**ERROR: no key ID**')
key = get_iap_key(key_id)
decoded_jwt = jwt.decode(
iap_jwt, key,
algorithms=['ES256'],
audience=expected_audience)
return (decoded_jwt['sub'], decoded_jwt['email'], '')
except (jwt.exceptions.InvalidTokenError,
requests.exceptions.RequestException) as e:
return (None, None, '**ERROR: JWT validation error {}**'.format(e))
def get_iap_key(key_id):
"""Retrieves a public key from the list published by Identity-Aware Proxy,
re-fetching the key file if necessary.
"""
key_cache = get_iap_key.key_cache
key = key_cache.get(key_id)
if not key:
# Re-fetch the key file.
resp = requests.get(
'https://www.gstatic.com/iap/verify/public_key')
if resp.status_code != 200:
raise Exception(
'Unable to fetch IAP keys: {} / {} / {}'.format(
resp.status_code, resp.headers, resp.text))
key_cache = resp.json()
get_iap_key.key_cache = key_cache
key = key_cache.get(key_id)
if not key:
raise Exception('Key {!r} not found'.format(key_id))
return key
# Used to cache the Identity-Aware Proxy public keys. This code only
# refetches the file when a JWT is signed with a key not present in
# this cache.
get_iap_key.key_cache = {}
# [END iap_validate_jwt]
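
# A minimal caller sketch; the project number/id and the JWT header value are
# placeholders (in production the header is set by the IAP load balancer):
if __name__ == '__main__':
    headers = {'X-Goog-IAP-JWT-Assertion': '<jwt-from-iap>'}
    user_id, user_email, error = validate_iap_jwt_from_app_engine(
        headers['X-Goog-IAP-JWT-Assertion'], '123456789', 'my-project-id')
    if error:
        raise RuntimeError(error)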
| 1.71875 | 2 |
projects/eyetracking/gen_adhd_sin.py | nirdslab/streaminghub | 0 | 335 | <reponame>nirdslab/streaminghub
#!/usr/bin/env python3
import glob
import os
import pandas as pd
import dfs
SRC_DIR = f"{dfs.get_data_dir()}/adhd_sin_orig"
OUT_DIR = f"{dfs.get_data_dir()}/adhd_sin"
if __name__ == '__main__':
files = glob.glob(f"{SRC_DIR}/*.csv")
file_names = list(map(os.path.basename, files))
for file_name in file_names:
df: pd.DataFrame = pd.read_csv(f'{SRC_DIR}/{file_name}').set_index('EyeTrackerTimestamp').sort_index()[
['GazePointX (ADCSpx)', 'GazePointY (ADCSpx)', 'PupilLeft', 'PupilRight']].reset_index()
df.columns = ['t', 'x', 'y', 'dl', 'dr']
        # fill gaps: interpolate interior samples, back/forward-fill the edges, then zero-fill any remainder
df = df.apply(lambda x: x.interpolate().fillna(method="bfill").fillna(method="ffill")).fillna(0)
df['x'] = df['x'] / 1920
df['y'] = df['y'] / 1080
df['d'] = (df['dl'] + df['dr']) / 2
# start with t=0, and set unit to ms
df['t'] = (df['t'] - df['t'].min()) / 1000
df = df[['t', 'x', 'y', 'd']].round(6).set_index('t')
df.to_csv(f'{OUT_DIR}/{file_name}')
print(f'Processed: {file_name}')
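
    # A toy check of the gap-filling chain used above: the interior NaN is
    # interpolated and the edge NaNs are back/forward-filled before zero-fill.
    import numpy as np
    s = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan])
    print(s.interpolate().fillna(method="bfill").fillna(method="ffill").fillna(0))
    # -> 1.0, 1.0, 2.0, 3.0, 3.0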
| 1.75 | 2 |