==== brains/namelist/models.py (crisisking/udbraaains) ====
import datetime
from django.db import models
class Category(models.Model):
class Meta:
verbose_name_plural = 'categories'
name = models.CharField(max_length=25, null=False, blank=False)
color_code = models.CharField(max_length=7, null=False, blank=False)
def __unicode__(self):
return self.name
class Player(models.Model):
name = models.CharField(max_length=50, null=False, db_index=True)
profile_id = models.IntegerField(null=False, unique=True, db_index=True)
group_name = models.CharField(max_length=50, blank=True, null=True,
default=None, db_index=True)
category = models.ForeignKey(Category, null=True, blank=True)
join_date = models.DateTimeField(default=datetime.datetime.now)
    scrape_date = models.DateTimeField(auto_now=True)  # auto_now and auto_now_add are mutually exclusive in Django
is_dead = models.BooleanField(default=False, db_index=True)
def last_known_position(self):
"""Grabs the player's last known location from the report set."""
try:
last_filed = self.report_set.filter(zombies_only=False)
last_filed = last_filed.order_by('-reported_date')[0]
except IndexError:
last_filed = None
try:
last_spotted = self.reported_at.order_by('-reported_date')[0]
except IndexError:
last_spotted = None
if last_filed is None and last_spotted is None:
return u"Never seen"
else:
if last_filed is None:
return last_spotted
elif last_spotted is None:
return last_filed
else:
if last_filed.reported_date >= last_spotted.reported_date:
return last_filed
else:
return last_spotted
def __unicode__(self):
return self.name
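
# Note (added for clarity): `report_set` and `reported_at` are reverse
# relations from a Report model defined outside this file. A shape consistent
# with the lookups in last_known_position() would be roughly:
#
#   class Report(models.Model):
#       player = models.ForeignKey(Player)  # reverse accessor: report_set
#       reported_at = models.ForeignKey(Player, related_name='reported_at')
#       zombies_only = models.BooleanField(default=False)
#       reported_date = models.DateTimeField()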
==== templates/includes/loader.py (angeal185/flask-jinja-greensock-portfolio-webapp) ====
<script id="sf" type="x-shader/x-fragment">
precision highp float;
uniform float time;
uniform vec2 mouse;
uniform vec2 resolution;
float ball(vec2 p, float k, float d) {
vec2 r = vec2(p.x - cos(time * k) * d, p.y + sin(time * k) * d);
return smoothstep(0.0, 1.0, 0.03 / length(r));
}
void main(void) {
vec2 q = gl_FragCoord.xy / resolution.xy;
vec2 p = -1.0 + 2.0 * q;
p.x *= resolution.x / resolution.y;
float col = 0.0;
for (int i = 1; i <= 7; ++i) {
col += ball(p, float(i), 0.3);
}
for (int i = 1; i <= 5; ++i) {
col += ball(p, float(i), 0.1);
}
gl_FragColor = vec4(col*0.8, col, col*1.8, 1.0);
}
</script>
<script id="sv" type="x-shader/x-vertex">
attribute vec4 vPosition;
void main (void) {
gl_Position = vPosition;
}
</script>
<canvas id="cnv"></canvas> | 1.8125 | 2 |
==== main.py (zeek0x/common) ====
import sys
from urllib import request, parse, error
from multiprocessing import Process
urls = [
'https://github.com/',
'https://twitter.com/',
'https://hub.docker.com/v2/users/'
]
def inspect_status_code(url):
try:
response = request.urlopen(url)
return response.code
except error.HTTPError as e:
return e.code
def inspect(url, user_id):
code = inspect_status_code(url+user_id)
title = parse.urlparse(url).netloc
prefix = '\033[32m' if code == 404 else '\033[31m'
suffix = '\033[0m'
result = '{}{}{}'.format(prefix, code, suffix)
print(title.ljust(16), result)
def main():
if len(sys.argv) < 2:
print('usage: python3 main.py ${USER_ID}')
exit(1)
user_id = sys.argv[1]
    ps = [Process(target=inspect, args=(url, user_id)) for url in urls]
    for p in ps:
        p.start()
    for p in ps:
        p.join()
if __name__ == '__main__':
main()
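
# Usage sketch (added; the username below is only an example, and the status
# codes will vary):
#
#   $ python3 main.py octocat
#   github.com       200
#   twitter.com      404
#   hub.docker.com   200
#
# A 404 prints in green (the name is free on that site), anything else in red.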
==== randaugment.py (Hayoung93/UDA) ====
import torch
import torch.nn as nn
from torchvision import transforms as ttf
class RandAugment(nn.Module):
def __init__(self, N, M):
super().__init__()
"""
rotate
shear x
shear y
translate y
translate x
autoContrast
sharpness
identity
contrast
color
brightness
        equalize
solarize
posterize
"""
self.N = N
self.M = M
self.aug_list = [Rotate, ShearX, ShearY, TranslateX, TranslateY, AutoContrast,
Sharpness, Identity, Contrast, Color, Brightness, Equalize,
Solarize, Posterize]
def forward(self, img):
self.aug_index = torch.randperm(len(self.aug_list))[:self.N]
self.augmentations = nn.ModuleList([])
for aug_id in self.aug_index:
self.augmentations.append(self.aug_list[aug_id](self.M))
self.augmentations = nn.Sequential(*self.augmentations)
return self.augmentations(img)
class Rotate(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M
def forward(self, img):
return ttf.functional.rotate(img, self.angle)
class ShearX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [self.angle, 0])
class ShearY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
self.angle = 359 / 10 * self.M - 180
def forward(self, img):
return ttf.functional.affine(img, 0, [0, 0], 1, [0, self.angle])
class TranslateX(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[0]  # torch.Tensor: size() is callable
        except TypeError:
            max_size = img.size[0]  # PIL.Image: size is a tuple attribute, not callable
return ttf.functional.affine(img, 0, [(max_size - 1) / 10 * self.M, 0], 1, [0, 0])
class TranslateY(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        try:
            max_size = img.size()[1]  # torch.Tensor: size() is callable
        except TypeError:
            max_size = img.size[1]  # PIL.Image: size is a tuple attribute, not callable
return ttf.functional.affine(img, 0, [0, (max_size - 1) / 10 * self.M], 1, [0, 0])
class AutoContrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.autocontrast(img)
class Sharpness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_sharpness(img, self.M / 5.)
class Identity(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return img
class Contrast(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_contrast(img, self.M / 5.)
class Color(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_saturation(img, self.M / 5.)
class Brightness(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.adjust_brightness(img, self.M / 5.)
class Equalize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.equalize(img)
class Solarize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
return ttf.functional.solarize(img, (10 - self.M) * 25.5)
class Posterize(nn.Module):
def __init__(self, M):
super().__init__()
self.M = M
def forward(self, img):
        return ttf.functional.posterize(img, round((10 - self.M) / 10 * 8))
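
# Usage sketch (added for illustration; assumes a torchvision release whose
# functional transforms accept PIL images, roughly >= 0.9).
if __name__ == "__main__":
    from PIL import Image

    img = Image.new("RGB", (224, 224), color=(120, 60, 30))
    augment = RandAugment(N=2, M=9)  # two random ops at magnitude 9
    out = augment(img)
    print(out.size)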
==== day18/test/test_main.py (JoseTomasTocino/AdventOfCode2020) ====
import logging
import os.path
from day18.code.main import evaluate_expression
logger = logging.getLogger(__name__)
local_path = os.path.abspath(os.path.dirname(__file__))
sample_input = None
def test_sample_input(caplog):
# caplog.set_level(logging.INFO)
assert evaluate_expression("1 + 2 * 3 + 4 * 5 + 6") == 71
assert evaluate_expression("1 + (2 * 3) + (4 * (5 + 6))") == 51
assert evaluate_expression("2 * 3 + (4 * 5)") == 26
assert evaluate_expression("5 + (8 * 3 + 9 + 3 * 4 * 3)") == 437
assert evaluate_expression("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))") == 12240
assert evaluate_expression("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2") == 13632
def test_sample_input_with_advanced_priorities(caplog):
# caplog.set_level(logging.INFO)
assert evaluate_expression("1 + 2 * 3 + 4 * 5 + 6", use_advanced_precedence=True) == 231
assert evaluate_expression("1 + (2 * 3) + (4 * (5 + 6))", use_advanced_precedence=True) == 51
assert evaluate_expression("2 * 3 + (4 * 5)", use_advanced_precedence=True) == 46
assert evaluate_expression("5 + (8 * 3 + 9 + 3 * 4 * 3)", use_advanced_precedence=True) == 1445
assert evaluate_expression("5 * 9 * (7 * 3 * 3 + 9 * 3 + (8 + 6 * 4))", use_advanced_precedence=True) == 669060
assert evaluate_expression("((2 + 4 * 9) * (6 + 9 * 8 + 6) + 6) + 2 + 4 * 2", use_advanced_precedence=True) == 23340
def test_big_input(caplog):
# caplog.set_level(logging.INFO)
with open(os.path.join(local_path, "input"), "r") as f:
content = f.read()
assert sum(evaluate_expression(x) for x in content.split("\n") if x) == 4696493914530
assert sum(evaluate_expression(x, use_advanced_precedence=True) for x in content.split("\n") if x) == 362880372308125
==== bin/config-get.py (chrisbrierley/jaspy-manager) ====
#!/usr/bin/env python
import os
import json
import sys
import argparse
def _find_config_file():
config = 'etc/minicondas.json'
while not os.path.isfile(config):
config = '../{}'.format(config)
if len(config) > 70:
raise Exception('Cannot locate config file "etc/minicondas.json".')
return config
def _get(py_version, miniconda_version, attribute):
config = _find_config_file()
with open(config) as reader:
data = json.load(reader)
if miniconda_version == 'latest':
_all_versions = [i.split('-')[1] for i in data['minicondas'][py_version].keys()]
m_start = 'm' + py_version.replace('py', '')[0]
_av_ints = sorted([[int(i) for i in item.split('.')] for item in _all_versions])
_all_versions = ['.'.join([str(item) for item in items]) for items in _av_ints]
miniconda_version = m_start + '-' + _all_versions[-1]
try:
attr = data['minicondas'][py_version][miniconda_version][attribute]
except:
print('Could not find {} attribute for python version: "{}"'.format(attribute, py_version))
return attr
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("py_version", type=str, help="Python version")
parser.add_argument("attribute", type=str, choices=['url', 'md5', 'short_id'],
help="Attribute")
parser.add_argument('-m', '--miniconda-version', default='latest',
help='Add Miniconda version (or use "latest").',
type=str)
args = parser.parse_args()
print(_get(args.py_version, args.miniconda_version, args.attribute))
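
# Example invocation (added; the version string is illustrative only):
#
#   $ ./config-get.py py3.8 url -m latest
#
# This prints the download URL recorded in etc/minicondas.json for the newest
# Miniconda build of that Python series.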
==== tuba/run_disc.py (korhanpolat/phoenix_term_discovery) ====
import argparse
import sys
from os.path import join
from os import chdir
import subprocess
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--sge', type=str, default='nosge')
parser.add_argument('-l', '--filelist', type=str, default='')
parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools')
args = parser.parse_args()
chdir(args.zr_root)
command = './run_disc {} {}'.format(args.sge,args.filelist)
print(command)
subprocess.call(command.split())
==== nsd1806/python/day08/tcpserv2.py (MrWangwf/nsd1806) ====
import socket
host = ''
port = 12345
addr = (host, port)
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(1)
while True:
try:
cli_sock, cli_addr = s.accept()
except KeyboardInterrupt:
break
print('Hello,', cli_addr)
while True:
        data = cli_sock.recv(1024).decode()  # decode bytes into str
if data.strip() == 'quit':
break
print(data)
sdata = input('> ') + '\r\n'
        cli_sock.send(sdata.encode())  # encode str into bytes
cli_sock.close()
s.close()
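
# Client-side sketch (added for illustration; run the server first):
#
#   import socket
#   c = socket.create_connection(('127.0.0.1', 12345))
#   c.send(b'hello\r\n')
#   print(c.recv(1024).decode())
#   c.send(b'quit\r\n')
#   c.close()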
==== app_modules/widgets/numpad.py (l337quez/Aplicaci-n-ANDROID-para-control-del-suministro-de-energia-) ====
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
Builder.load_string('''
<NumpadBlueButton@BlueButton>:
on_release: self.parent.select_btn(self.text)
<Numpad>:
cols: 3
rows: 4
spacing: 3, 3
NumpadBlueButton:
text: '1'
NumpadBlueButton:
text: '2'
NumpadBlueButton:
text: '3'
NumpadBlueButton:
text: '4'
NumpadBlueButton:
text: '5'
NumpadBlueButton:
text: '6'
NumpadBlueButton:
text: '7'
NumpadBlueButton:
text: '8'
NumpadBlueButton:
text: '9'
<ActivityNumpad>:
cols: 1
rows: 2
spacing: 3, 3
BoxLayout:
size_hint_y: 0.5
orientation: 'horizontal'
BlueButton:
size_hint: 0.2, 1
text: '1'
on_release: root.select_btn(1)
BlueButton:
size_hint: 0.8, 1
text: 'App'
on_release: root.select_btn(1)
BoxLayout:
size_hint_y: 0.5
orientation: 'horizontal'
BlueButton:
size_hint: 0.2, 1
text: '2'
on_release: root.select_btn(2)
BlueButton:
size_hint: 0.8, 1
text: 'Service'
on_release: root.select_btn(2)
''')
class Numpad(GridLayout):
selected_btn = 1
callback = None
def select_btn(self, num):
self.selected_btn = num
if self.callback:
self.callback(num)
class ActivityNumpad(GridLayout):
selected_btn = 1
callback = None
def select_btn(self, num):
self.selected_btn = num
if self.callback:
self.callback(num)
==== frame/snake.py (Rosikobu/snake-reloaded) ====
import pygame, sys
import time
from pygame.math import Vector2
from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING
from .eatable.saw import Saw
from .eatable.cake import Cake
class Snake(object):
is_moving = False
def __init__(self, screen: pygame.Surface) -> None:
self.load_snake_texture()
self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)]
self.pyScreen = screen
self.direction = Vector2(1,0)
self.new_block = False
self.slowed = False
def draw_snake_object(self) -> None:
for index, block in enumerate(self.body):
# rect for positioning
x_pos = int(block.x * cell_size)
y_pos = int(block.y * cell_size)
block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size)
            # pick the sprite that matches this segment's orientation
if index == 0:
self.pyScreen.blit(self.head,block_rect)
elif index == len(self.body) - 1:
self.pyScreen.blit(self.tail,block_rect)
else:
previous_block = self.body[index + 1] - block
next_block = self.body[index - 1] - block
if previous_block.x == next_block.x:
self.pyScreen.blit(self.body_vertical, block_rect)
elif previous_block.y == next_block.y:
self.pyScreen.blit(self.body_horizontal, block_rect)
else:
if previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == -1:
self.pyScreen.blit(self.body_tl, block_rect)
elif previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == -1:
self.pyScreen.blit(self.body_bl, block_rect)
elif previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == 1:
self.pyScreen.blit(self.body_tr, block_rect)
elif previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == 1:
self.pyScreen.blit(self.body_br, block_rect)
def draw_snake(self) -> None:
# Update Snake-Model
self.update_head_graphics()
self.update_tail_graphics()
self.draw_snake_object()
    def update_tail_graphics(self) -> None:
tail_relation = self.body[-2] - self.body[-1]
if tail_relation == Vector2(-1,0): self.tail = self.tail_left
elif tail_relation == Vector2(1,0): self.tail = self.tail_right
elif tail_relation == Vector2(0,-1): self.tail = self.tail_up
elif tail_relation == Vector2(0,1): self.tail = self.tail_down
    def update_head_graphics(self) -> None:
head_relation = self.body[1] - self.body[0]
if head_relation == Vector2(-1,0): self.head = self.head_left
elif head_relation == Vector2(1,0): self.head = self.head_right
elif head_relation == Vector2(0,-1): self.head = self.head_up
elif head_relation == Vector2(0,1): self.head = self.head_down
def move_snake(self) -> None:
if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1):
if self.new_block == True:
body_copy = self.body[:]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
if Cake.eated_the_cake():
if Cake.get_cake_countdown() != 0:
Cake.decrase_cake_countdown()
else:
Cake.remove_cake()
self.new_block = False
else:
self.new_block = False
else:
body_copy = self.body[:-1]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
else:
self.new_block = False
body_copy = self.body[:CUTTING]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
Saw.cutting_done()
Snake.is_moving = False
    def set_direction(self, vec) -> None:
#Snake.is_moving = True
self.direction = vec
def add_block(self) -> None:
self.new_block = True
    def load_snake_texture(self) -> None:
        # head sprites
self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png')
self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png')
self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png')
self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png')
        # tail sprites
self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png')
self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png')
self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png')
self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png')
        # body sprites
self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png')
self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png')
        # corner sprites
self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png')
self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png')
self.body_br = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png')
        self.body_bl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_unten.png')
==== nitro/resource/stat/snmp/snmp_stats.py (HanseMerkur/nitro-python) ====
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class snmp_stats(base_resource) :
""" """
def __init__(self) :
self._clearstats = ""
self._snmptotrxpkts = 0
self._snmprxpktsrate = 0
self._snmptottxpkts = 0
self._snmptxpktsrate = 0
self._snmptotgetreqs = 0
self._snmpgetreqsrate = 0
self._snmptotgetnextreqs = 0
self._snmpgetnextreqsrate = 0
self._snmptotgetbulkreqs = 0
self._snmpgetbulkreqsrate = 0
self._snmptotresponses = 0
self._snmpresponsesrate = 0
self._snmptottraps = 0
self._snmptoterrreqdropped = 0
self._snmptotparseerrs = 0
self._snmptotbadversions = 0
self._snmptotbadcommname = 0
self._snmptotbadcommuse = 0
self._snmpunsupportedsecuritylevel = 0
self._snmpnotintimewindow = 0
self._snmpunknownusername = 0
self._snmpunknownengineids = 0
self._snmpwrongdigests = 0
self._snmpdecryptionerrors = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full."""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
:param clearstats:
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def snmpdecryptionerrors(self) :
"""SNMP packets that were dropped because they could not be decrypted."""
try :
return self._snmpdecryptionerrors
except Exception as e:
raise e
@property
def snmptotresponses(self) :
"""SNMP Get-Response PDUs that have been generated by the NetScaler."""
try :
return self._snmptotresponses
except Exception as e:
raise e
@property
def snmptotbadcommuse(self) :
"""The total number of SNMP Messages received that represented an SNMP operation which was not allowed by the SNMP community named in the Message."""
try :
return self._snmptotbadcommuse
except Exception as e:
raise e
@property
def snmptoterrreqdropped(self) :
"""SNMP requests dropped."""
try :
return self._snmptoterrreqdropped
except Exception as e:
raise e
@property
def snmpgetnextreqsrate(self) :
"""Rate (/s) counter for snmptotgetnextreqs."""
try :
return self._snmpgetnextreqsrate
except Exception as e:
raise e
@property
def snmptotrxpkts(self) :
"""SNMP packets received."""
try :
return self._snmptotrxpkts
except Exception as e:
raise e
@property
def snmptottxpkts(self) :
"""SNMP packets transmitted."""
try :
return self._snmptottxpkts
except Exception as e:
raise e
@property
def snmptotparseerrs(self) :
"""Number of ASN.1 or BER errors encountered when decoding received SNMP Messages."""
try :
return self._snmptotparseerrs
except Exception as e:
raise e
@property
def snmptottraps(self) :
"""SNMP Trap PDUs that have been generated by the NetScaler."""
try :
return self._snmptottraps
except Exception as e:
raise e
@property
def snmptotbadversions(self) :
"""Number of SNMP messages received, which were for an unsupported SNMP version."""
try :
return self._snmptotbadversions
except Exception as e:
raise e
@property
def snmptxpktsrate(self) :
"""Rate (/s) counter for snmptottxpkts."""
try :
return self._snmptxpktsrate
except Exception as e:
raise e
@property
def snmpresponsesrate(self) :
"""Rate (/s) counter for snmptotresponses."""
try :
return self._snmpresponsesrate
except Exception as e:
raise e
@property
def snmpgetreqsrate(self) :
"""Rate (/s) counter for snmptotgetreqs."""
try :
return self._snmpgetreqsrate
except Exception as e:
raise e
@property
def snmptotbadcommname(self) :
"""SNMP messages received, which used an SNMP community name not known to the NetScaler."""
try :
return self._snmptotbadcommname
except Exception as e:
raise e
@property
def snmptotgetnextreqs(self) :
"""SNMP Get-Next PDUs that have been accepted and processed."""
try :
return self._snmptotgetnextreqs
except Exception as e:
raise e
@property
def snmpunknownengineids(self) :
"""SNMP packets that were dropped because they referenced an SNMP engine ID that was not known to the NetScaler."""
try :
return self._snmpunknownengineids
except Exception as e:
raise e
@property
def snmpwrongdigests(self) :
"""SNMP packets that were dropped because they did not contain the expected digest value."""
try :
return self._snmpwrongdigests
except Exception as e:
raise e
@property
def snmpgetbulkreqsrate(self) :
"""Rate (/s) counter for snmptotgetbulkreqs."""
try :
return self._snmpgetbulkreqsrate
except Exception as e:
raise e
@property
def snmpnotintimewindow(self) :
"""SNMP packets that were dropped because they appeared outside of the authoritative SNMP engine's window."""
try :
return self._snmpnotintimewindow
except Exception as e:
raise e
@property
def snmptotgetbulkreqs(self) :
"""SNMP Get-Bulk PDUs that have been accepted and proZcessed."""
try :
return self._snmptotgetbulkreqs
except Exception as e:
raise e
@property
def snmpunknownusername(self) :
"""SNMP packets that were dropped because they referenced a user that was not known to the SNMP engine."""
try :
return self._snmpunknownusername
except Exception as e:
raise e
@property
def snmpunsupportedsecuritylevel(self) :
"""SNMP packets that were dropped because they requested a security level that was
unknown to the NetScaler or otherwise unavailable.
"""
try :
return self._snmpunsupportedsecuritylevel
except Exception as e:
raise e
@property
def snmptotgetreqs(self) :
"""SNMP Get-Request PDUs that have been accepted and processed."""
try :
return self._snmptotgetreqs
except Exception as e:
raise e
@property
def snmprxpktsrate(self) :
"""Rate (/s) counter for snmptotrxpkts."""
try :
return self._snmprxpktsrate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmp
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
"""Use this API to fetch the statistics of all snmp_stats resources that are configured on netscaler.
:param service:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
obj = snmp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
class Clearstats:
""" """
basic = "basic"
full = "full"
class snmp_response(base_response) :
""" """
def __init__(self, length=1) :
self.snmp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.snmp = [snmp_stats() for _ in range(length)]
==== test/qe/compression-filter/test_compression.py (sharwell/repose) ====
#!/usr/bin/env python
from narwhal import repose
import unittest
from narwhal import conf
from narwhal import pathutil
import xmlrunner as _xmlrunner
import logging
import time
import argparse
import os
import deproxy
logger = logging.getLogger(__name__)
config_dir = pathutil.join(os.getcwd(), 'etc/repose')
deployment_dir = pathutil.join(os.getcwd(), 'var/repose')
artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters')
log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log')
repose_port = 8888
stop_port = 7777
deproxy_port = 9999
headers = {}
startup_wait_time = 15
def setUpModule():
# Set up folder hierarchy
logger.debug('setUpModule')
pathutil.create_folder(config_dir)
pathutil.create_folder(deployment_dir)
pathutil.create_folder(os.path.dirname(log_file))
config_verbose = False
def apply_config_set(config_set_name, params=None):
if params is None:
params = {}
conf.process_config_set(config_set_name, verbose=config_verbose,
destination_path=config_dir, params=params)
class TestCompression(unittest.TestCase):
def setUp(self):
logger.debug('setUp')
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port))
pathutil.clear_folder(config_dir)
params = {
'port': repose_port,
'target_hostname': 'localhost',
'target_port': deproxy_port,
'deployment_dir': deployment_dir,
'artifact_dir': artifact_dir,
'log_file': log_file
}
apply_config_set('configs/.config-set.xml', params=params)
self.valve = repose.ReposeValve(config_dir=config_dir,
stop_port=stop_port)
time.sleep(startup_wait_time)
def test_compression_with_gzip(self):
logger.debug('test_compression_with_gzip')
url = 'http://localhost:%i/' % repose_port
logger.debug('url = %s' % url)
time.sleep(1)
mc = self.deproxy.make_request(method='GET', url=url,
headers=headers)
self.assertEqual(mc.received_response.code, '200', msg=mc)
self.assertEqual(len(mc.handlings), 1, msg=mc)
def tearDown(self):
logger.debug('tearDown')
if self.valve is not None:
self.valve.stop()
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
available_test_cases = [
TestCompression
]
def run():
test_case_map = dict()
for tc_class in available_test_cases:
test_case_map[tc_class.__name__] = tc_class
parser = argparse.ArgumentParser()
parser.add_argument('--print-log', help="Print the log to STDERR.",
action='store_true')
parser.add_argument('--test-case', action='append',
help="Which test case to run. Can be specififed "
"multiple times. 'all' is the default, and runs all "
"available test cases",
                        choices=['all'] + list(test_case_map.keys()),
type=str)
args = parser.parse_args()
if args.print_log:
logging.basicConfig(level=logging.DEBUG,
format=('%(asctime)s %(levelname)s:%(name)s:'
'%(funcName)s:'
'%(filename)s(%(lineno)d):'
'%(threadName)s(%(thread)d):%(message)s'))
global config_verbose
config_verbose = True
if args.test_case is None:
args.test_case = ['all']
test_cases = []
test_cases_set = set()
for tc in args.test_case:
if tc == 'all':
test_cases = available_test_cases
break
if tc not in test_cases_set:
test_cases_set.add(tc)
test_cases.append(test_case_map[tc])
logger.debug('run')
setUpModule()
suite = unittest.TestSuite()
loader = unittest.TestLoader()
load_tests = loader.loadTestsFromTestCase
for test_case in test_cases:
suite.addTest(load_tests(test_case))
testRunner = _xmlrunner.XMLTestRunner(output='test-reports')
result = testRunner.run(suite)
if __name__ == '__main__':
run()
==== porters/QQbot/nonebot_plugins/nonebot_porter.py (lizard1998myx/MultiBot) ====
from nonebot import CommandSession, on_command
from nonebot import on_natural_language, NLPSession, IntentCommand
from ....requests import Request
from ....responses import *
from ....distributor import Distributor
from ....utils import image_url_to_path
from ....paths import PATHS
import os, logging, traceback
# BLACKLIST = [3288849221]
BLACKLIST = []
@on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True)
async def _(session: NLPSession):
return IntentCommand(100.0, 'porter', args={'message': session.msg_text})
@on_command('porter')
async def porter(session: CommandSession):
logging.debug('=========== [MultiBot] Entered nonebot porter ==========')
    # In every case, pack the incoming message into a Request for the
    # Distributor, then handle the Response sequence it sends back.
    # build the Request
request = Request()
request.platform = 'CQ'
request.user_id = str(session.ctx['user_id'])
self_id = str(session.self_id)
self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像']
bot_called = False
if request.user_id == self_id:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
elif request.user_id in BLACKLIST:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']:
        # the bot was @-mentioned
bot_called = True
if 'group_id' in session.ctx.keys():
request.group_id = str(session.ctx['group_id'])
else:
        # private chat
bot_called = True
for message in session.ctx['message']:
if message['type'] == 'text' and request.msg is None:
text = message['data']['text'].strip()
            # name-call detection
for name in self_names:
if name in text:
                    # the bot was called by name
bot_called = True
text = text.strip()
while text[:len(name)] == name:
text = text[len(name):]
while text[-len(name):] == name:
text = text[:-len(name)]
for sign in [None, ',', ',', None]:
text = text.strip(sign)
            # message-segment checks
if '请使用' in text and '新版手机QQ' in text:
request.echo = True
request.msg = '【NonebotPorter】不支持的消息段:"%s"' % text
continue
            # skip empty text
if text != '':
request.msg = text
elif message['type'] == 'image' and request.img is None:
            # defer the image download until the response is generated
request.img = message['data']['url']
# request.img = image_url_to_path(message['data']['url'], header='QQBot')
elif message['type'] == 'record' and request.aud is None:
request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file'])
elif message['type'] == 'location':
request.loc = {'longitude': float(message['data']['lon']),
'latitude': float(message['data']['lat'])}
elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']:
request.echo = True
request.msg = f"【NonebotPorter】不支持的消息段[{message['type']}]:" \
f"{str(message).replace('CQ:', '$CQ$:')}"
continue
    # initialize the Distributor
distributor = Distributor()
    # fetch the Response sequence while downloading the image; errors are returned as messages
def get_responses():
if request.img:
request.img = image_url_to_path(request.img, header='QQBot')
response_list = distributor.handle(request=request)
return response_list
    # executes a Response sequence
async def execute(response_list: list):
for response in response_list:
try:
if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg):
msg = response.text
for at_id in response.at_list:
msg += '[CQ:at,qq=%s]' % str(at_id)
                    # send over-long text in several chunks
max_length = 2000
while len(msg) > 0:
                        msg_left = msg[max_length:]  # the part of msg beyond max_length
                        msg = msg[:max_length]  # keep only the first max_length characters
                        if isinstance(response, ResponseMsg):  # private chat
await session.send(message=msg)
                        else:  # group message
await session.bot.send_group_msg(group_id=response.group_id, message=msg)
                        if msg_left != '':  # text remains for another round
msg = msg_left
else:
msg = ''
elif isinstance(response, ResponseMusic):
await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]')
elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg):
                    # a backslash must follow the drive letter, and no double quotes are used
img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\')
if isinstance(response, ResponseImg):
await session.send(message=img_msg)
else:
await session.bot.send_group_msg(group_id=response.group_id, message=img_msg)
elif isinstance(response, ResponseCQFunc):
try:
output = await eval('session.bot.%s' % response.func_name)(**response.kwargs)
except AttributeError:
await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name)
except TypeError:
await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs))
except SyntaxError:
await session.send('【NonebotPorter】语法错误')
else:
                        await execute(distributor.process_output(output=output))  # recursively handle the new Response sequence
            except Exception:
                # e.g. message delivery failures
logging.error(traceback.format_exc())
    # after filtering, hand the Request to the Distributor and execute the returned Responses
if bot_called:
        # the bot was explicitly called: execute directly
await execute(response_list=get_responses())
elif distributor.use_active(request=request, save=False):
        # not called explicitly, but an active session matches, so execute anyway
await execute(response_list=get_responses())
else:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
    # refresh and persist the latest session state
distributor.refresh_and_save()
logging.debug('=========== [MultiBot] Completed nonebot porter ==========')
==== chb/models/DllFunctionAPI.py (psifertex/CodeHawk-Binary) ====
# ------------------------------------------------------------------------------
# Access to the CodeHawk Binary Analyzer Analysis Results
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from chb.models.DllFunctionParameter import DllFunctionParameter
class DllFunctionAPI(object):
def __init__(self,summary,xnode):
self.summary = summary
self.xnode = xnode
    def get_calling_convention(self): return self.xnode.get('cc')
    def get_adjustment(self): return int(self.xnode.get('adj'))
def get_parameters(self):
return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ]
def get_stack_parameters(self):
stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter() ]
return sorted(stackparams,key=lambda p:p.get_stack_nr())
def get_stack_parameter_names(self):
stackparams = self.get_stack_parameters()
return [ p.name for p in stackparams ]
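
# Usage sketch (added; the XML layout and the `summary` argument are
# assumptions inferred from the element names this class reads):
#
#   import xml.etree.ElementTree as ET
#   xnode = ET.fromstring('<api cc="stdcall" adj="8"><par name="hFile"/></api>')
#   api = DllFunctionAPI(summary=None, xnode=xnode)
#   print(api.get_calling_convention(), api.get_adjustment())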
==== NetworkConstants.py (LostMyAccount/Game-Maker-Server) ====
from enum import Enum
receive_codes = {
"PING": 0,
"HANDSHAKE": 1,
"DISCONNECT": 2,
}
handshake_codes = {
"UNKNOWN": 0,
"WAITING_ACK": 1,
"COMPLETED": 2
}
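
# The Enum import above is unused by the dicts; a minimal Enum-based
# equivalent (added for illustration, the dicts stay the public interface):
class ReceiveCode(Enum):
    PING = 0
    HANDSHAKE = 1
    DISCONNECT = 2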
==== tests/settings_test.py (uploadcare/intercom-rank) ====
from app.settings import *
DEBUG = False
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
SERVER_NAME = 'localhost'
WTF_CSRF_ENABLED = False
WTF_CSRF_CHECK_DEFAULT = False
WTF_CSRF_METHODS = []
==== tests/operators/test_op/test_csr_mul.py (KnowingNothing/akg-test) ====
import numpy as np
import scipy.sparse
import akg
from akg import tvm
from akg import topi
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array, get_shape
from akg.utils.dsl_create import get_broadcast_shape
def csr_mul(dense, sparse_data, col_idx, row_idx, shape):
assert len(shape) == 2, "only supports 2-dim sparse tensor"
assert len(dense.shape) <= 2
assert dense.dtype == sparse_data.dtype, "data and weight must have the same dtype"
num_rows = row_idx.shape[0] - 1
dense_shape = get_shape(dense.shape)
sparse_shape = get_shape(shape)
broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape)
need_expand = tvm.const(len(dense_shape) < len(broadcast_shape))
need_broadcast_first_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0])
need_broadcast_last_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1])
def gen_ir(dense, sparse_data, col_idx, row_idx, output):
ib = tvm.ir_builder.create()
with ib.for_range(0, num_rows, name='i') as i:
start = ib.load(row_idx, i)
end = ib.load(row_idx, i + 1)
with ib.for_range(0, end - start, name='j') as j:
pos = start + j
with ib.if_scope(pos < end):
val = ib.load(sparse_data, pos)
col = ib.load(col_idx, pos)
with ib.if_scope(need_expand):
ib.store(output, pos, val * ib.load(dense, [col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_first_dim):
ib.store(output, pos, val * ib.load(dense, [0, col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_last_dim):
ib.store(output, pos, val * ib.load(dense, [i, 0]))
with ib.else_scope():
ib.store(output, pos, val * ib.load(dense, [i, col]))
return ib.get()
output_name = "T_csr_mul_" + dense.op.name + "_" + sparse_data.op.name
out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name)
return tvm.extern([shape],
[dense, sparse_data, col_idx, row_idx],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name)
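
# CSR layout reminder (added): for row i, the stored non-zeros are
# sparse_data[row_idx[i]:row_idx[i + 1]] and their column positions are
# col_idx[row_idx[i]:row_idx[i + 1]]; gen_ir above walks exactly that range.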
def gen_data(shape1, shape2, dtype1, dtype2):
dense = random_gaussian(shape1).astype(dtype1)
sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1)
expect = sparse_data.multiply(np.broadcast_to(dense, shape2))
return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data
def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None):
if not attrs:
attrs = {"target": "cuda"}
# gen data
op_attrs = [shape2]
dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2)
output_shape = expect.shape
attrs["csr_avg_row"] = sparse_data.shape[0] // shape1[0]
mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape],
[dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch,
attrs=attrs, kernel_name="csr_mul")
if len(expect.shape) == 0:
output_shape = (1, )
output = np.zeros(output_shape, expect.dtype)
output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect)
atol, rtol = get_rtol_atol("csr_mul", dtype1)
res = compare_tensor(output, expect, rtol=rtol, atol=atol)
print("Test {}".format("Pass" if res else "Failed"))
target_name = attrs["target"].split()[0]
if not res:
mod_source = mod
if target_name != "llvm":
mod_source = mod.imported_modules[0]
print("Error {}:========================".format(target_name))
print(mod_source.get_source())
raise AssertionError("Test fail")
if attrs["profiling"]:
args_list = to_tvm_nd_array(
[dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0))
target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_time"])
==== venv/lib/python3.6/site-packages/madmom/io/__init__.py (metu-sparg/higrid) ====
# encoding: utf-8
"""
Input/output package.
"""
from __future__ import absolute_import, division, print_function
import io as _io
import contextlib
import numpy as np
from .audio import load_audio_file
from .midi import load_midi, write_midi
from ..utils import suppress_warnings, string_types
ENCODING = 'utf8'
# dtype for numpy structured arrays that contain labelled segments
# 'label' needs to be castable to str
SEGMENT_DTYPE = [('start', float), ('end', float), ('label', object)]
# like the built-in open(), but transparently handles already-open file handles
@contextlib.contextmanager
def open_file(filename, mode='r'):
"""
Context manager which yields an open file or handle with the given mode
and closes it if needed afterwards.
Parameters
----------
filename : str or file handle
File (handle) to open.
mode: {'r', 'w'}
Specifies the mode in which the file is opened.
Yields
------
Open file (handle).
"""
# check if we need to open the file
if isinstance(filename, string_types):
f = fid = _io.open(filename, mode)
else:
f = filename
fid = None
# yield an open file handle
yield f
# close the file if needed
if fid:
fid.close()
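
# Usage sketch (added): `open_file` accepts either a filename or an
# already-open handle, so callers can stay agnostic:
#
#     with open_file('events.txt') as f:
#         first_line = f.readline()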
@suppress_warnings
def load_events(filename):
"""
Load a events from a text file, one floating point number per line.
Parameters
----------
filename : str or file handle
File to load the events from.
Returns
-------
numpy array
Events.
Notes
-----
Comments (lines starting with '#') and additional columns are ignored,
i.e. only the first column is returned.
"""
# read in the events, one per line
events = np.loadtxt(filename, ndmin=2)
# 1st column is the event's time, the rest is ignored
return events[:, 0]
def write_events(events, filename, fmt='%.3f', delimiter='\t', header=None):
"""
Write the events to a file, one event per line.
Parameters
----------
events : numpy array
Events to be written to file.
filename : str or file handle
File to write the events to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats, or a multi-format
string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
events = np.array(events)
# reformat fmt to be a single string if needed
if isinstance(fmt, (list, tuple)):
fmt = delimiter.join(fmt)
# write output
with open_file(filename, 'wb') as f:
# write header
if header is not None:
f.write(bytes(('# ' + header + '\n').encode(ENCODING)))
# write events
for e in events:
try:
string = fmt % tuple(e.tolist())
except AttributeError:
string = e
except TypeError:
string = fmt % e
f.write(bytes((string + '\n').encode(ENCODING)))
f.flush()
load_onsets = load_events
write_onsets = write_events
@suppress_warnings
def load_beats(filename, downbeats=False):
"""
Load the beats from the given file, one beat per line of format
'beat_time' ['beat_number'].
Parameters
----------
filename : str or file handle
File to load the beats from.
downbeats : bool, optional
Load only downbeats instead of beats.
Returns
-------
numpy array
Beats.
"""
values = np.loadtxt(filename, ndmin=1)
if values.ndim > 1:
if downbeats:
# rows with a "1" in the 2nd column are downbeats
return values[values[:, 1] == 1][:, 0]
else:
# 1st column is the beat time, the rest is ignored
return values[:, 0]
return values
def write_beats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the beats to a file.
Parameters
----------
beats : numpy array
Beats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
if fmt is None and beats.ndim == 2:
fmt = ['%.3f', '%d']
elif fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
def load_downbeats(filename):
"""
Load the downbeats from the given file.
Parameters
----------
filename : str or file handle
File to load the downbeats from.
Returns
-------
numpy array
Downbeats.
"""
return load_beats(filename, downbeats=True)
def write_downbeats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the downbeats to a file.
Parameters
----------
beats : numpy array
Beats or downbeats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Notes
-----
If `beats` contains both time and number of the beats, they are filtered
to contain only the downbeats (i.e. only the times of those beats with a
beat number of 1).
"""
if beats.ndim == 2:
beats = beats[beats[:, 1] == 1][:, 0]
if fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
@suppress_warnings
def load_notes(filename):
"""
Load the notes from the given file, one note per line of format
'onset_time' 'note_number' ['duration' ['velocity']].
Parameters
----------
filename: str or file handle
File to load the notes from.
Returns
-------
numpy array
Notes.
"""
return np.loadtxt(filename, ndmin=2)
def write_notes(notes, filename, fmt=None, delimiter='\t', header=None):
"""
Write the notes to a file.
Parameters
----------
notes : numpy array, shape (num_notes, 2)
Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']].
filename : str or file handle
File to write the notes to.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a
multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter`
is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy array
Notes.
"""
# set default format
if fmt is None:
fmt = ['%.3f', '%d', '%.3f', '%d']
if not notes.ndim == 2:
raise ValueError('unknown format for `notes`')
# truncate format to the number of colums given
fmt = delimiter.join(fmt[:notes.shape[1]])
# write the notes
write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header)
def load_segments(filename):
"""
Load labelled segments from file, one segment per line. Each segment is of
form <start> <end> <label>, where <start> and <end> are floating point
numbers, and <label> is a string.
Parameters
----------
filename : str or file handle
File to read the labelled segments from.
Returns
-------
segments : numpy structured array
Structured array with columns 'start', 'end', and 'label',
containing the beginning, end, and label of segments.
"""
start, end, label = [], [], []
with open_file(filename) as f:
for line in f:
s, e, l = line.split()
start.append(float(s))
end.append(float(e))
label.append(l)
segments = np.zeros(len(start), dtype=SEGMENT_DTYPE)
segments['start'] = start
segments['end'] = end
segments['label'] = label
return segments
def write_segments(segments, filename, fmt=None, delimiter='\t', header=None):
"""
Write labelled segments to a file.
Parameters
----------
segments : numpy structured array
Labelled segments, one per row (column definition see SEGMENT_DTYPE).
filename : str or file handle
Output filename or handle.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format
string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy structured array
Labelled segments
Notes
-----
Labelled segments are represented as numpy structured array with three
named columns: 'start' contains the start position (e.g. seconds),
'end' the end position, and 'label' the segment label.
"""
if fmt is None:
fmt = ['%.3f', '%.3f', '%s']
write_events(segments, filename, fmt=fmt, delimiter=delimiter,
header=header)
load_chords = load_segments
write_chords = write_segments
def load_key(filename):
"""
Load the key from the given file.
Parameters
----------
filename : str or file handle
File to read key information from.
Returns
-------
str
Key.
"""
with open_file(filename) as f:
return f.read().strip()
def write_key(key, filename, header=None):
"""
Write key string to a file.
Parameters
----------
key : str
Key name.
filename : str or file handle
Output file.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
key : str
Key name.
"""
write_events([key], filename, fmt='%s', header=header)
def load_tempo(filename, split_value=1., sort=None, norm_strengths=None,
max_len=None):
"""
Load tempo information from the given file.
Tempo information must have the following format:
'main tempo' ['secondary tempo' ['relative_strength']]
Parameters
----------
filename : str or file handle
File to load the tempo from.
split_value : float, optional
Value to distinguish between tempi and strengths.
`values` > `split_value` are interpreted as tempi [bpm],
`values` <= `split_value` are interpreted as strengths.
sort : bool, deprecated
Sort the tempi by their strength.
norm_strengths : bool, deprecated
Normalize the strengths to sum 1.
max_len : int, deprecated
Return at most `max_len` tempi.
Returns
-------
tempi : numpy array, shape (num_tempi[, 2])
Array with tempi. If no strength is parsed, a 1-dimensional array of
length 'num_tempi' is returned. If strengths are given, a 2D array
with tempi (first column) and their relative strengths (second column)
is returned.
"""
# try to load the data from file
values = np.loadtxt(filename, ndmin=1)
# split the filename according to their filename into tempi and strengths
# TODO: this is kind of hack-ish, find a better solution
tempi = values[values > split_value]
strengths = values[values <= split_value]
# make the strengths behave properly
strength_sum = np.sum(strengths)
# relative strengths are given (one less than tempi)
if len(tempi) - len(strengths) == 1:
strengths = np.append(strengths, 1. - strength_sum)
if np.any(strengths < 0):
raise AssertionError('strengths must be positive')
# no strength is given, assume an evenly distributed one
if strength_sum == 0:
strengths = np.ones_like(tempi) / float(len(tempi))
# normalize the strengths
if norm_strengths is not None:
import warnings
warnings.warn('`norm_strengths` is deprecated as of version 0.16 and '
'will be removed in 0.18. Please normalize strengths '
'separately.')
strengths /= float(strength_sum)
# tempi and strengths must have same length
if len(tempi) != len(strengths):
raise AssertionError('tempi and strengths must have same length')
# order the tempi according to their strengths
if sort:
import warnings
warnings.warn('`sort` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please sort the returned array '
'separately.')
# Note: use 'mergesort', because we want a stable sorting algorithm
# which keeps the order of the keys in case of duplicate keys
# but we need to apply this '(-strengths)' trick because we want
# tempi with uniformly distributed strengths to keep their order
sort_idx = (-strengths).argsort(kind='mergesort')
tempi = tempi[sort_idx]
strengths = strengths[sort_idx]
# return at most 'max_len' tempi and their relative strength
if max_len is not None:
import warnings
warnings.warn('`max_len` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please truncate the returned array '
'separately.')
return np.vstack((tempi[:max_len], strengths[:max_len])).T
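
# Worked example (added): a tempo file containing "120 60 0.7" yields
# array([[120., 0.7], [60., 0.3]]); the second strength is filled in so the
# pair sums to 1.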
def write_tempo(tempi, filename, delimiter='\t', header=None, mirex=None):
"""
Write the most dominant tempi and the relative strength to a file.
Parameters
----------
tempi : numpy array
Array with the detected tempi (first column) and their strengths
(second column).
filename : str or file handle
Output file.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
mirex : bool, deprecated
Report the lower tempo first (as required by MIREX).
Returns
-------
tempo_1 : float
The most dominant tempo.
tempo_2 : float
The second most dominant tempo.
strength : float
Their relative strength.
"""
# make the given tempi a 2d array
tempi = np.array(tempi, ndmin=2)
# default values
t1 = t2 = strength = np.nan
# only one tempo was detected
if len(tempi) == 1:
t1 = tempi[0][0]
strength = 1.
# consider only the two strongest tempi and strengths
elif len(tempi) > 1:
t1, t2 = tempi[:2, 0]
strength = tempi[0, 1] / sum(tempi[:2, 1])
# for MIREX, the lower tempo must be given first
if mirex is not None:
import warnings
warnings.warn('`mirex` argument is deprecated as of version 0.16 '
'and will be removed in version 0.17. Please sort the '
'tempi manually')
if t1 > t2:
t1, t2, strength = t2, t1, 1. - strength
# format as a numpy array and write to output
out = np.array([t1, t2, strength], ndmin=2)
write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'],
delimiter=delimiter, header=header)
==== tools/initialcompdata/abundngc5286.py (lukeshingles/evelchemevol) ====
from abundsolar import elsolarlogepsilon
zfactor = 10 ** -1.92
# mean of s-poor population in NGC5286
# from Marino et al. (2015) 2015MNRAS.450..815M
# [Fe/H] = -1.92
# log X/Fe = [X/Fe] + log(X/Fe)_solar
targetlogxtofe = { 'o': 0.58 + elsolarlogepsilon['o'] - elsolarlogepsilon['fe'],
'na': 0.18 + elsolarlogepsilon['na'] - elsolarlogepsilon['fe'],
                   'y': -0.04 + elsolarlogepsilon['y'] - elsolarlogepsilon['fe'],
'zr': 0.17 + elsolarlogepsilon['zr'] - elsolarlogepsilon['fe'],
'ba': 0.03 + elsolarlogepsilon['ba'] - elsolarlogepsilon['fe'],
'la': 0.29 + elsolarlogepsilon['la'] - elsolarlogepsilon['fe'],
'ce': 0.24 + elsolarlogepsilon['ce'] - elsolarlogepsilon['fe'],
'pr': 0.38 + elsolarlogepsilon['pr'] - elsolarlogepsilon['fe'],
'nd': 0.20 + elsolarlogepsilon['nd'] - elsolarlogepsilon['fe']
}
==== kungfucms/apps/account/signals.py (youngershen/kungfucms) ====
# PROJECT : kungfucms
# TIME : 2020/6/9 12:54
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
# PHONE : 13811754531
# WECHAT : 13811754531
# https://github.com/youngershen
from django.core.signals import request_started, \
request_finished
from django.dispatch import Signal, receiver
before_sign_in = Signal(providing_args=["toppings", "size"])
after_sign_in = Signal(providing_args=["toppings", "size"])
sign_in_post_permission = Signal(providing_args=["toppings", "size"])
@receiver(request_started)
def before_request(sender, **kwargs):
pass
@receiver(request_finished)
def after_request(sender, **kwargs):
pass
==== test.py (12beesinatrenchcoat/yt-playlist-discord-webhook) ====
# Unit tests, I guess.
import unittest
import typing
import main
# A test playlist created by myself to test various features of the script.
# It contains five videos, added by two different users,
# and some videos don't have maxres thumbnails.
# This playlist shouldn't be changed.
TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb'
# Testing functions revolving around YouTube and video filtering.
class TestVideoFunctions(unittest.TestCase):
def test_get_playlist_items(self):
r = main.get_playlist_items(TEST_PLAYLIST)
self.assertEqual(len(r['items']), 5)
def test_filter_items_by_timestamp(self):
r = main.get_playlist_items(TEST_PLAYLIST)
filtered = main.filter_playlist_items_by_timestamp(r, 1617985920)
self.assertEqual(len(filtered), 2)
# Not a test, but used in tests below.
def get_playlist_item_embed(pos: int):
r = main.get_playlist_items(TEST_PLAYLIST)
playlist_item = r['items'][pos]
epoch = main.iso_string_to_epoch(playlist_item
['snippet']['publishedAt'])
playlist_item['snippet']['publishedAt'] = epoch
embed = main.video_info_to_embed(playlist_item)
return embed
# Testing stuff with the Discord Embeds.
class TestEmbeds(unittest.TestCase):
def test_maxres_thumbnail(self):
embed = get_playlist_item_embed(1)
self.assertRegex(embed.thumbnail['url'], '(maxresdefault)')
def test_hq_thumbnail_when_no_maxres(self):
embed = get_playlist_item_embed(2)
self.assertRegex(embed.thumbnail['url'], '(hqdefault)')
if __name__ == '__main__':
unittest.main()
| 1.96875 | 2 |
appify/scan.py | akx/appify | 0 | 12796576 | import subprocess
import threading
from collections import defaultdict
from concurrent.futures import Executor
from concurrent.futures.thread import ThreadPoolExecutor
class RecursiveLibraryScanner:
def __init__(self, executor: Executor, scan_private: bool):
self.executor = executor
self.libraries = defaultdict(set)
self.scanned = set()
self.scan_private = scan_private
self.jobs = []
self.all_done = threading.Event()
def _check(self, job):
if all(j.done() for j in self.jobs):
self.all_done.set()
def _enqueue(self, target):
job = self.executor.submit(self._scan, target)
job.add_done_callback(self._check)
self.jobs.append(job)
def _scan(self, target):
# print("scanning", target, file=sys.stderr)
self.scanned.add(target)
for lib in scan_libraries(target):
self.libraries[target].add(lib)
if lib not in self.scanned:
is_private = smells_private(lib)
if (is_private and self.scan_private) or not is_private:
self._enqueue(lib)
def scan(self, target):
self._enqueue(target)
self.all_done.wait()
return self.libraries
def scan_libraries_recursive(initial_target, scan_private=True):
with ThreadPoolExecutor() as executor:
rls = RecursiveLibraryScanner(executor, scan_private=scan_private)
return rls.scan(initial_target)
def scan_libraries(target):
in_load_dylib = False
libraries = set()
for line in subprocess.check_output(
["otool", "-l", target], encoding="utf-8"
).splitlines():
line = line.strip()
if line == "cmd LC_LOAD_DYLIB":
in_load_dylib = True
if in_load_dylib and line.startswith("name "):
words = line.split()
lib = words[1]
libraries.add(lib)
in_load_dylib = False
return libraries
def smells_private(lib):
if lib.startswith("/System/Library"):
return True
if lib.startswith("/usr/lib/"):
return True
if lib.startswith("/usr/local/lib/"):
return True
return False
def filter_private(scanned_libraries):
public_libraries = {
target: {lib for lib in libraries if not smells_private(lib)}
for (target, libraries) in scanned_libraries.items()
if not smells_private(target)
}
return public_libraries
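
# Usage sketch (assumes a macOS host with `otool` on PATH; the default target
# path is illustrative only):
if __name__ == "__main__":
    import json
    import sys
    target = sys.argv[1] if len(sys.argv) > 1 else "/bin/ls"
    libraries = scan_libraries_recursive(target, scan_private=False)
    public = {t: sorted(libs) for t, libs in filter_private(libraries).items()}
    print(json.dumps(public, indent=2))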
| 1.90625 | 2 |
methods/model_n.py | behrouzsh/deepPhosAPI | 0 | 12796584 | <filename>methods/model_n.py
import functools
import itertools
import os
import random
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from keras.layers import Dense, Activation, Flatten, Dropout, Reshape
from keras.layers import Conv1D, Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import copy
def model_net(X_train1, X_train2, X_train3, y_train,
nb_epoch=60,weights=None):
nb_classes = 2
img_dim1 = X_train1.shape[1:]
img_dim2 = X_train2.shape[1:]
img_dim3 = X_train3.shape[1:]
##########parameters#########
init_form = 'RandomUniform'
learning_rate = 0.001
nb_dense_block = 1
nb_layers = 5
nb_filter = 32
growth_rate = 32
# growth_rate = 24
filter_size_block1 = 13
filter_size_block2 = 7
filter_size_block3 = 3
filter_size_ori = 1
dense_number = 32
dropout_rate = 0.2
dropout_dense = 0.3
weight_decay = 0.0001
nb_batch_size = 512
###################
# Construct model #
###################
from methods.phosnet import Phos
model = Phos(nb_classes, nb_layers, img_dim1, img_dim2, img_dim3, init_form, nb_dense_block,
growth_rate, filter_size_block1, filter_size_block2, filter_size_block3,
nb_filter, filter_size_ori,
dense_number, dropout_rate, dropout_dense, weight_decay)
# Model output
    # choose optimization
opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# model compile
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
# load weights#
if weights is not None:
model.load_weights(weights)
        # model2 aliases model here (deepcopy is commented out), so the
        # layer-by-layer copy below is a no-op: the weights were already
        # loaded into `model` above.
        # model2 = copy.deepcopy(model)
        model2 = model
        model2.load_weights(weights)
        for num in range(len(model2.layers) - 1):
            model.layers[num].set_weights(model2.layers[num].get_weights())
if nb_epoch > 0 :
model.fit([X_train1, X_train2, X_train3], y_train, batch_size=nb_batch_size,
# validation_data=([X_val1, X_val2, X_val3, y_val),
# validation_split=0.1,
epochs= nb_epoch, shuffle=True, verbose=1)
return model
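
# Usage sketch with synthetic data; the window sizes (33/21/11 positions,
# one-hot over 21 symbols) are illustrative assumptions, not the published
# DeepPhos settings.
if __name__ == '__main__':
    n = 8
    X1 = np.random.rand(n, 33, 21, 1)
    X2 = np.random.rand(n, 21, 21, 1)
    X3 = np.random.rand(n, 11, 21, 1)
    y = to_categorical(np.random.randint(2, size=n), num_classes=2)
    model = model_net(X1, X2, X3, y, nb_epoch=1)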
| 2.015625 | 2 |
misc/ConvertOrdersCSVtoJS.py | celstark/MSTonline | 1 | 12796592 | <filename>misc/ConvertOrdersCSVtoJS.py
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 1 17:05:31 2020
@author: craig
We've been using some online order files in our original PsychoPy-derived
web-based MST. This converts those actual .csv files into the .js ones
we'll be using here
"""
import os, csv, glob
inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders')
outpath=os.path.join("C:",os.sep,"Users","craig","OneDrive - University of California - Irvine","Documents","cordova_cMST","www","jsOrders")
studyfiles=glob.glob(os.path.join(inpath,"MST*p1_o*csv"))
testfiles=glob.glob(os.path.join(inpath,"MST*p2_o*csv"))
for fname in studyfiles:
print(fname)
stim=[]
cond=[]
with open(fname,"r") as infile:
reader=csv.reader(infile,delimiter=',')
next(reader)
for row in reader:
stim.append(row[0])
cond.append(row[1])
infile.close()
outfname=fname.replace('csv','js').replace(inpath,outpath)
outfile=open(outfname,"w")
outfile.write('var trial_stim=[\n')
for i in range(len(cond)):
outfile.write(' {' + "stim: '{0}', cond: '{1}'".format(stim[i],cond[i]) + '}')
if i < (len(cond)-1):
outfile.write(',\n')
else:
outfile.write('\n')
outfile.write(']\n')
outfile.close()
for fname in testfiles:
print(fname)
stim=[]
cond=[]
lbin=[]
corr3=[]
corr2=[]
with open(fname,"r") as infile:
reader=csv.reader(infile,delimiter=',')
next(reader)
for row in reader:
stim.append(row[0])
cond.append(row[1])
lbin.append(row[2])
if row[3]=='v':
corr3.append('0')
corr2.append('0')
elif row[3]=='b':
corr3.append('1')
corr2.append('2')
elif row[3]=='n':
corr3.append('2')
corr2.append('2')
else:
corr3.append('-1')
corr2.append('-1')
infile.close()
outfname=fname.replace('csv','js').replace(inpath,outpath)
outfile=open(outfname,"w")
outfile.write('var trial_stim=[\n')
for i in range(len(cond)):
outfile.write(' {' + "stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}')
if i < (len(cond)-1):
outfile.write(',\n')
else:
outfile.write('\n')
outfile.write(']\n')
outfile.close()
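
# For reference, each generated .js file looks like this (values illustrative):
#
#   var trial_stim=[
#     {stim: 'img001.jpg', cond: 'target'},
#     {stim: 'img002.jpg', cond: 'foil'}
#   ]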
| 1.75 | 2 |
fabfile/__init__.py | doutriaux1/ocgis | 1 | 12796600 | from fabric.decorators import task
from fabric.operations import run, sudo, local
from ConfigParser import ConfigParser
import geospatial
from fabric.context_managers import lcd
cp = ConfigParser()
cp.read('ocgis.cfg')
if cp.get('install','location') == 'local':
run = local
cd = lcd
def lsudo(op):
local('sudo {0}'.format(op))
sudo = lsudo
SRC = cp.get('install','src')
INSTALL = cp.get('install','install')
J = cp.get('install','j')
@task(default=True)
def deploy():
# geospatial.install_hdf()
geospatial.install_netCDF4() | 1.109375 | 1 |
example_snippets/multimenus_snippets/NewSnippets/SymPy/Functions/Combinatorial functions/Stirling number of the second kind.py | kuanpern/jupyterlab-snippets-multimenus | 0 | 12796608 | stirling(n, k) | -0.15918 | 0 |
example_project/example_project/urls.py | pwilczynskiclearcode/django-nuit | 5 | 12796616 | from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
url(r'^', include('demo.urls')),
url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}),
url(r'^logout/$', 'django.contrib.auth.views.logout'),
)
| 0.996094 | 1 |
yepes/fields/char.py | samuelmaudo/yepes | 0 | 12796624 | <reponame>samuelmaudo/yepes
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from django.core import checks
from django.core.validators import MinLengthValidator
from django.db import models
from django.utils import six
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from yepes import forms
from yepes.fields.calculated import CalculatedField
from yepes.utils import unidecode
from yepes.utils.deconstruct import clean_keywords
from yepes.validators import CharSetValidator
def check_max_length_attribute(self, **kwargs):
if (self.max_length is not None
and (not isinstance(self.max_length, six.integer_types)
or self.max_length <= 0)):
return [
checks.Error(
"'max_length' must be None or a positive integer.",
hint=None,
obj=self,
id='yepes.E111',
)
]
else:
return []
def check_min_length_attribute(self, **kwargs):
if self.min_length is None:
return []
elif (not isinstance(self.min_length, six.integer_types)
or self.min_length <= 0):
return [
checks.Error(
"'min_length' must be None or a positive integer.",
hint=None,
obj=self,
id='yepes.E112',
)
]
elif (isinstance(self.max_length, six.integer_types)
and self.max_length < self.min_length):
return [
checks.Error(
"'min_length' cannot be greater than 'max_length'.",
hint="Decrease 'min_length' or increase 'max_length'.",
obj=self,
id='yepes.E113',
)
]
else:
return []
class CharField(CalculatedField, models.CharField):
description = _('String')
def __init__(self, *args, **kwargs):
self.charset = kwargs.pop('charset', None)
self.force_ascii = kwargs.pop('force_ascii', False)
self.force_lower = kwargs.pop('force_lower', False)
self.force_upper = kwargs.pop('force_upper', False)
self.min_length = kwargs.pop('min_length', None)
self.normalize_spaces = kwargs.pop('normalize_spaces', True)
self.trim_spaces = kwargs.pop('trim_spaces', False)
super(CharField, self).__init__(*args, **kwargs)
if self.min_length is not None:
self.validators.append(MinLengthValidator(self.min_length))
if self.charset is not None:
self.validators.append(CharSetValidator(self.charset))
def check(self, **kwargs):
errors = super(CharField, self).check(**kwargs)
errors.extend(self._check_min_length_attribute(**kwargs))
return errors
_check_min_length_attribute = check_min_length_attribute
def deconstruct(self):
name, path, args, kwargs = super(CharField, self).deconstruct()
path = path.replace('yepes.fields.char', 'yepes.fields')
clean_keywords(self, kwargs, variables={
'charset': None,
'force_ascii': False,
'force_lower': False,
'force_upper': False,
'min_length': None,
'normalize_spaces': True,
'trim_spaces': False,
})
return (name, path, args, kwargs)
def formfield(self, **kwargs):
params = {
'form_class': forms.CharField,
'charset': self.charset,
'force_ascii': self.force_ascii,
'force_lower': self.force_lower,
'force_upper': self.force_upper,
'max_length': self.max_length,
'min_length': self.min_length,
'normalize_spaces': self.normalize_spaces,
'trim_spaces': self.trim_spaces,
}
params.update(kwargs)
return super(CharField, self).formfield(**params)
def to_python(self, value):
if value is None:
return value
if not isinstance(value, six.string_types):
value = force_text(value)
if self.normalize_spaces:
value = ' '.join(value.split())
elif self.trim_spaces:
value = value.strip()
if not value:
return value
if self.force_ascii:
value = unidecode(value)
if self.force_lower:
value = value.lower()
elif self.force_upper:
value = value.upper()
return value
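
# Usage sketch (hypothetical model; the keyword options shown are the extras
# this class adds on top of Django's CharField):
#
#   from django.db import models
#   from yepes.fields import CharField
#
#   class Tag(models.Model):
#       name = CharField(max_length=30, min_length=2,
#                        force_lower=True, normalize_spaces=True)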
| 1.851563 | 2 |
src/tests/hoplalib/hatchery/test_hatchpotionmodels.py | rickie/hopla | 0 | 12796632 | <reponame>rickie/hopla<filename>src/tests/hoplalib/hatchery/test_hatchpotionmodels.py<gh_stars>0
#!/usr/bin/env python3
import random
from typing import List
import click
import pytest
from hopla.hoplalib.errors import YouFoundABugRewardError
from hopla.hoplalib.hatchery.hatchdata import HatchPotionData
from hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \
HatchPotionException
_SAMPLE_SIZE = 10
class TestHatchPotion:
def test__init__invalid_name_fail(self):
name = "InvalidName"
with pytest.raises(HatchPotionException) as exec_info:
HatchPotion(name, quantity=1)
assert str(exec_info.value).startswith(f"{name} is not a valid hatching potion name.")
assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))
@pytest.mark.parametrize(
"potion_name,quantity",
list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
range(-_SAMPLE_SIZE, 0)))
)
def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int):
with pytest.raises(HatchPotionException) as exec_info:
HatchPotion(potion_name, quantity=quantity)
assert str(exec_info.value).startswith(f"{quantity} is below 0.")
assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))
@pytest.mark.parametrize(
"potion_name,quantity",
list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
range(0, _SAMPLE_SIZE)))
)
def test__repr__ok(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
result: str = repr(potion)
assert result == f"HatchPotion({potion_name}: {quantity})"
def test__eq__(self):
assert HatchPotion("Red") == HatchPotion("Red")
assert HatchPotion("Shimmer", quantity=1) == HatchPotion("Shimmer")
assert HatchPotion("Silver") != HatchPotion("Silver", quantity=2)
assert HatchPotion("Watery") != HatchPotion("Glow")
@pytest.mark.parametrize("potion_name,quantity", [
("Base", 10),
("CottonCandyBlue", 1),
("Golden", 0),
])
def test_is_standard_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is True
assert potion.is_magic_hatch_potion() is False
assert potion.is_wacky_hatch_potion() is False
@pytest.mark.parametrize("potion_name,quantity", [
("BirchBark", 10),
("Windup", 1),
("Vampire", 0),
("Ruby", 9),
("Amber", 69),
("MossyStone", 42),
("SolarSystem", 9001),
])
def test_is_magic_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is False
assert potion.is_magic_hatch_potion() is True
assert potion.is_wacky_hatch_potion() is False
@pytest.mark.parametrize("potion_name,quantity", [
("Veggie", 10),
("Dessert", 0),
])
def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int):
potion = HatchPotion(potion_name, quantity=quantity)
assert potion.is_standard_hatch_potion() is False
assert potion.is_magic_hatch_potion() is False
assert potion.is_wacky_hatch_potion() is True
class TestHatchPotionCollection:
def test__init__empty_ok(self):
collection = HatchPotionCollection()
assert collection == HatchPotionCollection({})
assert len(collection) == 0
def test__init__ok(self):
potion_dict = {"Base": 0, "Moonglow": 42, "Sunset": 2}
collection = HatchPotionCollection(potion_dict)
assert collection["Base"] == HatchPotion("Base", quantity=0)
assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
assert collection["Sunset"] == HatchPotion("Sunset", quantity=2)
def test__eq__ok(self):
left = HatchPotionCollection({"Frost": 1, "Glow": 1})
right = HatchPotionCollection({"Glow": 1, "Frost": 2})
assert left != right
assert HatchPotionCollection() == HatchPotionCollection()
assert HatchPotionCollection({"StarryNight": 1}) != HatchPotionCollection()
assert HatchPotionCollection({"Windup": 2}) == HatchPotionCollection({"Windup": 2})
assert HatchPotionCollection({"Frost": 1}) != HatchPotionCollection({"Frost": 2})
def test__iter__ok(self):
collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 2})
iterator = iter(collection)
assert next(iterator) == "Base"
assert next(iterator) == "Moonglow"
assert next(iterator) == "Sunset"
with pytest.raises(StopIteration):
next(iterator)
def test__getitem__ok(self):
collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 0})
assert collection["Base"] == HatchPotion("Base", quantity=1)
assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
assert collection["Sunset"] == HatchPotion("Sunset", quantity=0)
def test_values_ok(self):
potion1, quantity1 = "Dessert", 10
potion2, quantity2 = "MossyStone", 1
potion3, quantity3 = "StainedGlass", 2
collection = HatchPotionCollection({
potion1: quantity1, potion2: quantity2, potion3: quantity3
})
generator = collection.values()
assert next(generator) == HatchPotion(potion1, quantity=quantity1)
assert next(generator) == HatchPotion(potion2, quantity=quantity2)
assert next(generator) == HatchPotion(potion3, quantity=quantity3)
with pytest.raises(StopIteration):
_ = next(generator)
def test_values_as_list_ok(self):
potion1, quantity1 = "Golden", 1
potion2, quantity2 = "Sunshine", 41
potion3, quantity3 = "Vampire", 3
collection = HatchPotionCollection({
potion1: quantity1, potion2: quantity2, potion3: quantity3
})
result: List[HatchPotion] = list(collection.values())
expected: List[HatchPotion] = [
HatchPotion(potion1, quantity=quantity1),
HatchPotion(potion2, quantity=quantity2),
HatchPotion(potion3, quantity=quantity3)
]
assert result == expected
def test_remove_hatch_potion_ok(self):
potion1_quantity = 3
potion2_quantity = 42
potion3_name, potion3_quantity = "Sunset", 1
collection = HatchPotionCollection({
"Base": potion1_quantity,
"Moonglow": potion2_quantity,
potion3_name: potion3_quantity
})
collection.remove_hatch_potion(HatchPotion("Base"))
collection.remove_hatch_potion(HatchPotion("Moonglow"))
collection.remove_hatch_potion(HatchPotion(potion3_name))
assert collection["Base"] == HatchPotion("Base",
quantity=potion1_quantity - 1)
assert collection["Moonglow"] == HatchPotion("Moonglow",
quantity=potion2_quantity - 1)
assert collection[potion3_name] == HatchPotion(potion3_name,
quantity=potion3_quantity - 1)
    def test_remove_hatch_potion_not_available_fails(self):
collection = HatchPotionCollection({"Base": 1})
not_found_potion_name = "Moonglow"
with pytest.raises(HatchPotionException) as exec_info:
collection.remove_hatch_potion(HatchPotion(not_found_potion_name))
expected_msg = f"{not_found_potion_name} was not in the collection "
assert str(exec_info.value).startswith(expected_msg)
| 1.460938 | 1 |
Badger/DataLoader/AMGA/readAttributes.py | zhangxt-ihep/IHEPDIRAC | 0 | 12796640 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: linlei
# for data/all, file names look like run_0023454_All_file014_SFO-2.dst
# for data/skim & mc we use the new file naming rule:
# file names look like resonance_eventType_streamId_runL_runH_*.dst
import os
import os.path
import ROOT
from ROOT import gROOT
from amga import mdclient,mdinterface
import string
import re
import time
#get number behiend string "exp"
def getNum(expNum):
format = re.compile(r"\d+")
res = format.search(expNum)
if res is not None:
return res.group()
#Get expNum and resonance from ExpSearch according runids
def getExpRes(runids):
entries = []
expRes = {}
expNumList = []
resList = []
#print"runids",runids
client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')
#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')
#get all entries under catalog "/BES3/ExpSearch"
client.listEntries('/BES3_test/ExpSearch')
entry = client.getEntry()[0]
while entry:
entries.append(entry)
entry = client.getEntry()[0]
    if not entries:
        print "ExpSearch directory is empty, please run createBesDir first"
        return False
for item in entries:
#for each entry,get its attributes in amga
client.getattr(item,['Id','runFrm','runTo','expNum','resonance'])
result = client.getEntry()[1]
# print item
# print result
runfrm = string.atoi(result[1])
runto = string.atoi(result[2])
for runid in runids:
#check all runid whether between runfrm and runto of each entry
#under catalog "/BES3/ExpSearch"
if runfrm<=runid<=runto:
#if this runid between runfrm and runto,and expNum isn't in expNumList
#add this expNum to expNumList
if result[3] not in expNumList:
expNumList.append(result[3])
#resonance of this id isn't in resonance List,add it to resList
if result[4] not in resList:
resList.append(result[4])
#only including one resonance
if len(resList) == 1:
expRes["resonance"] = resList[0]
else:
        # has several resonances; something may be wrong with this file
print "serveral resonance:",resList
return False
#only including one expNum
if len(expNumList) == 1:
expRes["expNum"] = expNumList[0]
else:
        # if there are several expNums, combine them into mexpN1pN2p...
        expNumList.sort()
        combined = "m" + expNumList[0]
        for expNum in expNumList[1:]:
            combined = combined + "p" + getNum(expNum)
        expRes["expNum"] = combined
return expRes
#check whether eventType is stored in eventTypeList in amga
def eventTypeCheck(eventType):
entries = []
client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')
#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')
client.listEntries('/BES3_test/EventTypeList')
entry = client.getEntry()[0]
while entry:
entries.append(entry)
entry = client.getEntry()[0]
for entry in entries:
#get name of each entry
client.getattr(entry,['FILE'])
result = client.getEntry()[1]
#compare eventType with name of each entry
if eventType == result[0]:
return True
return False
#judge format of file
class JudgeFormat(Exception):
def __init__(self, format):
self.format = format
def __str__(self):
        return "the file's format is not %s" % (self.format,)
#type of srcformat is list,it includes many formats
def checkFormat(srcformat,file):
flag = 0
#print "file",file
for format in srcformat:
#if format of file is in srcformat
if file.endswith(format):
flag = 1
return flag
#Before reading information from .root file,we need to use changeFormat
#function to create a .root link for .dst file
def changeFormat(dstfile,rootfile,srcformat=[".dst",".tag"],destformat=[".root"]):
flag = checkFormat(srcformat,dstfile)
if flag==0:
raise JudgeFormat(srcformat)
return
flag = checkFormat(destformat,rootfile)
if flag==0:
raise JudgeFormat(destformat)
return
#if this rootfile has exists,then delete it
if os.path.exists(rootfile):
os.unlink(rootfile)
#create a new rootfile for dstfile
os.symlink(dstfile,rootfile)
return rootfile
#dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst,
#return run_0023474_All_file007_SFO-2
def getLFN(dstfile,format=[".dst",".tag"]):
flag = checkFormat(format,dstfile)
if flag==0:
raise JudgeFormat(format)
return
#split dstfile by "/",then get "lfn.dst"
items=dstfile.split("/")
length=len(items)
filename=items[length-1]
#split "*.dst" by "."
#get lfn
lfn = filename.split('.')[0]
return lfn
#get size of dst file
def getFileSize(dstfile,format = [".dst",".tag"]):
flag = checkFormat(format,dstfile)
if flag==0:
raise JudgeFormat(format)
return
if os.path.exists(dstfile):
#get file's size
return os.path.getsize(dstfile)
#lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH
#lfn like run_0009947_All_file001_SFO-1,get attribute runId
def splitLFN(lfn,type):
result = {}
items = lfn.split("_")
if type == "all":
if items[2] == "All":
runId = string.atoi(items[1])
return runId
else:
result["resonance"] = items[0]
result["eventType"] = items[1]
result["streamId"] = items[2]
result["runL"] = string.atoi(items[3])
result["runH"] = string.atoi(items[4])
return result
#get runIdList from JobOptions
def getRunIdList(jobOptions):
result = {}
runIdList = []
str1=jobOptions[0]
pat = re.compile(r'RunIdList= {-\d+(,-?\d+)+}')
res1 = pat.search(str1)
if res1 is not None:
#get a string like:RunIdList={-10513,0,-10629}
str2 = res1.group()
result["description"] = str2
pat = re.compile(r'-\d+(,-?\d+)+')
list = pat.search(str2)
if list is not None:
#get a string like:-10513,0,-10629
runIds = list.group()
#split runIds according ','
items=runIds.split(',')
#members' style in items is string,we need to change their style to integer
for i in items:
if i!='0':
runid=abs(string.atoi(i))
runIdList.append(runid)
result["runIdList"] = runIdList
return result
#get Boss version, runid, Entry number, JobOptions from root file
def getCommonInfo(rootfile):
commoninfo = {}
gROOT.ProcessLine('gSystem->Load("libRootEventData.so");')
gROOT.ProcessLine('TFile file("%s");'%rootfile)
gROOT.ProcessLine('TTree* tree =(TTree*)file.Get("JobInfoTree");')
gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get("Event");')
gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch("JobInfo");')
gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch("TEvtHeader");')
gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();')
gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();')
gROOT.ProcessLine('branch->SetAddress(&jobInfo);')
gROOT.ProcessLine('branch1->SetAddress(&evtHeader);')
gROOT.ProcessLine('branch->GetEntry(0);')
gROOT.ProcessLine('branch1->GetEntry(0);')
gROOT.ProcessLine('Int_t num=tree1.GetEntries()')
#get Boss Version
commoninfo["bossVer"] = ROOT.jobInfo.getBossVer()
#get RunId
commoninfo["runId"] = abs(ROOT.evtHeader.getRunId())
#get all entries
commoninfo["eventNum"] = ROOT.num
#get TotEvtNo
#commoninfo["TotEvtNo"] = list(i for i in ROOT.jobInfo.getTotEvtNo())
#get JobOption
commoninfo["jobOptions"] = list(i for i in ROOT.jobInfo.getJobOptions())
#set DataType
commoninfo["dataType"]='dst'
return commoninfo
#get bossVer,eventNum,dataType,fileSize,name,eventType,expNum,
#resonance,runH,runL,status,streamId,description
class DataAll(object):
def __init__(self,dstfile,rootfile):
self.dstfile = dstfile
self.rootfile = rootfile
def getAttributes(self):
#store all attributes
attributes = {}
expRes = {}
runIds = []
#change the .dst file to .root file
rootfile = changeFormat(self.dstfile,self.rootfile)
if getFileSize(self.dstfile)<5000:
print "Content of this file is null:",self.dstfile
return "error"
else:
attributes = getCommonInfo(rootfile)
#get filesize by calling getFileSize function
#get name by calling getLFN function
attributes["fileSize"] = getFileSize(self.dstfile)
attributes["LFN"] = getLFN(self.dstfile)
#for .dst files of Data/All,their EventType are "all"
attributes["eventType"] = "all"
#get runId from filename
runId = splitLFN(attributes["LFN"],"all")
#compare runid of rootfile with runid in filename
if attributes["runId"] == runId:
runIds.append(attributes["runId"])
#get expNum and Resonance by calling getExpRes(runIds)
expRes = getExpRes(runIds)
if expRes == False:
print "Can't get expNum and resonance of this file"
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["resonance"] = expRes["resonance"]
#set RunH=RunId and RunL=RunId
attributes["runH"] = attributes["runId"]
attributes["runL"] = attributes["runId"]
else:
print "runId of %s,in filename is %d,in rootfile is %d"%(self.dstfile,lfnInfo["runId"],attributes["runId"])
return "error"
#set values of attribute status,streamId,Description
#and these values are null
#-1 <=> value of status is null
        # 'stream0' <=> default streamId (data/all files have no stream)
#null <=> value of Description is null
attributes["status"] = -1
attributes["streamId"] = 'stream0'
attributes["description"] = 'null'
del attributes["runId"]
del attributes["jobOptions"]
return attributes
#get resonance,runL,runH,eventType,streamId,LFN from file name
#file name like resonance_eventType_streamId_runL_runH_*.dst
#get bossVer,runL,runH,eventNum by reading information from rootfile
class Others(object):
def __init__(self,dstfile,rootfile):
self.dstfile = dstfile
self.rootfile = rootfile
def getAttributes(self):
#store all attributes
attributes = {}
expRes = {}
lfnInfo = {}
runIds = []
#change the .dst file to .root file
rootfile = changeFormat(self.dstfile,self.rootfile)
if getFileSize(self.dstfile)<5000:
print "Content of this file is null:",self.dstfile
return "error"
else:
attributes = getCommonInfo(rootfile)
#get filesize by calling getFileSize function
#get lfn by calling getLFN function
attributes["fileSize"] = getFileSize(self.dstfile)
attributes["LFN"] = getLFN(self.dstfile)
#get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function
lfnInfo = splitLFN(attributes["LFN"],"others")
#if runL is equal to runH,this file only has one runId
if lfnInfo["runL"] == lfnInfo["runH"]:
#if runId in filename also is equal to runId in rootfile
if attributes["runId"] == lfnInfo["runL"]:
runIds.append(attributes["runId"])
attributes["runL"] = attributes["runId"]
attributes["runH"] = attributes["runId"]
#get expNum and Resonance by calling getExpRes()
expRes = getExpRes(runIds)
if expRes == False:
print "Can't get expNum and resonance of this file"
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["description"] = "null"
#if resonance in filename is same as resonance that get from ExpSearch
if expRes["resonance"] == lfnInfo["resonance"]:
attributes["resonance"] = expRes["resonance"]
else:
print "Error %s:resonance in filename is %s,in ExpSearch is %s"%(self.dstfile,lfnInfo["resonance"],expRes["resonance"])
return "error"
else:
print "Error %s:in the filename,runL = runH = %d,but runId in the root file is %d"%(self.dstfile,lfnInfo["runL"],attributes["runId"])
return "error"
else:
#this dst file has several runIds,get them from JobOptions by calling getRunIdList function
result = getRunIdList(attributes["jobOptions"])
if result is not None:
runH = max(result["runIdList"])
runL = min(result["runIdList"])
if runL == lfnInfo["runL"]:
if runH == lfnInfo["runH"]:
attributes["runL"] = lfnInfo["runL"]
attributes["runH"] = lfnInfo["runH"]
#get expNum and Resonance by calling getExpRes(runid)
expRes = getExpRes(result["runIdList"])
if expRes == False:
print "Error:",this.dstfile
return "error"
attributes["expNum"] = expRes["expNum"]
attributes["description"] = result["description"]
if expRes["resonance"] == lfnInfo["resonance"]:
attributes["resonance"] = lfnInfo["resonance"]
else:
print "Error %s:resonance in filename is %s,in ExpSearch is %s"%(self.dstfile,lfnInfo["resonance"],expRes["resonance"])
return "error"
else:
print "Error %s:runH in filename is %d,in jobOptions is %d"%(self.dstfile,lfnInfo["runH"],runH)
return "error"
else:
print "Error %s:runL in filename is %d,in jobOptions is %d"%(self.dstfile,lfnInfo["runL"],runL)
return "error"
#get streamId from filename
attributes["streamId"] = lfnInfo["streamId"]
#check eventType in filename
evtType_exists = eventTypeCheck(lfnInfo["eventType"])
if evtType_exists == True:
attributes["eventType"] = lfnInfo["eventType"]
else:
print "Error %s:eventType %s in filename is not stored in AMGA"%(self.dstfile,lfnInfo["eventType"])
return "error"
#set values of attribute status
#-1 <=> value of status is null
attributes["status"] = -1
del attributes["runId"]
del attributes["jobOptions"]
return attributes
if __name__=="__main__":
import time
start=time.time()
obj = DataAll("/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst","/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root")
end = time.time()
print "661:",str(start - end)
start = time.time()
obj = DataAll("/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst","/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root")
end = time.time()
print "655:",str(start - end)
| 1.375 | 1 |
apis_configs/config_helper.py | yewu/icdc-demo | 2 | 12796648 | import json
import os
# make it easy to change this for testing
XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/')
def default_search_folders(app_name):
'''
Return the list of folders to search for configuration files
'''
return [
'%s/cdis/%s' % (XDG_DATA_HOME, app_name),
'/usr/share/cdis/%s' % app_name,
'/var/www/%s' % app_name
]
def find_paths(file_name,app_name,search_folders=None):
'''
Search the given folders for file_name
search_folders defaults to default_search_folders if not specified
return the first path to file_name found
'''
search_folders = search_folders or default_search_folders(app_name)
possible_files = [ os.path.join(folder, file_name) for folder in search_folders ]
return [ path for path in possible_files if os.path.exists(path) ]
def load_json(file_name,app_name,search_folders=None):
'''
json.load(file_name) after finding file_name in search_folders
return the loaded json data or None if file not found
'''
actual_files = find_paths(file_name, app_name, search_folders)
if not actual_files:
return None
with open(actual_files[0], 'r') as reader:
return json.load(reader)
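
# Usage sketch; the file and app names are hypothetical:
if __name__ == '__main__':
    conf = load_json('creds.json', 'myapp')
    print(conf if conf is not None else 'creds.json not found in search folders')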
| 1.554688 | 2 |
MC_Event_Generator_with_Vegas/event_output.py | GuojinTseng/MY_MC_Generator | 0 | 12796656 | #==========================================================#
# Process: e+e- -> Z/gamma -> mu+mu-
# Author: <NAME>
# Date: 2018.7.16
# Version: 1.0
#==========================================================#
class Event_Output(object):
def output(self, i, p1, p2, p3, p4):
with open("event.txt","a") as events:
events.write("===================="+"event "+str(i)+"====================")
events.write("\n")
events.write("pem: "+str(p1))
events.write("\n")
events.write("pep: "+str(p2))
events.write("\n")
events.write("pmm: "+str(p3))
events.write("\n")
events.write("pmp: "+str(p4))
events.write("\n")
events.write("\n")
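
# Usage sketch with made-up four-momenta (each p can be any repr()-able
# object, e.g. a list [E, px, py, pz]):
if __name__ == '__main__':
    out = Event_Output()
    out.output(1, [45.6, 0.0, 0.0, 45.6], [45.6, 0.0, 0.0, -45.6],
               [45.6, 12.3, -7.8, 42.1], [45.6, -12.3, 7.8, -42.1])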
| 1.679688 | 2 |
neb/util.py | cstein/neb | 20 | 12796664 | import numpy
""" Utility variables and functions
"""
aa2au = 1.8897261249935897 # bohr / AA
# converts nuclear charge to atom label
Z2LABEL = {
1: 'H', 2: 'He',
3: 'Li', 4: 'Be', 5: 'B', 6: 'C', 7: 'N', 8: 'O', 9: 'F', 10: 'Ne',
11: 'NA', 12: 'Mg', 13: 'Al', 14: 'Si', 15: 'P', 16: 'S', 17: 'Cl', 18: 'Ar'
}
# converts an atomic label to a nuclear charge
LABEL2Z = {}
for key in Z2LABEL:
LABEL2Z[Z2LABEL[key]] = key
# masses from UIPAC: http://www.chem.qmul.ac.uk/iupac/AtWt/
MASSES = {0: 0.00,
1: 1.00784, 2: 4.002602,
3: 6.938, 4: 9.01218, 5: 10.806, 6: 12.0096, 7: 14.00643, 8: 15.99903, 9: 18.998403, 10: 20.1797,
11: 22.9898, 12: 24.304, 13: 26.9815, 14: 28.084, 15: 30.973, 16: 32.059, 17: 35.446, 18: 39.948
}
# Van der Waals radii from Alvarez (2013), DOI: 10.1039/c3dt50599e
# all values in Angstrom
VDWRADII = {0: 0.00,
1: 1.20, 2: 1.43,
3: 2.12, 4: 1.98, 5: 1.91, 6: 1.77, 7: 1.66, 8: 1.50, 9: 1.46, 10: 1.58,
11: 2.50, 12: 2.51, 13: 2.25, 14: 2.19, 15: 1.90, 16: 1.89, 17: 1.82, 18: 1.83
}
# Covalent radii from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
# all values in Angstrom
COVALENTRADII = {0: 0.00,
1: 0.32, 2: 0.46,
3: 1.33, 4: 1.02, 5: 0.85, 6: 0.75, 7: 0.71, 8: 0.63, 9: 0.64, 10: 0.67,
11: 1.55, 12: 1.39, 13: 1.26, 14: 1.16, 15: 1.11, 16: 1.03, 17: 0.99, 18: 0.96
}
# Coordination numbers from Pyykkö and Atsumi (2009), DOI: 10.1002/chem.200800987
COORDINATION = {0: 0,
1: 1, 2: 1,
3: 1, 4: 2, 5: 3, 6: 4, 7: 3, 8: 2, 9: 1, 10: 1,
11: 1, 12: 2, 13: 3, 14: 4, 15: 3, 16: 2, 17: 1, 18: 1
}
def idamax(a):
""" Returns the index of maximum absolute value (positive or negative)
in the input array a.
    Note: Loosely based on a subroutine in GAMESS with the same name
Arguments:
a -- a numpy array where we are to find the maximum
value in (either positive or negative)
Returns:
the index in the array where the maximum value is.
"""
idx = -1
v = 0.0
for i, value in enumerate(numpy.abs(a)):
if value > v:
idx = i
v = value
return idx
def idamin(a):
""" Returns the index of minimum absolute value (positive or negative)
in the input array a.
Arguments:
a -- a numpy array where we are to find the minimum
value in (either positive or negative)
Returns:
the index in the array where the maximum value is.
"""
idx = -1
v = 1.0e30
for i, value in enumerate(numpy.abs(a)):
if value < v:
idx = i
v = value
return idx
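
# Quick usage sketch for the helpers above:
if __name__ == '__main__':
    a = numpy.array([0.1, -5.0, 2.5])
    print(idamax(a))  # -> 1: the -5.0 entry has the largest magnitude
    print(idamin(a))  # -> 0: the 0.1 entry has the smallest magnitude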
| 1.921875 | 2 |
LC3-Longest Substring Without Repeating Characters.py | karthyvenky/LeetCode-Challenges | 0 | 12796672 | <filename>LC3-Longest Substring Without Repeating Characters.py
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
#Leetcode 3 - Longest substring without repeating characters
st = en = poi = 0
substr = temp = ''
maxlen = 0
for i in range(len(s)):
if s[i] not in temp:
temp += s[i]
else:
if maxlen < len(temp):
maxlen = len(temp)
substr = temp
st = poi
en = i - 1
while s[i] in temp:
temp = temp[1:]
                    poi = poi + 1  # window start moves forward one character per trim
temp += s[i]
if maxlen < len(temp):
maxlen = len(temp)
substr = temp
#print(f"Longest substring is {substr} and length is {maxlen} and from {st} to {en}")
return(maxlen) | 2.515625 | 3 |
makeIGVToolsSortScript.py | imk1/MethylationQTLCode | 0 | 12796680 | <reponame>imk1/MethylationQTLCode
def makeIGVToolsSortScript(bismarkFileNameListFileName, suffix, scriptFileName, codePath):
    # Make a script that uses igvtools to sort each bismark file in the list
bismarkFileNameListFile = open(bismarkFileNameListFileName)
scriptFile = open(scriptFileName, 'w+')
for line in bismarkFileNameListFile:
# Iterate through the chromosomes and write a line in the script for each for each population
bismarkFileName = line.strip()
bismarkFileNameElements = bismarkFileName.split(".")
fileTypeLength = len(bismarkFileNameElements[-1])
outputFileName = bismarkFileName[0:len(bismarkFileName) - fileTypeLength] + suffix
scriptFile.write("java -Xmx4g -jar " + codePath + "/" + "igvtools.jar sort " + bismarkFileName + " " + outputFileName + "\n")
bismarkFileNameListFile.close()
scriptFile.close()
if __name__=="__main__":
import sys
bismarkFileNameListFileName = sys.argv[1]
suffix = sys.argv[2]
scriptFileName = sys.argv[3]
codePath = sys.argv[4] # Should not contain a / at the end
makeIGVToolsSortScript(bismarkFileNameListFileName, suffix, scriptFileName, codePath)
| 1.546875 | 2 |
hackerrank/data-structures/2d-array.py | Ashindustry007/competitive-programming | 506 | 12796688 | #!/usr/bin/env python3
# https://www.hackerrank.com/challenges/2d-array
a=[0]*6
for i in range(6): a[i]=[int(x) for x in input().split()]
c=-9*9
for i in range(1,5):
for j in range(1,5):
c=max(c,a[i-1][j-1]+a[i-1][j]+a[i-1][j+1]+a[i][j]+a[i+1][j-1]+a[i+1][j]+a[i+1][j+1])
print(c)
| 2.109375 | 2 |
config.py | bert386/rpi-flask-bluez-controller | 0 | 12796696 | # -*- coding: utf-8 -*-
""" Singleton class to manage configuration
Description:
Todo:
"""
import json
import os
import sys
import logging
import constant
class Config(object):
# Here will be the instance stored.
__instance = None
@classmethod
def getInstance(cls):
""" Static access method. """
if Config.__instance == None:
raise Exception("Any configuration is not initialized yet!")
return Config.__instance
def __init__(self, url):
""" Virtually private constructor. """
if Config.__instance != None:
raise Exception("This class is a singleton!")
else:
self.config = dict()
self.load(url)
self._url = url
Config.__instance = self
def load(self, url):
try:
self.config = json.load(open(url))
self.config["version"] = constant.APPVERSION
logging.info(self.config)
except Exception as error:
logging.error(error, exc_info=True)
return self.config
def store(self):
try:
with open(self._url, "w") as outfile:
json.dump(self.config, outfile, indent=4)
except Exception as error:
logging.error(error, exc_info=True)
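
# Usage sketch (hypothetical config path): construct once, then access the
# singleton from anywhere else in the app.
if __name__ == '__main__':
    Config('config.json')              # first call builds the singleton
    cfg = Config.getInstance().config
    logging.info(cfg.get('version'))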
| 2.375 | 2 |
tests/test_sdf_gradient_field_wrt_twist.py | Algomorph/LevelSetFusion-Python | 8 | 12796704 | <gh_stars>1-10
# import unittest
from unittest import TestCase
import numpy as np
from rigid_opt.sdf_gradient_field import calculate_gradient_wrt_twist
from math_utils.transformation import twist_vector_to_matrix2d
def sdf_gradient_wrt_to_twist(live_field, y_field, x_field, twist_vector, offset, voxel_size):
sdf_gradient_wrt_to_voxel = np.zeros((1, 2))
if y_field - 1 < 0:
post_sdf = live_field[y_field + 1, x_field]
if post_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = post_sdf - live_field[y_field, x_field]
elif y_field + 1 > live_field.shape[0] - 1:
pre_sdf = live_field[y_field - 1, x_field]
if pre_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = live_field[y_field, x_field] - pre_sdf
else:
pre_sdf = live_field[y_field - 1, x_field]
post_sdf = live_field[y_field + 1, x_field]
if (post_sdf < -1) or (pre_sdf < -1):
sdf_gradient_wrt_to_voxel[0, 1] = 0
else:
sdf_gradient_wrt_to_voxel[0, 1] = (post_sdf - pre_sdf) / 2
if x_field - 1 < 0:
post_sdf = live_field[y_field, x_field + 1]
if post_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = post_sdf - live_field[y_field, x_field]
elif x_field + 1 > live_field.shape[1] - 1:
pre_sdf = live_field[y_field, x_field - 1]
if pre_sdf < -1:
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = live_field[y_field, x_field] - pre_sdf
else:
pre_sdf = live_field[y_field, x_field - 1]
post_sdf = live_field[y_field, x_field + 1]
if (post_sdf < -1) or (pre_sdf < -1):
sdf_gradient_wrt_to_voxel[0, 0] = 0
else:
sdf_gradient_wrt_to_voxel[0, 0] = (post_sdf - pre_sdf) / 2
x_voxel = (x_field + offset[0])*voxel_size
z_voxel = (y_field + offset[2])*voxel_size
point = np.array([[x_voxel, z_voxel, 1.]], dtype=np.float32).T
twist_matrix_homo_inv = twist_vector_to_matrix2d(-twist_vector)
trans = np.dot(twist_matrix_homo_inv, point)
voxel_gradient_wrt_to_twist = np.array([[1, 0, trans[1]],
[0, 1, -trans[0]]])
return np.dot(sdf_gradient_wrt_to_voxel/voxel_size, voxel_gradient_wrt_to_twist).reshape((1, -1))
class MyTestCase(TestCase):
def test_sdf_gradient_wrt_twist01(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist02(self):
live_field = np.array([[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist03(self):
live_field = np.array([[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
twist_vector = np.array([[0.],
[0.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 2
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist04(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[1.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist05(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[-1.],
[0.]])
offset = np.array([-1, -1, 1])
voxel_size = 1
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
def test_sdf_gradient_wrt_twist06(self):
live_field = np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
twist_vector = np.array([[0.],
[0.],
[.5]])
offset = np.array([-1, -1, 1])
voxel_size = 0.5
gradient_field = calculate_gradient_wrt_twist(live_field,
twist_vector,
array_offset=offset,
voxel_size=voxel_size)
expected_gradient_field = np.zeros((live_field.shape[0], live_field.shape[1], 3), dtype=np.float32)
for y_field in range(live_field.shape[0]):
for x_field in range(live_field.shape[1]):
expected_gradient_field[y_field, x_field] = sdf_gradient_wrt_to_twist(live_field, y_field, x_field,
twist_vector, offset, voxel_size)
self.assertTrue(np.allclose(expected_gradient_field, gradient_field))
| 1.867188 | 2 |
apps/combineCSVElectrochem.py | ryanpdwyer/pchem | 0 | 12796712 |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import streamlit as st
import io
import base64
from util import process_file
def limit_x_values(data, x_column, settings):
st.markdown("### Limit x Range")
x_min = st.number_input("Choose minimum x:", value=min([min(df[x_column].values) for df in data]))
x_max = st.number_input("Choose maximum x:", value=max([max(df[x_column].values) for df in data]))
settings['x_min'] = x_min
settings['x_max'] = x_max
data_out = []
for df in data:
mask = (df[x_column].values > x_min) * (df[x_column].values < x_max)
data_out.append(df[mask])
return data_out, settings
scales = {'A': 1, 'mA': 1e3, 'µA': 1e6}
def scale_current(data, y_column, settings):
st.markdown("### Scale Current")
scale = st.selectbox("Scale:", list(scales.keys()), index=1)
settings['y_scale'] = scale
data_out = []
for df in data:
df2 = df.copy()
df2[y_column] = df2[y_column] * scales[scale]
data_out.append(df2)
return data_out, settings
# def process_data(data, y_column, settings):
# st.markdown("### Rescale y-axis")
# st.selectbox("Choose y-axis scale:", value=[0, 3, 6, 9], format_func=
def run():
df = None
cols = None
x_column = y_column = None
combined_data = None
processing="None"
if 'ever_submitted' not in st.session_state:
st.session_state.ever_submitted = False
settings = {"processing": "None"}
st.markdown("""## Combine CSV Electrochemistry files
This helper will combine multiple CSV files (or Excel spreadsheets)
for easy plotting.
""")
files = st.file_uploader("Upload CSV or Excel Files",
accept_multiple_files=True)
if files:
st.write(files)
filenames = [(i, f.name) for i, f in enumerate(files)]
data = [process_file(f) for f in files]
ind_fname = st.selectbox("Choose data to display: ", filenames,
format_func=lambda x: x[1], index=0)
st.write("""## Labels
Use the boxes below to change the labels for each line that will go on the graph.
""")
labels = [st.text_input(f"{filename[0]}. {filename[1]}", value=filename[1]) for filename in filenames]
if ind_fname:
df = data[ind_fname[0]]
cols = list(df.columns)
st.write("## Choose columns")
with st.form("column_chooser_and_run"):
x_column = st.selectbox("Choose the x column: ", cols)
y_column = st.selectbox("Choose y column: ", cols, index=len(cols)-1)
submitted = st.form_submit_button()
st.session_state.ever_submitted = submitted | st.session_state.ever_submitted
use_plotly = st.checkbox("Use plotly?", value=False)
if data is not None:
data, settings = limit_x_values(data, x_column, settings)
data, settings = scale_current(data, y_column, settings)
# data, settings = normalize_data(data, x_column, settings)
# x_data = combined_data[x_column].values
# Plotting
if use_plotly:
fig = go.Figure()
else:
fig, ax = plt.subplots()
for df, fname, label in zip(data, filenames, labels):
if use_plotly:
fig.add_trace(go.Line(x=df[x_column], y=df[y_column], name=str(fname[0])+"-"+label))
else:
ax.plot(df[x_column].values, df[y_column].values, label=str(fname[0])+"-"+label)
y_label_default = f"{y_column} ({settings['y_scale']})"
st.markdown("### Plotting options")
x_label = st.text_input("x-axis label: ", value=x_column)
y_label = st.text_input('y-axis label: ', value=y_label_default)
grid = st.checkbox("Grid?", value=False)
if grid and not use_plotly:
ax.grid()
if use_plotly:
fig.update_layout(xaxis_title=x_label, yaxis_title=y_label)
st.plotly_chart(fig)
else:
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.legend()
st.pyplot(fig)
# # Saving
# st.markdown("### Output options")
# st.write(combined_data)
# filename = st.text_input("Filename:", value="data")
# write_excel(combined_data, filename)
if __name__ == "__main__":
run()
| 2.140625 | 2 |
Externals/micromegas_4.3.5/Packages/smodels-v1.1.0patch1/smodels/tools/externalNllFast.py | yuanfangtardis/vscode_project | 0 | 12796720 | <gh_stars>0
#!/usr/bin/env python
"""
.. module:: externalNllFast
:synopsis: Wrapper for all nllfast versions.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
from __future__ import print_function
try:
import commands as executor
except ImportError:
import subprocess as executor
import os
from smodels.tools.externalTool import ExternalTool
from smodels.tools.smodelsLogging import logger
class ExternalNllFast(ExternalTool):
"""
An instance of this class represents the installation of nllfast.
"""
def __init__(self, sqrts, nllfastVersion, testParams, testCondition):
"""
:param sqrts: sqrt of s, in TeV, as an integer,
:param nllfastVersion: version of the nllfast tool
:param testParams: what are the test params we need to run things with?
:param testCondition: the line that should be the last output line when
running executable
:srcPath: the path of the source code, for compilation
"""
ExternalTool.__init__(self)
self.sqrts = int(sqrts)
self.name = "nllfast%d" % sqrts
self.nllfastVersion = nllfastVersion
path = "<install>/lib/nllfast/nllfast-"
location = path + self.nllfastVersion + "/"
self.cdPath = self.absPath(location)
self.executablePath = self.cdPath + "/nllfast_%dTeV" % self.sqrts
self.testParams = testParams
self.testCondition = testCondition
self.srcPath = self.cdPath
self.executable = ""
def compile(self):
"""
Try to compile nllfast.
"""
logger.info("Trying to compile %s", self.name)
cmd = "cd %s; make" % self.srcPath
out = executor.getoutput(cmd)
# out = subprocess.check_output ( cmd, shell=True, universal_newlines=True )
logger.info(out)
return True
def fetch(self):
"""
Fetch and unpack tarball.
"""
import urllib, tarfile
tempfile = "/tmp/nllfast7.tar.gz"
f = open(tempfile, "w")
url = "http://smodels.hephy.at/externaltools/nllfast%d.tar.gz" \
% self.sqrts
logger.info("fetching tarball from " + url)
R = urllib.urlopen(url)
l = R.readlines()
for line in l:
f.write(line)
R.close()
f.close()
tar = tarfile.open(tempfile)
for item in tar:
tar.extract(item, self.srcPath + "/")
def unlink(self, inputFile):
"""
Remove inputFile.out
"""
return
# fname = "%s/%s.out" % (self.cdPath, inputFile)
# if os.path.exists(fname):
# os.unlink(fname)
def run_(self, params):
"""
Execute nllfast7.
:params params: parameters used (e.g. gg cteq5 .... )
:returns: stdout and stderr, or error message
"""
cmd = "cd %s; %s %s" % (self.cdPath, self.executablePath, params)
out = executor.getoutput(cmd)
# out = subprocess.check_output ( cmd, shell=True, universal_newlines=True )
out = out.split("\n")
return out
def run(self, process, pdf, squarkmass, gluinomass):
"""
Execute nllfast.
:params process: which process: st, sb, gg, gdcpl, sdcpl, ss, sg, tot
:params pdf: cteq=cteq6, mstw2008
:params squarkmass: squarkmass, None if squark decoupled
:params gluinomass: gluinomass, None if gluino decoupled
:returns: stdout and stderr, or error message
"""
processes = ["st", "sb", "gg", "gdcpl", "sdcpl", "ss", "sg", "tot"]
if not process in processes:
return None
if not pdf in ["cteq", "cteq6", "mstw", "mstw2008"]:
return None
        if not squarkmass:
            return self.run_("%s %s %s" % (process, pdf, gluinomass))
        if not gluinomass:
            return self.run_("%s %s %s" % (process, pdf, squarkmass))
        return self.run_("%s %s %s %s" %
                         (process, pdf, squarkmass, gluinomass))
def checkInstallation(self):
"""
Checks if installation of tool is valid by looking for executable and
executing it.
"""
if not os.path.exists(self.executablePath):
logger.error("Executable '%s' not found. Maybe you didn't compile " \
"the external tools in smodels/lib?", self.executablePath)
return False
if not os.access(self.executablePath, os.X_OK):
logger.error("%s is not executable", self.executable)
return False
out = self.run_(self.testParams)
if out[-1].find(self.testCondition) == -1:
logger.error("Setup invalid: " + str(out))
return False
self.unlink("gg")
return True
class ExternalNllFast7(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 7.
"""
def __init__(self):
ExternalNllFast.__init__(self, 7, "1.2",
testParams="gg cteq 500 600",
testCondition="500. 600. 0.193E+00 "
"0.450E+00 0.497E+00")
class ExternalNllFast8(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 8.
"""
def __init__(self):
ExternalNllFast.__init__(self, 8, "2.1",
testParams="gg cteq 500 600",
testCondition="500. 600. 0.406E+00 "
"0.873E+00 0.953E+00")
class ExternalNllFast13(ExternalNllFast):
"""
An instance of this class represents the installation of nllfast 8.
"""
def __init__(self):
ExternalNllFast.__init__(self, 13, "3.1",
testParams="gg cteq 500 600",
testCondition="600. 0.394E+01 0.690E+01 "
"0.731E+01 0.394E+00" )
nllFastTools = { 7 : ExternalNllFast7(),
8 : ExternalNllFast8(),
13 : ExternalNllFast13() }
if __name__ == "__main__":
for (sqrts, tool) in nllFastTools.items():
print("%s: installed in %s" % (tool.name, tool.installDirectory()))
| 1.460938 | 1 |
iota/commands/extended/broadcast_and_store.py | EasonC13/iota.py | 347 | 12796728 | <reponame>EasonC13/iota.py<gh_stars>100-1000
from iota.commands import FilterCommand
from iota.commands.core.broadcast_transactions import \
BroadcastTransactionsCommand
from iota.commands.core.store_transactions import StoreTransactionsCommand
import asyncio
__all__ = [
'BroadcastAndStoreCommand',
]
class BroadcastAndStoreCommand(FilterCommand):
"""
Executes ``broadcastAndStore`` extended API command.
See :py:meth:`iota.api.Iota.broadcast_and_store` for more info.
"""
command = 'broadcastAndStore'
def get_request_filter(self):
pass
def get_response_filter(self):
pass
async def _execute(self, request: dict) -> dict:
# Submit the two coroutines to the already running event loop
await asyncio.gather(
BroadcastTransactionsCommand(self.adapter)(**request),
StoreTransactionsCommand(self.adapter)(**request),
)
return {
'trytes': request['trytes'],
}
| 1.125 | 1 |
sung.pw/test_shellcode32.py | rmagur1203/exploit-codes | 0 | 12796736 | from pwn import *
context.log_level = 'debug'
e = ELF('./test_shellcode32')
r = remote("sunrin.site", 9017)#process('./test_shellcode32')
context(arch='i386', os='linux')
sc = shellcraft.pushstr('/home/pwn/flag')
sc += shellcraft.open("esp", 0, 0) # fd = open("./flag", 0, 0);
sc += shellcraft.read("eax", "esp", 100) # read(fd, esp, 100);
sc += shellcraft.write(1, "esp", 100) # write(1, esp, 100);
r.sendline(asm(sc))
r.interactive() | 0.917969 | 1 |
main.py | CarlFredriksson/sentiment_classification | 0 | 12796744 | import sc_utils
import model_factory
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
INPUT_LENGTH = 100
# Prepare data
X_train, Y_train, X_test, Y_test = sc_utils.load_data()
X_train, Y_train, X_val, Y_val, X_test, Y_test, tokenizer = sc_utils.preprocess_data(X_train, Y_train, X_test, Y_test, INPUT_LENGTH)
embedding_matrix = sc_utils.create_embedding_matrix(tokenizer)
print("X_train.shape: " + str(X_train.shape))
print("Y_train.shape: " + str(Y_train.shape))
print("X_val.shape: " + str(X_val.shape))
print("Y_val.shape: " + str(Y_val.shape))
print("X_test.shape: " + str(X_test.shape))
print("Y_test.shape: " + str(Y_test.shape))
print("embedding_matrix.shape: " + str(embedding_matrix.shape))
# Create model
#model = model_factory.create_baseline_model(embedding_matrix, INPUT_LENGTH)
model = model_factory.create_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_bidir_rnn_model(embedding_matrix, INPUT_LENGTH)
#model = model_factory.create_train_emb_rnn_model(embedding_matrix, INPUT_LENGTH)
model.summary()
# Train model
model.fit(X_train, Y_train, batch_size=200, epochs=30)
# Evaluate model on validation set
val_loss, val_accuracy = model.evaluate(X_val, Y_val, verbose=0)
print("Accuracy on validation set: " + str(val_accuracy * 100) + "%")
# Evaluate model on test set
test_loss, test_accuracy = model.evaluate(X_test, Y_test, verbose=0)
print("Accuracy on test set: " + str(test_accuracy * 100) + "%")
# Test model on my own texts
reviews = [
"This movie is bad. I don't like it it all. It's terrible.",
"I love this movie. I've seen it many times and it's still awesome.",
"I don't think this movie is as bad as most people say. It's actually pretty good."
]
print("Testing model on my own texts:")
print(reviews)
reviews = tokenizer.texts_to_sequences(reviews)
reviews = pad_sequences(reviews, maxlen=INPUT_LENGTH, padding="post")
reviews = np.array(reviews)
pred = model.predict(reviews)
print(pred)
print("The model predicts:")
sentiment_str = "Negative" if pred[0][0] < 0.5 else "Positive"
print(sentiment_str + " on the first text")
sentiment_str = "Negative" if pred[1][0] < 0.5 else "Positive"
print(sentiment_str + " on the second text")
sentiment_str = "Negative" if pred[2][0] < 0.5 else "Positive"
print(sentiment_str + " on the third text")
| 2.0625 | 2 |
modules/parser/nodes/line_node.py | DavidMacDonald11/sea-to-c-transpiler-python-based | 0 | 12796752 | from modules.visitor.symbol_table import SymbolTable
from .ast_node import ASTNode
from .if_node import IfNode
class LineNode(ASTNode):
def __init__(self, expression, depth, no_end = False):
self.expression = expression
self.depth = depth
self.no_end = no_end
super().__init__(expression.position)
def __repr__(self):
return f"[{self.expression}]"
def interpret(self, interpreter):
return self.get_expression(interpreter)
def transpile(self, transpiler):
transpiler.depth = self.depth
expression = self.get_expression(transpiler)
indent = "\t" * self.depth
return f"{indent}{expression}{'' if self.no_end else ';'}\n"
def get_expression(self, visitor):
if isinstance(self.expression, IfNode):
visitor.symbol_table = SymbolTable(visitor.symbol_table)
expression = self.expression.visit(visitor)
visitor.symbol_table = visitor.symbol_table.parent
return expression
return self.expression.visit(visitor)
| 1.507813 | 2 |
datasets/create_tf_record.py | ace19-dev/image-retrieval-tf | 6 | 12796760 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import os
import random
from PIL import Image, ImageStat
import tensorflow as tf
from datasets import dataset_utils
flags = tf.app.flags
flags.DEFINE_string('dataset_dir',
'/home/ace19/dl_data/materials',
'Root Directory to dataset.')
flags.DEFINE_string('output_path',
'/home/ace19/dl_data/materials/query.record',
'Path to output TFRecord')
flags.DEFINE_string('dataset_category',
'query',
'dataset category, train|validation|test')
FLAGS = flags.FLAGS
def get_label_map(label_to_index):
label_map = {}
# cls_lst = os.listdir(FLAGS.dataset_dir)
cls_path = os.path.join(FLAGS.dataset_dir, FLAGS.dataset_category)
cls_lst = os.listdir(cls_path)
for i, cls in enumerate(cls_lst):
data_path = os.path.join(cls_path, cls)
img_lst = os.listdir(data_path)
for n, img in enumerate(img_lst):
img_path = os.path.join(data_path, img)
label_map[img_path] = label_to_index[cls]
return label_map
def dict_to_tf_example(image_name,
dataset_directory,
label_map=None,
image_subdirectory='train'):
"""
Args:
image: a single image name
dataset_directory: Path to root directory holding PCam dataset
label_map: A map from string label names to integers ids.
image_subdirectory: String specifying subdirectory within the
PCam dataset directory holding the actual image data.
Returns:
example: The converted tf.Example.
Raises:
ValueError: if the image pointed to by image is not a valid PNG
"""
# full_path = os.path.join(dataset_directory, image_subdirectory, image_name)
full_path = os.path.join(dataset_directory, image_name)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded = fid.read()
encoded_io = io.BytesIO(encoded)
image = Image.open(encoded_io)
width, height = image.size
format = image.format
image_stat = ImageStat.Stat(image)
mean = image_stat.mean
std = image_stat.stddev
key = hashlib.sha256(encoded).hexdigest()
# if image_subdirectory.lower() == 'test':
# label = -1
# else:
# label = int(label_map[image_name])
label = int(label_map[full_path])
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_utils.int64_feature(height),
'image/width': dataset_utils.int64_feature(width),
'image/filename': dataset_utils.bytes_feature(image_name.encode('utf8')),
'image/fullpath': dataset_utils.bytes_feature(full_path.encode('utf8')),
'image/source_id': dataset_utils.bytes_feature(image_name.encode('utf8')),
'image/key/sha256': dataset_utils.bytes_feature(key.encode('utf8')),
'image/encoded': dataset_utils.bytes_feature(encoded),
'image/format': dataset_utils.bytes_feature(format.encode('utf8')),
'image/class/label': dataset_utils.int64_feature(label),
# 'image/text': dataset_util.bytes_feature('label_text'.encode('utf8'))
'image/mean': dataset_utils.float_list_feature(mean),
'image/std': dataset_utils.float_list_feature(std)
}))
return example
def main(_):
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
options = tf.io.TFRecordOptions(compression_type='GZIP')
writer = tf.io.TFRecordWriter(FLAGS.output_path, options=options)
# cls_lst = os.listdir(FLAGS.dataset_dir)
dataset_lst = os.path.join(FLAGS.dataset_dir, FLAGS.dataset_category)
cls_lst = os.listdir(dataset_lst)
cls_lst.sort()
label_to_index = {}
for i, cls in enumerate(cls_lst):
cls_path = os.path.join(dataset_lst, cls)
if os.path.isdir(cls_path):
label_to_index[cls] = i
label_map = get_label_map(label_to_index)
random.shuffle(cls_lst)
for i, cls in enumerate(cls_lst):
cls_path = os.path.join(dataset_lst, cls)
img_lst = os.listdir(cls_path)
total = len(img_lst)
for idx, image in enumerate(img_lst):
if idx % 100 == 0:
tf.compat.v1.logging.info('On image %d of %d', idx, total)
tf_example = dict_to_tf_example(image, cls_path, label_map, FLAGS.dataset_category)
writer.write(tf_example.SerializeToString())
writer.close()
if __name__ == '__main__':
tf.compat.v1.app.run()
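# Hedged read-back sketch (uses a subset of the feature keys written above;
# shapes and dtypes here are illustrative):
#
#   def parse_materials_example(serialized):
#       features = {
#           'image/encoded': tf.io.FixedLenFeature([], tf.string),
#           'image/class/label': tf.io.FixedLenFeature([], tf.int64),
#       }
#       return tf.io.parse_single_example(serialized, features)
#
#   dataset = tf.data.TFRecordDataset(FLAGS.output_path, compression_type='GZIP')
#   dataset = dataset.map(parse_materials_example)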
| 1.710938 | 2 |
bubble_sorter/__init__.py | joshuabode/bubble-sort-python | 0 | 12796768 | from .bubble_sort import *
| 0.570313 | 1 |
stack/valid_parenthesis.py | javyxu/algorithms-python | 8 | 12796776 | """
Given a string containing just the characters
'(', ')', '{', '}', '[' and ']',
determine if the input string is valid.
The brackets must close in the correct order,
"()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
def is_valid(s:"str")->"bool":
stack = []
dic = { ")":"(",
"}":"{",
"]":"["}
for char in s:
if char in dic.values():
stack.append(char)
elif char in dic.keys():
if stack == []:
return False
top = stack.pop()
if dic[char] != top:
return False
return stack == []
if __name__ == "__main__":
paren = "[]"
print(paren, is_valid(paren))
paren = "[]()[]"
print(paren, is_valid(paren))
paren = "[[[]]"
print(paren, is_valid(paren))
paren = "{([])}"
print(paren, is_valid(paren))
paren = "(}"
print(paren, is_valid(paren))
| 3.09375 | 3 |
languages/python3/pdf/pdfminer/main.py | jcnaud/snippet | 5 | 12796784 |
# coding: utf-8
## Source : https://lobstr.io/index.php/2018/07/30/scraping-document-pdf-python-pdfminer/
import os
from io import BytesIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.pdfpage import PDFPage
def pdf2txt(path):
"""
Extract text from PDF file, and return
the string contained inside
:param path: (str) path to the .pdf file
:return: text (str) string extracted
"""
rsrcmgr = PDFResourceManager()
retstr = BytesIO()
device = TextConverter(rsrcmgr, retstr)
with open(path, "rb") as fp: # open in 'rb' mode to read PDF bytes
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(fp, check_extractable=True):
interpreter.process_page(page)
device.close()
text = retstr.getvalue()
retstr.close()
return text.decode('utf-8')
def main():
print(pdf2txt('./simple1.pdf'))
if __name__ == '__main__':
main()
| 2.28125 | 2 |
scripts/sprint_report.py | AndrewDVXI/kitsune | 929 | 12796792 |
#!/usr/bin/env python
import logging
import sys
import textwrap
import xmlrpc.client
USAGE = 'Usage: sprint_report.py <SPRINT>'
HEADER = 'sprint_report.py: your friendly report view of the sprint!'
# Note: Most of the Bugzilla API code comes from Scrumbugz.
cache = {}
log = logging.getLogger(__name__)
BZ_URL = 'http://bugzilla.mozilla.org/xmlrpc.cgi'
SESSION_COOKIES_CACHE_KEY = 'bugzilla-session-cookies'
BZ_RESOLUTIONS = ['', 'FIXED', 'INVALID', 'WONTFIX', 'DUPLICATE',
'WORKSFORME', 'DUPLICATE']
BZ_FIELDS = [
'id',
'status',
'resolution',
'summary',
'whiteboard',
'assigned_to',
'priority',
'severity',
'product',
'component',
'blocks',
'depends_on',
'creation_time',
'last_change_time',
'target_milestone',
]
UNWANTED_COMPONENT_FIELDS = [
'sort_key',
'is_active',
'default_qa_contact',
'default_assigned_to',
'description'
]
class SessionTransport(xmlrpc.client.SafeTransport):
"""
XML-RPC HTTPS transport that stores auth cookies in the cache.
"""
_session_cookies = None
@property
def session_cookies(self):
if self._session_cookies is None:
cookie = cache.get(SESSION_COOKIES_CACHE_KEY)
if cookie:
self._session_cookies = cookie
return self._session_cookies
def parse_response(self, response):
cookies = self.get_cookies(response)
if cookies:
self._session_cookies = cookies
cache.set(SESSION_COOKIES_CACHE_KEY,
self._session_cookies, 0)
log.debug('Got cookie: %s', self._session_cookies)
return xmlrpc.client.Transport.parse_response(self, response)
def send_host(self, connection, host):
cookies = self.session_cookies
if cookies:
for cookie in cookies:
connection.putheader('Cookie', cookie)
log.debug('Sent cookie: %s', cookie)
return xmlrpc.client.Transport.send_host(self, connection, host)
def get_cookies(self, response):
cookie_headers = None
if hasattr(response, 'msg'):
cookies = response.msg.get_all('set-cookie')
if cookies:
log.debug('Full cookies: %s', cookies)
cookie_headers = [c.split(';', 1)[0] for c in cookies]
return cookie_headers
class BugzillaAPI(xmlrpc.client.ServerProxy):
def get_bug_ids(self, **kwargs):
"""Return list of ids of bugs from a search."""
kwargs.update({
'include_fields': ['id'],
})
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
return [bug['id'] for bug in bugs.get('bugs', [])]
def get_bugs(self, **kwargs):
get_history = kwargs.pop('history', True)
get_comments = kwargs.pop('comments', True)
kwargs.update({
'include_fields': BZ_FIELDS,
})
if 'ids' in kwargs:
kwargs['permissive'] = True
log.debug('Getting bugs with kwargs: %s', kwargs)
bugs = self.Bug.get(kwargs)
else:
if 'whiteboard' not in kwargs:
kwargs['whiteboard'] = ['u=', 'c=', 'p=']
log.debug('Searching bugs with kwargs: %s', kwargs)
bugs = self.Bug.search(kwargs)
bug_ids = [bug['id'] for bug in bugs.get('bugs', [])]
if not bug_ids:
return bugs
# mix in history and comments
history = comments = {}
if get_history:
history = self.get_history(bug_ids)
if get_comments:
comments = self.get_comments(bug_ids)
for bug in bugs['bugs']:
bug['history'] = history.get(bug['id'], [])
bug['comments'] = comments.get(bug['id'], {}).get('comments', [])
bug['comments_count'] = len(comments.get(bug['id'], {})
.get('comments', []))
return bugs
def get_history(self, bug_ids):
log.debug('Getting history for bugs: %s', bug_ids)
try:
history = self.Bug.history({'ids': bug_ids}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting history for bug ids: %s', bug_ids)
return {}
return dict((h['id'], h['history']) for h in history)
def get_comments(self, bug_ids):
log.debug('Getting comments for bugs: %s', bug_ids)
try:
comments = self.Bug.comments({
'ids': bug_ids,
'include_fields': ['id', 'creator', 'time', 'text'],
}).get('bugs')
except xmlrpc.client.Fault:
log.exception('Problem getting comments for bug ids: %s', bug_ids)
return {}
return dict((int(bid), cids) for bid, cids in comments.items())
def wrap(text, indent=' '):
text = text.split('\n\n')
text = [textwrap.fill(part, expand_tabs=True, initial_indent=indent,
subsequent_indent=indent)
for part in text]
return '\n\n'.join(text)
def sprint_stats(bugs):
"""Print bugs stats block."""
# Return dict of bugs stats
#
# * total points
# * breakdown of points by component
# * breakdown of points by focus
# * breakdown of points by priority
# * other things?
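# Hedged sketch of the intended return value (assumes story points live in
# the whiteboard 'p=' field; parse_whiteboard is defined below):
#
#   stats = {'total_points': 0, 'by_component': {}, 'by_priority': {}}
#   for bug in bugs:
#       points = int(parse_whiteboard(bug.get('whiteboard', '')).get('p') or 0)
#       stats['total_points'] += points
#       for key, field in (('by_component', 'component'), ('by_priority', 'priority')):
#           stats[key][bug.get(field)] = stats[key].get(bug.get(field), 0) + points
#   return stats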
def parse_whiteboard(whiteboard):
bits = {
'u': '',
'c': '',
'p': '',
's': ''
}
for part in whiteboard.split(' '):
part = part.split('=')
if len(part) != 2:
continue
if part[0] in bits:
bits[part[0]] = part[1]
return bits
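# e.g. parse_whiteboard('u=ux c=frontend p=2 s=2012.19')
#   -> {'u': 'ux', 'c': 'frontend', 'p': '2', 's': '2012.19'}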
def get_history(bugs, sprint):
history = []
for bug in bugs:
for item in bug.get('history', []):
for change in item.get('changes', []):
added = parse_whiteboard(change['added'])
removed = parse_whiteboard(change['removed'])
if ((change['field_name'] == 'status_whiteboard'
and removed['s'] != sprint
and added['s'] == sprint)):
history.append((
item['when'],
bug,
item['who'],
removed['s'],
added['s']
))
return history
def sprint_timeline(bugs, sprint):
"""Print timeline block."""
timeline = []
history = get_history(bugs, sprint)
# Try to associate the change that added the sprint to the
# whiteboard with a comment.
for when, bug, who, removed, added in history:
reason = 'NO COMMENT'
for comment in bug.get('comments', []):
if comment['time'] == when and comment['creator'] == who:
reason = comment['text']
break
timeline.append((
when,
bug['id'],
who,
removed,
added,
reason
))
timeline.sort(key=lambda item: item[0])
for mem in timeline:
print('%s: %s: %s' % (mem[0], mem[1], mem[2]))
print(' %s -> %s' % (mem[3] if mem[3] else 'unassigned', mem[4]))
print(wrap(mem[5]))
print('')
def print_header(text):
print(text)
print('=' * len(text))
print('')
def main(argv):
# logging.basicConfig(level=logging.DEBUG)
if not argv:
print(USAGE)
print('Error: Must specify the sprint to report on. e.g. 2012.19')
return 1
sprint = argv[0]
print(HEADER)
print('')
print('Working on %s' % sprint)
print('')
bugzilla = BugzillaAPI(
BZ_URL,
transport=SessionTransport(use_datetime=True),
allow_none=True)
bugs = bugzilla.get_bugs(
product=['support.mozilla.org'],
whiteboard=['s=' + sprint],
resolution=BZ_RESOLUTIONS,
history=True,
comments=True)
bugs = bugs['bugs']
print_header('Timeline')
sprint_timeline(bugs, sprint)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 1.640625 | 2 |
views.py | jfroejk/cartridge_quickpay | 0 | 12796800 |
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, JsonResponse, \
HttpResponseBadRequest, HttpResponseForbidden
from django.template import loader
from django.template.response import TemplateResponse
from django.shortcuts import redirect, render
from django.utils.timezone import now
from django.views.decorators.csrf import csrf_exempt
from django.core.urlresolvers import reverse
from django.db import transaction
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
from cartridge.shop import checkout
from cartridge.shop.models import Order
from cartridge.shop.forms import OrderForm
import json
import logging
import re
from urllib.parse import urlencode
from typing import Callable, List, Optional
from .payment import get_quickpay_link, sign, sign_order, start_subscription, capture_subscription_order, \
acquirer_requires_popup, acquirer_supports_subscriptions, order_currency
from .models import QuickpayPayment, get_private_key
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
tax_handler = handler(settings.SHOP_HANDLER_TAX)
order_handler = handler(settings.SHOP_HANDLER_ORDER)
order_form_class = (lambda s: import_dotted_path(s) if s else OrderForm)(getattr(settings, 'QUICKPAY_ORDER_FORM', None))
def quickpay_checkout(request: HttpRequest) -> HttpResponse:
"""Checkout using Quickpay payment form.
Use the normal cartridge.views.checkout_steps for GET and for the other payment steps;
use this special version for POSTing the payment form for Quickpay.
Settings:
QUICKPAY_ORDER_FORM = dotted path to order form to use
QUICKPAY_FRAMED_MODE = <whether to use framed Quickpay>
QUICKPAY_SHOP_BASE_URL: str required = URL of the shop for success, cancel and callback URLs
QUICKPAY_ACQUIRER: str|list required = The acquirer(s) to use, e.g. 'clearhaus'
QUICKPAY_AUTO_CAPTURE: bool default False = Whether to auto-capture payment
urls.py setup:
from cartridge_quickpay.views import checkout_quickpay, order_form_class
...
url("^shop/checkout/", checkout_steps, {'form_class': order_form_class}),
url("^shop/checkout_quickpay/", checkout_quickpay, name="checkout_quickpay"),
url("^shop/", include("cartridge.shop.urls")),
...
** FOR FRAMED MODE: **
Change checkout.html
- <form ... onsubmit="return false">
- Change submit button to:
- <button class="btn btn-lg btn-primary pull-right" onclick="checkout_quickpay();">Go to payment</button>
- add payment modal
<div class="modal db-modal fade" id="payment_window" tabindex="-1" role="dialog" aria-labelledby="payment_window_label">
<div class="modal-dialog" role="document">
<div class="modal-content">
<div class="modal-body">
<iframe id="payment_iframe" style="width: 100%; border: none; height: 90vh;"></iframe>
</div>
</div>
</div>
</div>
- and add JS at the bottom:
<script>
function checkout_quickpay() {
$.post("{% url 'quickpay_checkout' %}", $('.checkout-form').serialize(), function(data) {
if (data.success) {
$('#payment_iframe').attr('src', data.payment_link);
$('#payment_window').modal('show');
} else {
alert("failed");
}
});
}
</script>
"""
framed: bool = getattr(settings, 'QUICKPAY_FRAMED_MODE', False)
acquirer = request.POST.get('acquirer', None)
logging.debug("quickpay_checkout: using acquirer {}".format(acquirer or '<any>'))
in_popup = acquirer_requires_popup(acquirer)
step = checkout.CHECKOUT_STEP_FIRST # Was: _LAST
checkout_errors = []
initial = checkout.initial_order_data(request, order_form_class)
logging.debug("quickpay_checkout: initial order data = {}".format(initial))
form = order_form_class(request, step, initial=initial, data=request.POST)
if form.is_valid():
logging.debug("quickpay_checkout() - Form valid")
request.session["order"] = dict(form.cleaned_data)
try:
billship_handler(request, form)
tax_handler(request, form)
except checkout.CheckoutError as e:
logging.warn("quickpay_checkout() - billship or tax handler failed")
checkout_errors.append(e)
# Create order and Quickpay payment, redirect to Quickpay/Mobilepay form
order = form.save(commit=False)
order.setup(request) # Order is saved here so it gets an ID
# Handle subscription or one-time order
if (hasattr(order, 'has_subscription')
and order.has_subscription()
and acquirer_supports_subscriptions(acquirer)):
quickpay_subs_id, quickpay_link = start_subscription(
order, order.items.all().order_by('id')[0])
logging.debug("quickpay_checkout() - starting subscription {}, payment link {}"
.format(quickpay_subs_id, quickpay_link))
else:
# One-time order OR subscription with acquirer that doesn't support subscriptions
quickpay_link: str = get_quickpay_link(order, acquirer)['url']
logging.debug("quickpay_checkout() - product purchase (or subscription w/o auto-renewal), payment link {}"
.format(quickpay_link))
# Redirect to Quickpay
if framed:
logging.debug("quickpay_checkout() - JSON response {}"
.format(str({'success': True, 'payment_link': quickpay_link})))
return JsonResponse({'success': True, 'payment_link': quickpay_link})
# Pass along whether the URL should be opened in a new window, open it via JS, and handle the return page landing in the iframe again
elif in_popup:
logging.debug("quickpay_checkout() - Opening popup window")
return render(request, "cartridge_quickpay/payment_toplevel.html", {'quickpay_link': quickpay_link})
else:
logging.debug("quickpay_checkout() - Redirect response")
return HttpResponseRedirect(redirect_to=quickpay_link)
# Form invalid, go back to checkout step
step_vars = checkout.CHECKOUT_STEPS[step - 1]
template = "shop/%s.html" % step_vars["template"]
context = {"CHECKOUT_STEP_FIRST": step == checkout.CHECKOUT_STEP_FIRST,
"CHECKOUT_STEP_LAST": step == checkout.CHECKOUT_STEP_LAST,
"CHECKOUT_STEP_PAYMENT": (settings.SHOP_PAYMENT_STEP_ENABLED and
step == checkout.CHECKOUT_STEP_PAYMENT),
"step_title": step_vars["title"], "step_url": step_vars["url"],
"steps": checkout.CHECKOUT_STEPS, "step": step, "form": form,
"payment_url": "https://payment.quickpay.net/d7ad25ea15154ef4bdffb5bf78f623fc"}
page = loader.get_template(template).render(context=context, request=request)
if framed:
logging.debug("quickpay_checkout() - Form not OK, JSON response")
return JsonResponse({'success': False, 'page': page})
else:
logging.debug("quickpay_checkout() - Form not OK, page response")
return HttpResponse(page)
def escape_frame(f: Callable[[HttpRequest], HttpResponse]) -> Callable[[HttpRequest], HttpResponse]:
"""Escape iframe when payment is in a iframe and the shop itself is not"""
def f_escape(request: HttpRequest) -> HttpResponse:
if request.GET.get('framed'):
logging.debug("cartridge_quickpay.views.escape_frame: Escaping")
url = request.path
get_args = request.GET.copy()
get_args.pop('framed')
if get_args:
url += '?' + get_args.urlencode()
res = '<html><head><script>window.parent.location.replace("{}");</script></head></html>'.format(url)
return HttpResponse(res)
else:
logging.debug("cartridge_quickpay.views.escape_frame: NOT in frame")
return f(request)
f_escape.__name__ = f.__name__
return f_escape
def escape_popup(f: Callable[[HttpRequest], HttpResponse]) -> Callable[[HttpRequest], HttpResponse]:
"""Escape payment popup window"""
def f_escape(request: HttpRequest) -> HttpResponse:
if request.GET.get('popup'):
logging.debug("cartridge_quickpay.views.escape_popup: Escaping")
url = request.path
get_args = request.GET.copy()
get_args.pop('popup')
if get_args:
url += '?' + get_args.urlencode()
res = '<html><head><script>var opener = window.opener; opener.document.location = "{}"; window.close(); opener.focus();</script></head></html>'.format(url)
return HttpResponse(res)
else:
logging.debug("cartridge_quickpay.views.escape_popup: NOT in popup")
return f(request)
f_escape.__name__ = f.__name__
return f_escape
@escape_frame
@escape_popup
def failed(request: HttpRequest):
"""Payment failed"""
logging.warning("payment_quickpay.views.failed(), GET args = {}".format(request.GET))
qp_failed_url = getattr(settings, 'QUICKPAY_FAILED_URL', '')
if qp_failed_url:
return HttpResponseRedirect(qp_failed_url)
else:
# Assumes the template is available...
return render(request, "shop/quickpay_failed.html")
@escape_frame
@escape_popup
def success(request: HttpRequest) -> HttpResponse:
"""Quickpay payment succeeded.
GET args:
id : int = ID of order
hash : str = signature hash of the order; the request is rejected (403) if it does not match
NB: Form not available (quickpay order handler)
NB: Only safe to call more than once if order_handler is idempotent
"""
order_id = request.GET.get('id')
if order_id:
order = Order.objects.get(pk=order_id)
else:
order = Order.objects.from_request(request) # Raises DoesNotExist if order not found
order_hash = sign_order(order)
logging.debug("\n ---- payment_quickpay.views.success()\n\norder = %s, sign arg = %s, check sign = %s"
% (order, request.GET.get('hash'), sign_order(order)))
logging.debug("data: {}".format(dict(request.GET)))
# Check hash.
if request.GET.get('hash') != order_hash:
logging.warn("cartridge_quickpay:success - hash doesn't match order")
return HttpResponseForbidden()
# Call order handler
order_handler(request, order_form=None, order=order)
response = redirect("shop_complete")
return response
try:
from cartridge_subscription.models import Subscription, SubscriptionPeriod
except ImportError:
Subscription = None
@csrf_exempt
@transaction.atomic
def callback(request: HttpRequest) -> HttpResponse:
"""Callback from Quickpay. Register payment status in case it wasn't registered already"""
def update_payment() -> Optional[QuickpayPayment]:
"""Update QuickPay payment from Quickpay result"""
# Refers order, data from outer scope
payment: Optional[QuickpayPayment] = QuickpayPayment.get_order_payment(order)
if payment is not None:
payment.update_from_res(data) # NB: qp.test_mode == data['test_mode']
payment.save()
return payment
data = json.loads(request.body.decode('utf-8'))
logging.debug("\n ---- payment_quickpay.views.callback() ----")
logging.debug("Got data {}\n".format(data))
# We may get several callbacks with states "new", "pending", or "processed"
# We're only interested in "processed" for payments and "active" for new subscriptions
qp_state = data.get('state', None)
if (qp_state in ('processed', 'active', 'rejected')
or not getattr(settings, 'QUICKPAY_AUTO_CAPTURE', False) and qp_state == 'pending'):
logging.debug("payment_quickpay.views.callback(): QP state is {}, processing".format(qp_state))
else:
logging.debug("payment_quickpay.views.callback(): QP state is {}, skipping".format(qp_state))
return HttpResponse("OK")
# Get the order
order_id_payment_id_string = data.get('order_id','')
logging.debug('order_id_payment_id_string: {}'.format(order_id_payment_id_string))
order_id = re.sub(r'_\d+', '', order_id_payment_id_string)
logging.debug('order_id: {}'.format(order_id))
try:
order = Order.objects.filter(pk=order_id).select_for_update()[0] # Lock order to prevent race condition
except IndexError:
# Order not found, ignore
logging.warning("payment_quickpay.views.callback(): order id {} not found, skipping".format(order_id))
return HttpResponse("OK")
# Check checksum. If we have multiple agreements, we need the order currency to get the right one
checksum = sign(request.body, get_private_key(order_currency(order)))
logging.debug("Request checksum = {}".format(request.META['HTTP_QUICKPAY_CHECKSUM_SHA256']))
logging.debug("Calculated checksum = {}".format(checksum))
if checksum != request.META['HTTP_QUICKPAY_CHECKSUM_SHA256']:
logging.error('Quickpay callback: checksum failed {}'.format(data))
return HttpResponseBadRequest()
logging.debug("payment_quickpay.views.callback(): order.status = {}".format(order.status))
if data['state'] == 'rejected':
update_payment()
elif data['type'] == 'Subscription' and Subscription is not None:
# Starting a NEW subscription. The Subscription is created in order_handler
logging.error("payment_quickpay.views.callback(): starting subscription, order {}".format(order.id))
# Capture the initial subscription payment
capture_subscription_order(order) # Starts async capture, next callback is 'accepted'
elif data['accepted']:
# Normal or subscription payment
# If autocapture, the payment will have been captured.
# If not autocapture, the payment will have been reserved only and must be captured later.
# -- The order can be considered paid (reserved or captured) if and only if we get here.
# -- An order is paid if and only if it has a transaction_id
logging.info("payment_quickpay.views.callback(): accepted payment, order {}".format(order.id))
payment = update_payment()
order.transaction_id = data['id']
logging.debug("payment_quickpay.views.callback(): calling order_handler, qp subscription = {}"
.format(data.get('subscription_id', '-')))
order_handler(request=None, order_form=None, order=order, payment=payment)
logging.debug("payment_quickpay.views.callback(): final order.status: {}".format(order.status))
return HttpResponse("OK")
| 1.3125 | 1 |
okteam/tsurka.py | o-fedorov/okteam | 0 | 12796808 |
from typing import Tuple
from pgzero.builtins import Actor
from pygame import Vector2
from .settings import ANIMATION_SPEED, HEIGHT, SPEED, WALK_IMAGES, WIDTH
_TIME = 0.0
ALL = {}
X = Vector2(1, 0)
Y = Vector2(0, 1)
def add(direction: Tuple[int, int]):
actor = Actor(WALK_IMAGES[0])
ALL[actor] = Vector2(direction)
return actor
add((1, 0)).midright = (WIDTH, HEIGHT / 2)
add((0, 1)).midbottom = (WIDTH / 2, HEIGHT)
def draw():
for actor in ALL:
actor.draw()
def update(dt):
global _TIME
_TIME += dt
for actor in ALL:
update_one(actor, dt)
def update_one(actor, dt):
image_num = int(_TIME * ANIMATION_SPEED) % len(WALK_IMAGES)
actor.image = WALK_IMAGES[image_num]
if not image_num:
return
direction = ALL[actor]
delta = direction * SPEED * dt
if actor.left + delta.x <= 0 or actor.right + delta.x >= WIDTH:
ALL[actor] = direction.reflect(X)
delta = delta.reflect(X)
if actor.top + delta.y <= 0 or actor.bottom + delta.y >= HEIGHT:
ALL[actor] = direction.reflect(Y)
delta = delta.reflect(Y)
actor.x += delta.x
actor.y += delta.y
| 2.546875 | 3 |
015_011_19.py | priyankakushi/machine-learning | 0 | 12796816 |
# File Input Output
# Write in a file
'''file = open("abc.txt", "w+")
file.write("python is great language. \nYeah its great! !\n")
file. write ("How are you. \nYeah its great! ! \n")
file. write ("Hello Priyanka!\n")
file.close()
# Read through file
file = open("abc.txt", "r+")
#print(file.read())
#print(file.readlines())
print(file.readline())
print(file.readable())
file.close()
# Use of Append in File
# Difference between write and append
file = open("abc.txt", "a+")
file.write("How are you!\n")
file.write("What are you doing today!\n")
print(file.tell())
file.close()'''
file = open("abc.txt", "w+")
file.write("hello Soni!\n")
file.write("how are you!\n")
file.close()
#read through file
file = open("abc.txt", "r+")
#print(file.read())
#print(file.readline())
#print(file.readlines())
print(file.readable())
print(file.read(3))
file.close()
#use of append in file
file = open("abc.txt", "a+")
file.write("what are you doing!\n")
print(file.tell())
file.close()
| 3 | 3 |
src/Jupyter/Jupyter_frontend.py | Chaostheeory/Insight-DOTA-Mine | 0 | 12796824 | import pandas as pd
import psycopg2
#from sqlalchemy import create_engine
psql_credential = {
'database': 'wode',
'user': 'wode',
'password': '***',
'host': '192.168.3.11',
'port': '5432'
}
con = psycopg2.connect(**psql_credential)
def get_winrate(user_id):
query = "SELECT position, winrate FROM positions WHERE user_id='%s' order by position" % user_id
query_results = pd.read_sql_query(query,con)
return query_results
get_winrate(119807644)
| 1.679688 | 2 |
products/urls.py | okosamastar/nisshin_b2b | 0 | 12796832 | from django.urls import path
from . import views
# from django.views.generic import RedirectView
urlpatterns = [
path("", views.CategoriesView.as_view(), name="products.category"),
# path("detail/", RedirectView.as_view(url="/b2b/products/")),
path("detail/<slug:slug>", views.ProductDetail.as_view(), name="products.detail"),
path("<str:cat>/", views.ProductsView.as_view(), name="products.products"),
path(
"<str:cat>/<str:child>", views.ProductsView.as_view(), name="products.products"
),
path("<str:cat>/<str:tag>/", views.ProductsView.as_view(), name="products.tags"),
]
| 1.210938 | 1 |
pythonbrasil/exercicios/decisao/DE resp 03.py | adinsankofa/python | 0 | 12796840 | fm = str(input("Digite o sexo - [M - Masculino] ou [F - Feminino]: "))
def foum():
if fm == "M":
print("M - Masculino")
if fm == "F":
print("F - Feminino")
def si():
while fm != "M" and fm != "F":
print("Sexo inválido, tente novamente!")
fm = str(input("Digite o sexo - [M - Masculino] ou [F - Feminino]: "))
foum()
si()
foum()
| 2.46875 | 2 |
cui/register/auth/GiHubApi.Authorizations.List.20170109081152453/AccountGetter.py | ytyaru/GitHub.Upload.UserRegister.Insert.Token.201704031122 | 0 | 12796848 |
#!python3
#encoding:utf-8
import sqlite3
#from AuthList import AuthList
import AuthList
import traceback
import pyotp
class AccountGetter:
def __init__(self):
self.connector = None
self.cursor = None
def connect(self, db_path):
self.connector = sqlite3.connect(db_path)
self.cursor = self.connector.cursor()
| 1.34375 | 1 |
13975.py | WaiNaat/BOJ-Python | 0 | 12796856 | import sys
input = sys.stdin.readline
import heapq as hq
# input
t = int(input())
for _ in range(t):
chapter = int(input())
pages = list(map(int, input().split()))
# process
'''
The chapters may be merged in any order.
>> Repeatedly pop the two smallest piles from the heap, merge them, and push the sum back.
'''
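# e.g. pages [30, 40, 50]: merge 30+40 (cost 70), then 70+50 (cost 120) -> total 190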
sol = 0
hq.heapify(pages)
for _ in range(chapter - 1):
cost = hq.heappop(pages) + hq.heappop(pages)
sol += cost
hq.heappush(pages, cost)
# output
print(sol) | 1.921875 | 2 |
bluebird/sim_client/bluesky/sim_client.py | rkm/bluebird | 8 | 12796864 | """
BlueSky simulation client class
"""
# TODO: Need to re-add the tests for string parsing/units from the old API tests
import os
from typing import List
from semver import VersionInfo
from .bluesky_aircraft_controls import BlueSkyAircraftControls
from .bluesky_simulator_controls import BlueSkySimulatorControls
from bluebird.settings import Settings
from bluebird.sim_client.bluesky.bluesky_client import BlueSkyClient
from bluebird.utils.abstract_sim_client import AbstractSimClient
from bluebird.utils.timer import Timer
_BS_MIN_VERSION = os.getenv("BS_MIN_VERSION")
if not _BS_MIN_VERSION:
raise ValueError("The BS_MIN_VERSION environment variable must be set")
MIN_SIM_VERSION = VersionInfo.parse(_BS_MIN_VERSION)
# TODO Check cases where we need this
def _assert_valid_args(args: list):
"""
Since BlueSky only accepts commands in the form of (variable-length) strings, we
need to check the arguments for each command string we construct before sending it
"""
# Probably a cleaner way of doing this...
assert all(
x and not x.isspace() and x != "None" for x in map(str, args)
), f"Invalid argument in : {args}"
class SimClient(AbstractSimClient):
"""AbstractSimClient implementation for BlueSky"""
@property
def aircraft(self) -> BlueSkyAircraftControls:
return self._aircraft_controls
@property
def simulation(self) -> BlueSkySimulatorControls:
return self._sim_controls
@property
def sim_version(self) -> VersionInfo:
return self._client.host_version
def __init__(self, **kwargs):
self._client = BlueSkyClient()
self._aircraft_controls = BlueSkyAircraftControls(self._client)
self._sim_controls = BlueSkySimulatorControls(self._client)
def start_timers(self) -> List[Timer]:
return self._client.start_timers()
def connect(self, timeout=1) -> None:
self._client.connect(
Settings.SIM_HOST,
event_port=Settings.BS_EVENT_PORT,
stream_port=Settings.BS_STREAM_PORT,
timeout=timeout,
)
def shutdown(self, shutdown_sim: bool = False) -> bool:
self._client.stop()
return True
| 1.6875 | 2 |
data-structures/p482.py | sajjadt/competitive-programming | 10 | 12796872 |
from os import linesep
num_tests = int(input())
for i in range(num_tests):
input()
perms = list(map(int, input().split()))
nums = list(map(str, input().split()))
out = [0] * len(perms)
for j in range(len(perms)):
out[perms[j]-1] = str(nums[j])
print(linesep.join(out))
if i != num_tests - 1:
print()
| 1.515625 | 2 |
main.py | LCRT215/Conways-Game-of-Life | 0 | 12796880 |
import pygame
import sys
from game_window_class import *
WIDTH, HEIGHT = 800, 800
BACKGROUND = (33, 47, 60)
# event handling, update and draw helpers
def get_events():
global running
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
def update():
game_window.update()
def draw():
window.fill(BACKGROUND)
game_window.draw()
pygame.init()
window = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
game_window = Game_window(window, 150, 180)
running = True
while running:
get_events()
update()
draw()
pygame.display.update()
clock.tick()
pygame.quit()
sys.exit()
| 1.921875 | 2 |
src/components/kankeiforms/__init__.py | BigJerBD/Kankei-Backend | 0 | 12796888 |
from components.kankeiforms.kankeiform import KankeiForm
def init(config):
KankeiForm.timeout = config.DB_TIMEOUT_SEC
from . import exploration
from . import comparison
from . import random
def get_kankeiforms(config):
init(config)
return KankeiForm.registry
def get_kankeiforms_dict(config):
"""
note:: currently simply forward query config, however it is not ideal
since we want to present information about `querying` and not the config
:return:
"""
init(config)
return {
grp: {name: content.asdict() for name, content in content.items()}
for grp, content in KankeiForm.registry.items()
}
| 1.28125 | 1 |
gcraft/application/wx.py | ddomurad/gcraft | 0 | 12796896 |
from OpenGL.GLUT import *
from gcraft.core.app import GCraftApp
from gcraft.core.input_event import InputEvent
import wx
from wx import glcanvas
class GCraftCanvas(wx.glcanvas.GLCanvas):
def __init__(self, parent: wx.Window, gc_app: GCraftApp):
wx.glcanvas.GLCanvas.__init__(self, parent, -1)
self._renderer = gc_app
self._renderer.swap_buffers = self.on_swap_buffers
self._renderer_inited = False
self._last_mouse_pos = None
self._context = wx.glcanvas.GLContext(self)
self.Bind(wx.EVT_ERASE_BACKGROUND, self.on_erase_background_event)
self.Bind(wx.EVT_SIZE, self.on_resize_event)
self.Bind(wx.EVT_PAINT, self.on_paint_event)
self.Bind(wx.EVT_MOUSE_EVENTS, self.on_mouse_event)
self.Bind(wx.EVT_KEY_DOWN, self.on_key_down_event)
self.Bind(wx.EVT_KEY_UP, self.on_key_up_event)
def init(self):
glutInit()
self._renderer_inited = True
self._renderer.on_init()
self.resize()
def resize(self):
if self._renderer_inited:
size = self.GetClientSize()
self.SetCurrent(self._context)
self._renderer.on_reshape(size.width, size.height)
def render(self):
self._renderer.on_render()
self._renderer.input_state.clear_mouse_movement()
def on_swap_buffers(self):
self.SwapBuffers()
def on_erase_background_event(self, event):
pass # Do nothing, to avoid flashing on MSW.
def on_resize_event(self, event):
wx.CallAfter(self.resize)
event.Skip()
def on_paint_event(self, event):
self.SetCurrent(self._context)
if not self._renderer_inited:
self.init()
self.render()
self.Refresh(False)
def on_mouse_event(self, event):
if event.GetEventType() == wx.wxEVT_LEFT_DOWN:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=0, state=True)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_LEFT_UP:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=0, state=False)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
if event.GetEventType() == wx.wxEVT_RIGHT_DOWN:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=1, state=True)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_RIGHT_UP:
input_event = InputEvent(InputEvent.IE_MOUSE, mouse_x=event.X, mouse_y=event.Y, mouse_btn=1, state=False)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
elif event.GetEventType() == wx.wxEVT_MOTION:
if self._last_mouse_pos is None:
self._last_mouse_pos = [event.X, event.Y]
input_event = InputEvent(InputEvent.IE_MOUSE_MOVE, mouse_x=event.X, mouse_y=event.Y,
mouse_dx=self._last_mouse_pos[0] - event.X,
mouse_dy=self._last_mouse_pos[1] - event.Y)
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
self._last_mouse_pos[0] = event.X
self._last_mouse_pos[1] = event.Y
def on_key_down_event(self, event):
input_event = InputEvent(InputEvent.IE_KEY_DOWN, mouse_x=event.X, mouse_y=event.Y, key=event.GetKeyCode())
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
def on_key_up_event(self, event):
input_event = InputEvent(InputEvent.IE_KEY_UP, mouse_x=event.X, mouse_y=event.Y, key=event.GetKeyCode())
self._renderer.input_state.update_state(input_event)
self._renderer.on_input(input_event)
class GCraftContinuousRenderer(wx.Timer):
def __init__(self, canvas: GCraftCanvas):
wx.Timer.__init__(self)
self.canvas = canvas
def start(self):
wx.Timer.Start(self, 10)
def stop(self):
wx.Timer.Stop(self)
def Notify(self):
self.canvas.Refresh(False)
class GCraftContinuousCanvas(GCraftCanvas):
def __init__(self, parent: wx.Window, gc_app: GCraftApp):
GCraftCanvas.__init__(self, parent, gc_app)
self.renderer_timer = GCraftContinuousRenderer(self)
def start(self):
self.renderer_timer.start()
def stop(self):
self.renderer_timer.stop()
| 1.65625 | 2 |
tests/graph/test_ppatterns.py | saezlab/squidpy | 0 | 12796904 | import pytest
from anndata import AnnData
from pandas.testing import assert_frame_equal
import numpy as np
from squidpy.gr import moran, ripley_k, co_occurrence
MORAN_K = "moranI"
def test_ripley_k(adata: AnnData):
"""Check ripley score and shape."""
ripley_k(adata, cluster_key="leiden")
# assert ripley in adata.uns
assert "ripley_k_leiden" in adata.uns.keys()
# assert clusters intersection
cat_ripley = set(adata.uns["ripley_k_leiden"]["leiden"].unique())
cat_adata = set(adata.obs["leiden"].cat.categories)
assert cat_ripley.isdisjoint(cat_adata) is False
def test_moran_seq_par(dummy_adata: AnnData):
"""Check whether moran results are the same for seq. and parallel computation."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
df = moran(dummy_adata, copy=True, n_jobs=1, seed=42, n_perms=50)
df_parallel = moran(dummy_adata, copy=True, n_jobs=2, seed=42, n_perms=50)
idx_df = df.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
with pytest.raises(AssertionError, match=r'.*\(column name="pval_sim"\) are different.*'):
# because the seeds will be different, we don't expect the pval_sim values to be the same
assert_frame_equal(df, df_parallel)
@pytest.mark.parametrize("n_jobs", [1, 2])
def test_moran_reproducibility(dummy_adata: AnnData, n_jobs: int):
"""Check moran reproducibility results."""
moran(dummy_adata)
dummy_adata.var["highly_variable"] = np.random.choice([True, False], size=dummy_adata.var_names.shape)
# seed will work only when multiprocessing/loky
df_1 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
df_2 = moran(dummy_adata, copy=True, n_jobs=n_jobs, seed=42, n_perms=50)
idx_df = df_1.index.values
idx_adata = dummy_adata[:, dummy_adata.var.highly_variable.values].var_names.values
assert MORAN_K in dummy_adata.uns.keys()
# assert fdr correction in adata.uns
assert "pval_sim_fdr_bh" in dummy_adata.uns[MORAN_K]
assert dummy_adata.uns[MORAN_K].columns.shape == (4,)
# test highly variable
assert dummy_adata.uns[MORAN_K].shape != df_1.shape
# assert idx are sorted and contain same elements
assert not np.array_equal(idx_df, idx_adata)
np.testing.assert_array_equal(sorted(idx_df), sorted(idx_adata))
# check parallel gives same results
assert_frame_equal(df_1, df_2)
def test_co_occurrence(adata: AnnData):
"""
check ripley score and shape
"""
co_occurrence(adata, cluster_key="leiden")
# assert occurrence in adata.uns
assert "leiden_co_occurrence" in adata.uns.keys()
assert "occ" in adata.uns["leiden_co_occurrence"].keys()
assert "interval" in adata.uns["leiden_co_occurrence"].keys()
# assert shapes
arr = adata.uns["leiden_co_occurrence"]["occ"]
assert arr.ndim == 3
assert arr.shape[2] == 49
assert arr.shape[1] == arr.shape[0] == adata.obs["leiden"].unique().shape[0]
# @pytest.mark.parametrize(("ys", "xs"), [(10, 10), (None, None), (10, 20)])
@pytest.mark.parametrize(("n_jobs", "n_splits"), [(1, 2), (2, 2)])
def test_co_occurrence_reproducibility(adata: AnnData, n_jobs: int, n_splits: int):
"""Check co_occurrence reproducibility results."""
arr_1, interval_1 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
arr_2, interval_2 = co_occurrence(adata, cluster_key="leiden", copy=True, n_jobs=n_jobs, n_splits=n_splits)
np.testing.assert_array_equal(sorted(interval_1), sorted(interval_2))
np.testing.assert_allclose(arr_1, arr_2)
| 1.554688 | 2 |
Simulation/main.py | MKamyab1991/quadcopter_ppo | 2 | 12796912 | import gym
import numpy as np
import torch
import torch.optim as optim
from utils_main import make_env, save_files
from neural_network import ActorCritic
from ppo_method import ppo
from common.multiprocessing_env import SubprocVecEnv
from itertools import count
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
num_envs = 2
env_name = "CustomEnv-v0"
envs = [make_env(env_name) for i in range(num_envs)]
envs = SubprocVecEnv(envs)
num_inputs = envs.observation_space.shape[0]
num_outputs = envs.action_space.shape[0]
# Hyper params:
hidden_size = 400
lr = 3e-6
num_steps = 20
mini_batch_size = 5
ppo_epochs = 4
threshold_reward = -0.01
model = ActorCritic(num_inputs, num_outputs, hidden_size).to(device)
env = gym.make(env_name)
my_ppo = ppo(model, env)
optimizer = optim.Adam(model.parameters(), lr=lr)
max_frames = 15_000_000
frame_idx = 0
test_rewards = []
save_iteration = 1000
model_save_iteration = 1000
state = envs.reset()
early_stop = False
def trch_ft_device(input, device):
output = torch.FloatTensor(input).to(device)
return output
saver_model = save_files()
while frame_idx < max_frames and not early_stop:
log_probs = []
values = []
states = []
actions = []
rewards = []
masks = []
entropy = 0
for _ in range(num_steps):
state = trch_ft_device(state, device)
dist, value = model(state)
action = dist.sample()
next_state, reward, done, _ = envs.step(action.cpu().numpy())
log_prob = dist.log_prob(action)
entropy += dist.entropy().mean()
# appending
log_probs.append(log_prob)
values.append(value)
rewards.append(torch.FloatTensor(reward).unsqueeze(1).to(device))
masks.append(torch.FloatTensor(1 - done).unsqueeze(1).to(device))
states.append(state)
actions.append(action)
# next iteration init.
state = next_state
frame_idx += 1
if frame_idx % save_iteration == 0:
test_reward = np.mean([my_ppo.test_env() for _ in range(num_envs)])
test_rewards.append(test_reward)
# plot(frame_idx, test_rewards)
if test_reward > threshold_reward:
early_stop = True
if frame_idx % model_save_iteration == 0:
saver_model.model_save(model)
next_state = trch_ft_device(next_state, device)
_, next_value = model(next_state)
returns = my_ppo.compute_gae(next_value, rewards, masks, values)
returns = torch.cat(returns).detach()
log_probs = torch.cat(log_probs).detach()
values = torch.cat(values).detach()
states = torch.cat(states)
actions = torch.cat(actions)
advantage = returns - values
my_ppo.ppo_update(ppo_epochs, mini_batch_size, states, actions, log_probs, returns, advantage, optimizer)
max_expert_num = 50000
num_steps = 0
expert_traj = []
# building an episode based on the current model.
for i_episode in count():
state = env.reset()
done = False
total_reward = 0
while not done:
state = torch.FloatTensor(state).unsqueeze(0).to(device)
dist, _ = model(state)
action = dist.sample().cpu().numpy()[0]
next_state, reward, done, _ = env.step(action)
state = next_state
total_reward += reward
expert_traj.append(np.hstack([state, action]))
num_steps += 1
print("episode:", i_episode, "reward:", total_reward)
if num_steps >= max_expert_num:
break
expert_traj = np.stack(expert_traj)
print()
print(expert_traj.shape)
print()
np.save("expert_traj.npy", expert_traj)
| 1.992188 | 2 |
The container /Robotic Arm/craves.ai-master/pose/utils/evaluation.py | ReEn-Neom/ReEn.Neom-source-code- | 0 | 12796920 |
from __future__ import absolute_import
import math
import numpy as np
import matplotlib.pyplot as plt
from random import randint
import torch
from .misc import *
from .transforms import transform, transform_preds
__all__ = ['accuracy', 'AverageMeter']
def get_preds(scores):
''' get predictions from score maps in torch Tensor
return type: torch.LongTensor
'''
assert scores.dim() == 4, 'Score maps should be 4-dim'
maxval, idx = torch.max(scores.view(scores.size(0), scores.size(1), -1), 2)
maxval = maxval.view(scores.size(0), scores.size(1), 1)
idx = idx.view(scores.size(0), scores.size(1), 1) + 1
preds = idx.repeat(1, 1, 2).float()
preds[:,:,0] = (preds[:,:,0] - 1) % scores.size(3) + 1
preds[:,:,1] = torch.floor((preds[:,:,1] - 1) / scores.size(3)) + 1
pred_mask = maxval.gt(0).repeat(1, 1, 2).float()
preds *= pred_mask
return preds
def calc_dists(preds, target, normalize):
preds = preds.float()
target = target.float()
dists = torch.zeros(preds.size(1), preds.size(0))
for n in range(preds.size(0)):
for c in range(preds.size(1)):
if target[n,c,0] > 1 and target[n, c, 1] > 1:
dists[c, n] = torch.dist(preds[n,c,:], target[n,c,:])/normalize[n]
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
if dists.ne(-1).sum() > 0:
return dists.le(thr).eq(dists.ne(-1)).sum().numpy() / dists.ne(-1).sum().numpy()
else:
return -1
def accuracy(output, target, idxs, thr=0.5):
''' Calculate accuracy according to PCK, but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs', followed by individual accuracies
'''
preds = get_preds(output)
gts = get_preds(target)
norm = torch.ones(preds.size(0))*output.size(3)/4.0
dists = calc_dists(preds, gts, norm)
acc = torch.zeros(len(idxs)+1)
avg_acc = 0
cnt = 0
for i in range(len(idxs)):
acc[i+1] = dist_acc(dists[idxs[i]-1], thr=thr)
if acc[i+1] >= 0:
avg_acc = avg_acc + acc[i+1]
cnt += 1
if cnt != 0:
acc[0] = avg_acc / cnt
return acc
def final_preds_bbox(output, bbox, res):
preds = get_preds(output) # float type
preds = preds.numpy()
for i in range(preds.shape[0]):
width = bbox[2][i] - bbox[0][i]
height = bbox[3][i] - bbox[1][i]
for j in range(preds.shape[1]):
preds[i, j, :] = preds[i, j, :] / res * np.array([width, height]) + np.array([bbox[0][i], bbox[1][i]])
return torch.from_numpy(preds)
def final_preds(output, center, scale, res):
coords = get_preds(output) # float type
# pose-processing
for n in range(coords.size(0)):
for p in range(coords.size(1)):
hm = output[n][p]
px = int(math.floor(coords[n][p][0]))
py = int(math.floor(coords[n][p][1]))
if px > 1 and px < res[0] and py > 1 and py < res[1]:
diff = torch.Tensor([hm[py - 1][px] - hm[py - 1][px - 2], hm[py][px - 1]-hm[py - 2][px - 1]])
coords[n][p] += diff.sign() * .25
coords[:, :, 0] += 0.5
coords[:, :, 1] -= 0.5
preds = coords.clone()
# Transform back
for i in range(coords.size(0)):
preds[i] = transform_preds(coords[i], center[i], scale[i], res)
if preds.dim() < 3:
preds = preds.view(1, preds.size())
return preds
def d3_acc(preds, gts, percent = .5):
num_samples = len(preds)
acc = np.zeros_like(preds[0])
hit = 0
# miss_list = []
max_error_list = [] #max angle error for each image
res_list = []
for i in range(num_samples):
pred = np.array(preds[i])
gt = np.array(gts[i])
res = np.abs(pred - gt)
res[0:7] = np.abs((res[0:7] + 180.0) % 360.0 - 180.0)
max_error_list.append(np.max(res[0:4]))
res_list.append(res)
# if not np.any(res[0:4]>10): #false prediction
# acc += res
# hit = hit + 1
# else:
# miss_list.append(i)
top_n = int(percent * num_samples) # take top N images with smallest error.
sorted_list = np.argsort(max_error_list)
for i in range(top_n):
acc += res_list[sorted_list[i]]
return (acc/top_n)[:4]
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
| 1.8125 | 2 |
deployer/contrib/loggers/on_host.py | timgates42/python-deployer | 39 | 12796928 |
from deployer.loggers import Logger, RunCallback, ForkCallback
from deployer.utils import esc1
class OnHostLogger(Logger):
"""
Log all transactions on every host in:
~/.deployer/history
"""
def __init__(self, username):
from socket import gethostname
self.from_host = gethostname()
self.username = username
def log_run(self, run_entry):
if not run_entry.sandboxing:
run_entry.host._run_silent("""
mkdir -p ~/.deployer/;
echo -n `date '+%%Y-%%m-%%d %%H:%%M:%%S | ' ` >> ~/.deployer/history;
echo -n '%s | %s | %s | ' >> ~/.deployer/history;
echo '%s' >> ~/.deployer/history;
"""
% ('sudo' if run_entry.use_sudo else ' ',
esc1(self.from_host),
esc1(self.username),
esc1(run_entry.command or '')
))
return RunCallback()
def log_fork(self, fork_entry):
# Use the same class OnHostLogger in forks.
class callback(ForkCallback):
def get_fork_logger(c):
return OnHostLogger(self.username)
return callback()
| 1.53125 | 2 |
money_legos/uniswap/contracts.py | gokhanbaydar/py-money-legos | 3 | 12796936 |
from .. import util
exchangeAbi = util.read_json("./uniswap/abi/Exchange.json")
factoryAbi = util.read_json("./uniswap/abi/Factory.json")
contracts = {
"factory": {
"address": "0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95",
"abi": factoryAbi,
},
"exchange": {
"abi": exchangeAbi,
},
}
| 0.96875 | 1 |
src/wready/__init__.py | WisconsinRobotics/wready | 0 | 12796944 |
from .sig_handler import SignalInterruptHandler
from .wready_client import TaskContext, WReadyClient
from .wready_server import InitTask, WReadyServer, WReadyServerObserver
| 0.435547 | 0 |
misago/misago/users/api/ranks.py | vascoalramos/misago-deployment | 2 | 12796952 | from rest_framework import mixins, viewsets
from ..models import Rank
from ..serializers import RankSerializer
class RanksViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
serializer_class = RankSerializer
queryset = Rank.objects.filter(is_tab=True).order_by("order")
| 0.894531 | 1 |
parentopticon/db/test_model.py | EliRibble/parentopticon | 0 | 12796960 | import datetime
import os
from typing import List, Optional
import unittest
from parentopticon.db import test_utilities
from parentopticon.db.model import ColumnInteger, ColumnText, Model
class ModelTests(test_utilities.DBTestCase):
"Test all of our logic around the model class."
class MyTable(Model):
COLUMNS = {
"id": ColumnInteger(autoincrement=True, primary_key=True),
"count": ColumnInteger(),
"name": ColumnText(null=True),
}
def _makerows(self, names: Optional[List[str]] = None):
"Make a few rows. Useful for many tests."
names = names or ["foo", "bar", "baz"]
return {
ModelTests.MyTable.insert(self.db, count=(i+1)*2, name=name)
for i, name in enumerate(names)
}
def setUp(self):
super().setUp()
self.db.execute_commit_return(ModelTests.MyTable.create_statement())
self.db.execute_commit_return(ModelTests.MyTable.truncate_statement())
def test_create_statement(self):
"Can we get a proper create table clause?"
result = ModelTests.MyTable.create_statement()
expected = "\n".join((
"CREATE TABLE IF NOT EXISTS MyTable (",
"count INTEGER,",
"id INTEGER PRIMARY KEY AUTOINCREMENT,",
"name TEXT",
");",
))
self.assertEqual(result, expected)
def test_insert(self):
"Can we insert a row into a table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
found = self.db.execute("SELECT count, name FROM MyTable").fetchall()
self.assertEqual(len(found), 1)
def test_get(self):
"Can we get a row from the table?"
rowid = ModelTests.MyTable.insert(self.db, count=3, name="foobar")
result = ModelTests.MyTable.get(self.db, rowid)
self.assertEqual(result.id, rowid)
self.assertEqual(result.count, 3)
self.assertEqual(result.name, "foobar")
def test_get_none(self):
"Can we get None when the row does not exist?"
result = ModelTests.MyTable.get(self.db, -1)
self.assertIs(result, None)
def test_list_all(self):
"Can we get several rows from the table?"
rowids = self._makerows()
results = ModelTests.MyTable.list(self.db)
self.assertEqual({result.id for result in results}, rowids)
def test_list_some(self):
"Can we get several rows from the table with a where clause?"
rowids = self._makerows()
results = ModelTests.MyTable.list_where(self.db, where="count >= 4")
self.assertEqual({result.count for result in results}, {4, 6})
def test_list_with_none(self):
"Can we get a list where an item is NULL?"
rowids = self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.list(self.db, name=None)
self.assertEqual({result.count for result in results}, {4})
def test_search_not_found(self):
"Can we search and not find something?"
results = ModelTests.MyTable.search(self.db, name="sir-not-appearing")
self.assertIs(results, None)
def test_search_one(self):
"Can we search and find a single row?"
rowids = self._makerows()
results = ModelTests.MyTable.search(self.db, name="foo")
self.assertEqual(results.name, "foo")
self.assertEqual(results.count, 2)
def test_search_many(self):
"Do we error when we have multiple matches?"
self._makerows(names=["foo", "foo", "bar"])
with self.assertRaises(ValueError):
ModelTests.MyTable.search(self.db, name="foo")
def test_search_with_none(self):
"Do we properly search for NULL columns?"
self._makerows(names=["foo", None, "bar"])
results = ModelTests.MyTable.search(self.db, name=None)
self.assertEqual(results.name, None)
self.assertEqual(results.count, 4)
def test_update(self):
"Can we update a row with update()?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff")
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.name, "biff")
def test_update_multiple(self):
"Can we update a row with multiple values?"
rows = self._makerows(names=["foo"])
row_id = list(rows)[0]
ModelTests.MyTable.update(self.db, row_id, name="biff", count=100)
results = ModelTests.MyTable.get(self.db, row_id)
self.assertEqual(results.count, 100)
self.assertEqual(results.name, "biff")
| 1.882813 | 2 |
docs_build/tutorials_templates/data_management/data_versioning/mds.py | dataloop-ai/sdk_examples | 3 | 12796968 | def section1():
"""
# Data Versioning
Dataloop's powerful data versioning provides you with unique tools for data management - clone, merge, slice & dice your files, to create multiple versions for various applications. Sample use cases include:
    * Golden training sets management
    * Reproducibility (dataset training snapshot)
    * Experimentation (creating subsets from different kinds)
    * Task/Assignment management
Data Version "Snapshot" - Use our versioning feature as a way to save data (items, annotations, metadata) before any major process. For example, a snapshot can serve as a roll-back mechanism to original datasets in case of any error without losing the data.
## Clone Datasets
Cloning a dataset creates a new dataset with the same files as the original. Files are actually a reference to the original binary and not a new copy of the original, so your cloud data remains safe and protected. When cloning a dataset, you can add a destination dataset, remote file path, and more...
"""
def section2():
"""
## Merge Datasets
Dataset merging outcome depends on how similar or different the datasets are.
* Cloned Datasets - items, annotations, and metadata will be merged. This means that you will see annotations from different datasets on the same item.
* Different datasets (not clones) with similar recipes - items will be summed up, which will cause duplication of similar items.
* Datasets with different recipes - Datasets with different default recipes cannot be merged. Use the 'Switch recipe' option on dataset level (3-dots action button) to match recipes between datasets and be able to merge them.
"""
| 3.078125 | 3 |
setup.py | tsurusekazuki/Create_Package | 0 | 12796976 | # -*- coding: utf-8 -*-
# Learn more: https://github.com/kennethreitz/setup.py
import os
from typing import List
from setuptools import setup, find_packages
def read_requirements() -> List[str]:
"""Parse requirements from requirements.txt."""
reqs_path = os.path.join('.', 'requirements.txt')
with open(reqs_path, 'r') as f:
requirements = [line.rstrip() for line in f]
return requirements
setup(
name='sample',
version='0.1.0',
description='Sample package for Python-Guide.org',
author='<NAME>',
author_email='<EMAIL>',
install_requires=read_requirements(),
url='test',
packages=find_packages(exclude=('tests', 'docs'))
)
| 2.015625 | 2 |
peacecorps/contenteditor/forms.py | cmc333333/peacecorps-site | 8 | 12796984 | <reponame>cmc333333/peacecorps-site
import logging
from django import forms
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth.forms import (
AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm)
from django.utils.translation import ugettext_lazy as _
from contenteditor import models
class LoggingAuthenticationForm(AdminAuthenticationForm):
"""Override login form to log attempts"""
def clean(self):
logger = logging.getLogger("peacecorps.login")
try:
cleaned = super(LoggingAuthenticationForm, self).clean()
if cleaned.get('password'):
logger.info("%s successfully logged in",
self.cleaned_data['username'])
else:
logger.warn("Failed login attempt for %s",
self.cleaned_data.get('username'))
return cleaned
except forms.ValidationError:
logger.warn("Failed login attempt for %s",
self.cleaned_data.get('username'))
raise
class StrictUserCreationForm(UserCreationForm):
password1 = forms.CharField(
label=_("Password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
password = self.cleaned_data['password1']
errors = models.password_errors(password)
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
class StrictAdminPasswordChangeForm(AdminPasswordChangeForm):
"""Password form for editing a user"""
password1 = forms.CharField(
label=_("Password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
password = self.cleaned_data['password1']
errors = models.password_errors(password)
# Also check that this is a new password
if self.user.check_password(self.cleaned_data['password1']):
errors.append("Must not reuse a password")
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
def save(self):
user = super(StrictAdminPasswordChangeForm, self).save()
user.extra.password_expires = models.expires()
user.extra.save()
return user
class StrictPasswordChangeForm(PasswordChangeForm):
"""Password form residing at /admin/password_change"""
new_password1 = forms.CharField(
label=_("New password"), widget=forms.PasswordInput, help_text=_("""
Enter a password. Requirements include: at least 20 characters,
at least one uppercase letter, at least one lowercase letter, at
least one number, and at least one special character.
"""))
def clean_new_password1(self):
"""Adds to the default password validation routine in order to enforce
stronger passwords"""
        password = self.cleaned_data['new_password1']
errors = models.password_errors(password)
# Also check that this is a new password
        if self.user.check_password(self.cleaned_data['new_password1']):
errors.append("Must not reuse a password")
# If password_validator returns errors, raise an error, else proceed.
if errors:
raise forms.ValidationError('\n'.join(errors))
else:
return password
def save(self):
user = super(StrictPasswordChangeForm, self).save()
user.extra.password_expires = models.expires()
user.extra.save()
return user
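# Wiring sketch (added for illustration; the project's actual admin
# registration lives elsewhere). Django's UserAdmin accepts these forms:
#
#     from django.contrib.auth.admin import UserAdmin
#     UserAdmin.add_form = StrictUserCreationForm
#     UserAdmin.change_password_form = StrictAdminPasswordChangeForm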
| 1.421875 | 1 |
intralinks/utils/associations.py | ilapi/intralinks-sdk-python | 3 | 12796992 | <filename>intralinks/utils/associations.py
"""
For educational purpose only
"""
def associate_users_and_groups(users, groups, group_members):
users_by_id = {u['id']:u for u in users}
groups_by_id = {g['id']:g for g in groups}
for u in users:
u['groups'] = []
for g in groups:
g['groupMembers'] = []
for m in group_members:
group = groups_by_id[m['workspaceGroupId']]
user = users_by_id[m['workspaceUserId']]
group['groupMembers'].append(user['id'])
user['groups'].append(group['id'])
for g in groups:
if g['groupMemberCount'] != len(g['groupMembers']):
raise Exception(g)
class PathBuilder:
def __init__(self, objects):
self.objects = objects
self.objects_by_id = {o['id']:o for o in objects}
def get_object(self, object_id):
return self.objects_by_id[object_id]
def get_parent(self, o):
return self.get_object(o['parentId'])
def has_parent(self, o):
return 'parentId' in o and o['parentId'] != -1
def build_paths(self):
for o in self.objects:
self.__build_path_helper__(o)
def __build_path_helper__(self, o):
if 'ids' not in o:
parent_ids = []
parent_names = []
if self.has_parent(o):
parent = self.get_parent(o)
if 'children_ids' not in parent:
parent['children_ids'] = []
parent['children_ids'].append(o['id'])
self.__build_path_helper__(parent)
parent_ids = parent['ids']
parent_names = parent['names']
o['ids'] = parent_ids + [o['id']]
o['names'] = parent_names + [o['name']]
o['fullPath'] = '/'.join(o['names'])
def build_paths(*arg):
objects = []
for a in arg:
objects.extend(a)
PathBuilder(objects).build_paths()
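# Usage sketch (added for illustration, using only the helpers above): each
# object needs 'id', 'name', and optionally 'parentId'; build_paths() adds
# 'ids', 'names', and 'fullPath' in place.
if __name__ == "__main__":
    folders = [
        {"id": 1, "name": "root"},
        {"id": 2, "name": "child", "parentId": 1},
    ]
    build_paths(folders)
    print(folders[1]["fullPath"])  # -> root/child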
| 2.4375 | 2 |
examples/boost_python/demo3_numpy/demo3.py | davidcortesortuno/finmag | 10 | 12797000 | <reponame>davidcortesortuno/finmag<filename>examples/boost_python/demo3_numpy/demo3.py
import numpy as np
import demo3_module
a = np.array([[1, 2], [3, 4]], dtype=float)
print "Trace of a:", demo3_module.trace(a)
| 1.28125 | 1 |
_unittests/ut_sklapi/test_onnx_helper.py | Exlsunshine/mlprodict | 0 | 12797008 | <filename>_unittests/ut_sklapi/test_onnx_helper.py
"""
@brief test log(time=2s)
"""
import unittest
from logging import getLogger
import numpy
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Binarizer, StandardScaler, OneHotEncoder
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.helpers.onnx_helper import load_onnx_model, save_onnx_model
from skl2onnx.helpers.onnx_helper import select_model_inputs_outputs
from skl2onnx.helpers.onnx_helper import enumerate_model_node_outputs
from pyquickhelper.pycode import ExtTestCase
class TestOnnxHelper(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
def get_model(self, model):
try:
import onnxruntime
assert onnxruntime is not None
except ImportError:
return None
from onnxruntime import InferenceSession
session = InferenceSession(save_onnx_model(model))
return lambda X: session.run(None, {'input': X})[0]
def test_onnx_helper_load_save(self):
model = make_pipeline(StandardScaler(), Binarizer(threshold=0.5))
X = numpy.array([[0.1, 1.1], [0.2, 2.2]])
model.fit(X)
model_onnx = convert_sklearn(
model, 'binarizer', [('input', FloatTensorType([1, 2]))])
filename = "temp_onnx_helper_load_save.onnx"
save_onnx_model(model_onnx, filename)
model = load_onnx_model(filename)
list(enumerate_model_node_outputs(model))
new_model = select_model_inputs_outputs(model, 'variable')
self.assertTrue(new_model.graph is not None) # pylint: disable=E1101
tr1 = self.get_model(model)
tr2 = self.get_model(new_model)
X = X.astype(numpy.float32)
X1 = tr1(X)
X2 = tr2(X)
self.assertEqual(X1.shape, (2, 2))
self.assertEqual(X2.shape, (2, 2))
def test_onnx_helper_load_save_init(self):
model = make_pipeline(Binarizer(), OneHotEncoder(
sparse=False), StandardScaler())
X = numpy.array([[0.1, 1.1], [0.2, 2.2], [0.4, 2.2], [0.2, 2.4]])
model.fit(X)
model_onnx = convert_sklearn(
model, 'pipe3', [('input', FloatTensorType([1, 2]))])
filename = "temp_onnx_helper_load_save.onnx"
save_onnx_model(model_onnx, filename)
model = load_onnx_model(filename)
list(enumerate_model_node_outputs(model))
new_model = select_model_inputs_outputs(model, 'variable')
self.assertTrue(new_model.graph is not None) # pylint: disable=E1101
tr1 = self.get_model(model)
tr2 = self.get_model(new_model)
X = X.astype(numpy.float32)
X1 = tr1(X)
X2 = tr2(X)
self.assertEqual(X1.shape, (4, 2))
self.assertEqual(X2.shape, (4, 2))
if __name__ == "__main__":
unittest.main()
| 1.703125 | 2 |
src/constellix/domains/record/type/main.py | aperim/python-constellix | 0 | 12797016 | <filename>src/constellix/domains/record/type/main.py
"""A Record"""
import logging
_LOGGER = logging.getLogger(__name__)
class DomainRecord(object):
def __init__(self, protocol=None, domain=None):
super().__init__()
self.__protocol = protocol
self.__domain = domain
@property
def record_type(self):
try:
return self.__record_type
except AttributeError:
return None
@property
def domain_id(self):
try:
return self.__domain.id
except AttributeError:
return None | 1.335938 | 1 |
pharmacognosy/users/__init__.py | skylifewww/pharmacognosy | 0 | 12797024 | <reponame>skylifewww/pharmacognosy<gh_stars>0
default_app_config = 'pharmacognosy.users.apps.Config'
| -0.124512 | 0 |
core_admin/des/migrations/0033_auto_20210722_1449.py | linea-it/tno | 0 | 12797032 | # Generated by Django 2.2.13 on 2021-07-22 14:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('des', '0032_auto_20210713_2127'),
]
operations = [
migrations.AlterField(
model_name='astrometryjob',
name='status',
field=models.IntegerField(choices=[(1, 'Idle'), (2, 'Running'), (3, 'Completed'), (4, 'Failed'), (5, 'Aborted'), (6, 'Warning'), (7, 'Launched')], default=1, verbose_name='Status'),
),
]
| 0.578125 | 1 |
alipay/aop/api/domain/MultiStagePayLineInfo.py | antopen/alipay-sdk-python-all | 213 | 12797040 | <reponame>antopen/alipay-sdk-python-all<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class MultiStagePayLineInfo(object):
def __init__(self):
self._payment_amount = None
self._payment_idx = None
@property
def payment_amount(self):
return self._payment_amount
@payment_amount.setter
def payment_amount(self, value):
self._payment_amount = value
@property
def payment_idx(self):
return self._payment_idx
@payment_idx.setter
def payment_idx(self, value):
self._payment_idx = value
def to_alipay_dict(self):
params = dict()
if self.payment_amount:
if hasattr(self.payment_amount, 'to_alipay_dict'):
params['payment_amount'] = self.payment_amount.to_alipay_dict()
else:
params['payment_amount'] = self.payment_amount
if self.payment_idx:
if hasattr(self.payment_idx, 'to_alipay_dict'):
params['payment_idx'] = self.payment_idx.to_alipay_dict()
else:
params['payment_idx'] = self.payment_idx
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = MultiStagePayLineInfo()
if 'payment_amount' in d:
o.payment_amount = d['payment_amount']
if 'payment_idx' in d:
o.payment_idx = d['payment_idx']
return o
| 1.476563 | 1 |
systest/testcases/vim/test_vim.py | ayoubbargueoui1996/osm-devops | 0 | 12797048 | <reponame>ayoubbargueoui1996/osm-devops
# Copyright 2017 Sandvine
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
import time
@pytest.mark.vim
@pytest.mark.openstack
@pytest.mark.vmware
class TestClass(object):
def test_empty_vim(self,osm):
assert not osm.get_api().vim.list()
@pytest.fixture(scope='function')
def cleanup_test_add_vim_account(self,osm,request):
def teardown():
try:
for vim in osm.get_api().vim.list(False):
osm.get_api().vim.delete(vim['name'])
except:
pass
request.addfinalizer(teardown)
@pytest.mark.openstack
@pytest.mark.smoke
def test_add_vim_account(self,osm,openstack,cleanup_test_add_vim_account):
os_access = {}
vim_name = 'helloworld-os'
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['vim-type'] = 'openstack'
os_access['description'] = 'a test vim'
assert not osm.get_api().vim.create(vim_name,os_access)
resp=osm.get_api().vim.get(vim_name)
assert resp['name'] == vim_name
assert resp['vim_type'] == 'openstack'
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
assert not osm.get_api().vim.delete(vim_name)
@pytest.mark.vmware
#@<EMAIL>
def test_add_vim_account_vmware(self,osm,vmware,cleanup_test_add_vim_account):
os_access = {}
vim_name = 'helloworld-vmware'
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['vim-type'] = 'vmware'
os_access['description'] = 'a test vim'
assert not osm.get_api().vim.create(vim_name,os_access)
resp=osm.get_api().vim.get(vim_name)
assert resp['name'] == vim_name
assert resp['vim_type'] == 'vmware'
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
assert not osm.get_api().vim.delete(vim_name)
<EMAIL>
def test_add_multiple_accounts(self,osm,cleanup_test_add_vim_account):
os_access = {}
vims = [ {'name': 'testvim1', 'vim-type': 'openstack'}, {'name': 'testvim2','vim-type': 'vmware'} ]
os_access['vim-url'] = 'https://169.254.169.245/'
os_access['vim-username'] = 'pytest2'
os_access['vim-password'] = '<PASSWORD>'
os_access['vim-tenant-name'] = 'pytest3'
os_access['description'] = 'a test vim'
for vim in vims:
os_access['vim-type'] = vim['vim-type']
assert not osm.get_api().vim.create(vim['name'],os_access)
resp=osm.get_api().vim.get(vim['name'])
assert resp['name'] == vim['name']
assert resp['vim_type'] == vim['vim-type']
assert resp['vim_url'] == os_access['vim-url']
assert resp['vim_user'] == os_access['vim-username']
assert resp['vim_tenant_name'] == os_access['vim-tenant-name']
for vim in osm.get_api().vim.list(False):
osm.get_api().vim.delete(vim['name'])
| 1.234375 | 1 |
setup.py | nitros12/pyumlgen | 0 | 12797056 | <filename>setup.py
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md")) as f:
long_desc = f.read()
setup(
name="pyumlgen",
version="0.1.6",
description="Generate UML diagrams with type information from python modules",
author="<NAME>",
packages=find_packages(),
entry_points={
"console_scripts": [
"pyumlgen=pyumlgen:main"
]
}
)
| 0.894531 | 1 |
2015/08.py | bernikr/advent-of-code | 1 | 12797064 | from aocd import get_data
def part1(a):
return sum(len(l) - len(l.encode('utf-8').decode('unicode_escape')) + 2 for l in a)
def part2(a):
return sum(len(l.encode('unicode_escape').decode('utf-8').replace('"', '\\"')) - len(l) + 2 for l in a)
if __name__ == '__main__':
data = get_data(day=8, year=2015)
inp = data.splitlines()
print(part1(inp))
print(part2(inp))
| 2 | 2 |
src/ape/managers/accounts.py | unparalleled-js/ape | 210 | 12797072 | from typing import Dict, Iterator, List, Type
from dataclassy import dataclass
from pluggy import PluginManager # type: ignore
from ape.api.accounts import AccountAPI, AccountContainerAPI, TestAccountAPI
from ape.types import AddressType
from ape.utils import cached_property, singledispatchmethod
from .config import ConfigManager
from .converters import ConversionManager
from .networks import NetworkManager
@dataclass
class AccountManager:
"""
The ``AccountManager`` is a container of containers for
:class:`~ape.api.accounts.AccountAPI` objects.
All containers must subclass :class:`~ape.api.accounts.AccountContainerAPI`
and are treated as singletons.
Import the accounts manager singleton from the root ``ape`` namespace.
Usage example::
from ape import accounts # "accounts" is the AccountManager singleton
my_accounts = accounts.load("dev")
"""
config: ConfigManager
converters: ConversionManager
plugin_manager: PluginManager
network_manager: NetworkManager
@cached_property
def containers(self) -> Dict[str, AccountContainerAPI]:
"""
The list of all :class:`~ape.api.accounts.AccountContainerAPI` instances
across all installed plugins.
Returns:
dict[str, :class:`~ape.api.accounts.AccountContainerAPI`]
"""
containers = {}
data_folder = self.config.DATA_FOLDER
data_folder.mkdir(exist_ok=True)
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
# Ignore containers that contain test accounts.
if issubclass(account_type, TestAccountAPI):
continue
accounts_folder = data_folder / plugin_name
accounts_folder.mkdir(exist_ok=True)
containers[plugin_name] = container_type(accounts_folder, account_type, self.config)
return containers
@property
def aliases(self) -> Iterator[str]:
"""
All account aliases from every account-related plugin. The "alias"
is part of the :class:`~ape.api.accounts.AccountAPI`. Use the
account alias to load an account using method
:meth:`~ape.managers.accounts.AccountManager.load`.
Returns:
Iterator[str]
"""
for container in self.containers.values():
yield from container.aliases
def get_accounts_by_type(self, type_: Type[AccountAPI]) -> List[AccountAPI]:
"""
Get a list of accounts by their type.
Args:
type_ (Type[:class:`~ape.api.accounts.AccountAPI`]): The type of account
to get.
Returns:
List[:class:`~ape.api.accounts.AccountAPI`]
"""
accounts_with_type = []
for account in self:
if isinstance(account, type_):
self._inject_provider(account)
accounts_with_type.append(account)
return accounts_with_type
def __len__(self) -> int:
"""
The number of accounts managed by all account plugins.
Returns:
int
"""
return sum(len(container) for container in self.containers.values())
def __iter__(self) -> Iterator[AccountAPI]:
for container in self.containers.values():
for account in container:
self._inject_provider(account)
yield account
def __repr__(self) -> str:
return "[" + ", ".join(repr(a) for a in self) + "]"
@cached_property
def test_accounts(self) -> List[TestAccountAPI]:
"""
Accounts generated from the configured test mnemonic. These accounts
are also the subject of a fixture available in the ``test`` plugin called
``accounts``. Configure these accounts, such as the mnemonic and / or
number-of-accounts using the ``test`` section of the `ape-config.yaml` file.
Usage example::
def test_my_contract(accounts):
# The "accounts" fixture uses the AccountsManager.test_accounts()
sender = accounts[0]
receiver = accounts[1]
...
Returns:
List[:class:`~ape.api.accounts.TestAccountAPI`]
"""
accounts = []
for plugin_name, (container_type, account_type) in self.plugin_manager.account_types:
if not issubclass(account_type, TestAccountAPI):
continue
container = container_type(None, account_type, self.config)
for account in container:
self._inject_provider(account)
accounts.append(account)
return accounts
def load(self, alias: str) -> AccountAPI:
"""
Get an account by its alias.
Raises:
IndexError: When there is no local account with the given alias.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
if alias == "":
raise ValueError("Cannot use empty string as alias!")
for account in self:
if account.alias and account.alias == alias:
self._inject_provider(account)
return account
raise IndexError(f"No account with alias '{alias}'.")
@singledispatchmethod
def __getitem__(self, account_id) -> AccountAPI:
raise NotImplementedError(f"Cannot use {type(account_id)} as account ID.")
@__getitem__.register
def __getitem_int(self, account_id: int) -> AccountAPI:
"""
Get an account by index. For example, when you do the CLI command
``ape accounts list --all``, you will see a list of enumerated accounts
by their indices. Use this method as a quicker, ad-hoc way to get an
account from that index. **NOTE**: It is generally preferred to use
:meth:`~ape.managers.accounts.AccountManager.load` or
:meth:`~ape.managers.accounts.AccountManager.__getitem_str`.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
for idx, account in enumerate(self.__iter__()):
if account_id == idx:
self._inject_provider(account)
return account
raise IndexError(f"No account at index '{account_id}'.")
@__getitem__.register
def __getitem_str(self, account_str: str) -> AccountAPI:
"""
Get an account by address.
Raises:
IndexError: When there is no local account with the given address.
Returns:
:class:`~ape.api.accounts.AccountAPI`
"""
account_id = self.converters.convert(account_str, AddressType)
for container in self.containers.values():
if account_id in container:
account = container[account_id]
self._inject_provider(account)
return account
raise IndexError(f"No account with address '{account_id}'.")
def __contains__(self, address: AddressType) -> bool:
"""
Determine if the given address matches an account in ``ape``.
Args:
address (:class:`~ape.types.AddressType`): The address to check.
Returns:
bool: ``True`` when the given address is found.
"""
return any(address in container for container in self.containers.values())
def _inject_provider(self, account: AccountAPI):
if self.network_manager.active_provider is not None:
account.provider = self.network_manager.active_provider
| 1.523438 | 2 |
simulator/web/lset.py | ondiiik/meteoink | 2 | 12797080 | <reponame>ondiiik/meteoink<filename>simulator/web/lset.py
from config import location
from log import dump_exception
def page(web):
try:
i = int(web.args['idx'])
args = web.args['name'], float(web.args['lat']), float(web.args['lon'])
location[i].name, location[i].lat, location[i].lon = args
location[i].flush()
except Exception as e:
dump_exception('WEB error:', e)
yield web.index
| 1.046875 | 1 |
src/extendable_pydantic/__init__.py | lmignon/pydantic-ext | 0 | 12797088 | <gh_stars>0
"""A lib to define pydantic models extendable at runtime."""
# shortcut to main used class
from .main import ExtendableModelMeta
from .version import __version__
| 0.925781 | 1 |
setup.py | DNKonanov/uni_cli | 0 | 12797096 | <reponame>DNKonanov/uni_cli
import setuptools
setuptools.setup(
name="uniqpy",
version="0.1.3",
author="<NAME>",
author_email="<EMAIL>",
description="UNIQUAC-based tool for multicomponent VLEs",
long_description="uniqpy",
long_description_content_type="",
url="https://github.com/DNKonanov/uni_cli",
project_urls={
"Bug Tracker": "https://github.com/DNKonanov/uni_cli",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7",
include_package_data=True,
packages=['uniqpy'],
install_requires=[
'numpy',
'scipy'
],
entry_points={
'console_scripts': [
'uniqpy=uniqpy.uni_cli:main'
]
}
)
| 0.953125 | 1 |
py/py_0113_non-bouncy_numbers.py | lcsm29/project-euler | 0 | 12797104 | # Solution of;
# Project Euler Problem 113: Non-bouncy numbers
# https://projecteuler.net/problem=113
#
# Working from left-to-right if no digit is exceeded by the digit to its left
# it is called an increasing number; for example, 134468. Similarly if no
# digit is exceeded by the digit to its right it is called a decreasing
# number; for example, 66420. We shall call a positive integer that is neither
# increasing nor decreasing a "bouncy" number; for example, 155349. As n
# increases, the proportion of bouncy numbers below n increases such that
# there are only 12951 numbers below one-million that are not bouncy and only
# 277032 non-bouncy numbers below 1010. How many numbers below a googol
# (10100) are not bouncy?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
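# A combinatorial sketch of the intended computation (an addition; the timing
# harness below still calls the dummy stub). For d-digit numbers: increasing
# ones are the non-decreasing digit strings over 1..9, i.e. C(d+8, 8);
# decreasing ones are the non-increasing strings over 0..9 minus the all-zero
# string, i.e. C(d+9, 9) - 1; the 9 constant numbers are counted by both.
# Summing C(d+8, 8) + C(d+9, 9) - 10 over d = 1..6 reproduces the 12951
# quoted above for numbers below one million.
def non_bouncy_below(max_digits=100):
    from math import comb
    return sum(comb(d + 8, 8) + comb(d + 9, 9) - 10
               for d in range(1, max_digits + 1))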
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 113
timed.caller(dummy, n, i, prob_id)
| 2.890625 | 3 |
dataspot-bokeh/dataspot/statistics/excel_importer.py | patrickdehoon/dataspot-docker | 3 | 12797112 | <filename>dataspot-bokeh/dataspot/statistics/excel_importer.py<gh_stars>1-10
from openpyxl import load_workbook
class ExcelImporter:
def __init__(self):
self.__relationships = dict()
def set_relationships(self, ws, statistic):
relationships = self.__relationships
relationships[statistic] = dict()
for row in ws.iter_rows(values_only=True):
if row[0] in relationships[statistic]:
for ind, i in enumerate(row):
if ind > 0 and i is not None and i not in relationships[statistic][row[0]]:
relationships[statistic][row[0]].append(i)
else:
relationships[statistic][row[0]] = [i for ind, i in enumerate(row) if ind > 0 and i is not None]
def get_relationships(self):
return self.__relationships
def build(self, path):
wb = load_workbook(path)
for sheet in wb.sheetnames:
ws = wb[sheet]
self.set_relationships(ws=ws, statistic=sheet)
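# Usage sketch (added for illustration; the workbook path is a placeholder):
# each sheet name becomes a statistic key whose first-column values map to
# the remaining non-empty cells of their rows.
if __name__ == "__main__":
    importer = ExcelImporter()
    importer.build("relationships.xlsx")  # hypothetical workbook
    print(importer.get_relationships())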
| 2.078125 | 2 |
tests/test_context_manager.py | LouisPi/PiPortableRecorder | 51 | 12797120 | <reponame>LouisPi/PiPortableRecorder
"""tests for Context and ContextManager objects"""
import os
import unittest
from threading import Event
from mock import patch, Mock
try:
from context_manager import ContextManager, Context, ContextError
except ImportError:
print("Absolute imports failed, trying relative imports")
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
# Store original __import__
orig_import = __import__
def import_mock(name, *args, **kwargs):
if name in ['helpers'] and not kwargs:
#Have to filter for kwargs since there's a package in 'json'
#that calls __builtins__.__import__ with keyword arguments
#and we don't want to mock that call
return Mock()
return orig_import(name, *args, **kwargs)
with patch('__builtin__.__import__', side_effect=import_mock):
from context_manager import ContextManager, Context, ContextError
class TestContext(unittest.TestCase):
"""tests context class"""
def test_constructor(self):
"""Tests constructor"""
c = Context("test_context", lambda *a, **k: True)
self.assertIsNotNone(c)
def test_threading(self):
"""Tests whether threaded and non-threaded contexts behave as they should"""
c = Context("test_context", lambda *a, **k: True)
e = Event()
finished = Event()
c.signal_finished = finished.set
c.threaded = False # Need to set this flag, otherwise a validation check fails
c.activate()
assert(not e.isSet())
assert(not finished.isSet())
c.threaded = True
c.set_target(e.set)
c.activate()
finished.wait()
assert(e.isSet())
def test_targetless_threaded_context(self):
"""Tests whether a target-less threaded context fails to activate"""
c = Context("test_context", lambda *a, **k: True)
try:
c.activate()
except ContextError:
pass
else:
raise AssertionError
# After marking context as non-threaded, it should activate OK
c.threaded = False
try:
c.activate()
except:
raise AssertionError
else:
pass
class TestContextManager(unittest.TestCase):
"""tests context manager class and interaction between contexts"""
def test_constructor(self):
"""Tests constructor"""
cm = ContextManager()
self.assertIsNotNone(cm)
def test_initial_contexts(self):
"""Tests whether initial contexts are getting created"""
cm = ContextManager()
cm.init_io(Mock(), Mock()) #Implicitly creates initial contexts
for context_alias, context in cm.contexts.items():
assert(context_alias in cm.initial_contexts)
assert(context)
def test_basic_context_switching(self):
"""Tests whether basic context switching works"""
cm = ContextManager()
cm.initial_contexts = [cm.fallback_context, "test1", "test2"]
cm.init_io(Mock(), Mock())
assert(cm.current_context is None)
cm.switch_to_context(cm.fallback_context)
assert(cm.current_context == cm.fallback_context)
e1 = Event()
e2 = Event()
cm.register_context_target("test1", e1.wait)
cm.register_context_target("test2", e2.wait)
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
assert(cm.get_previous_context("test1") == cm.fallback_context)
cm.switch_to_context("test2")
assert(cm.current_context == "test2")
assert(cm.get_previous_context("test2") == "test1")
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
assert(cm.get_previous_context("test1") == "test2")
#Setting events so that threads exit
e1.set()
e2.set()
def test_context_switching_on_context_finish(self):
"""Tests whether basic context switching works"""
cm = ContextManager()
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
e1 = Event()
c = cm.create_context("test1")
cm.register_context_target("test1", e1.wait)
cm.switch_to_context("test1")
assert(cm.current_context == "test1")
finished = Event()
def new_signal_finished():
c.event_cb(c.name, "finished")
finished.set()
with patch.object(c, 'signal_finished', side_effect=new_signal_finished) as p:
e1.set()
#Waiting for the thread to exit
finished.wait()
assert(cm.current_context == cm.fallback_context)
def test_targetless_context_switching(self):
"""Tests that switching to a target-less context fails"""
cm = ContextManager()
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
cm.create_context("test1")
assert(cm.current_context == cm.fallback_context)
cm.switch_to_context("test1")
assert(cm.current_context == cm.fallback_context)
def test_failsafe_fallback_on_io_fail(self):
cm = ContextManager()
cm.fallback_context = "m"
cm.initial_contexts = ["m"]
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
c1 = cm.create_context("t1")
c2 = cm.create_context("t2")
e1 = Event()
e2 = Event()
cm.register_context_target("t1", e1.wait)
cm.register_context_target("t2", e2.wait)
cm.switch_to_context("t1")
# Fucking things up - since context objects are app-accessible,
# we can't really rely on them staying the same
del c1.i
c1.signal_finished = lambda: True
del c2.i
# Both current and new contexts are fucked up
cm.switch_to_context("t2")
# Setting events so that threads exit
e1.set()
e2.set()
assert(cm.current_context == cm.fallback_context)
def test_failsafe_fallback_on_thread_fail(self):
cm = ContextManager()
cm.fallback_context = "m"
cm.initial_contexts = ["m"]
cm.init_io(Mock(), Mock())
cm.switch_to_context(cm.fallback_context)
c1 = cm.create_context("t1")
c2 = cm.create_context("t2")
e1 = Event()
e2 = Event()
cm.register_context_target("t1", e1.wait)
cm.register_context_target("t2", e2.wait)
cm.switch_to_context("t1")
# Removing
c1.set_target(None)
del c1.thread
c1.signal_finished = lambda: True
c2.set_target(None)
# Again, switcing to the fucked up context
cm.switch_to_context("t2")
# Setting events so that threads exit
e1.set()
e2.set()
assert(cm.current_context == cm.fallback_context)
if __name__ == '__main__':
unittest.main()
""" def test_left_key_exits(self):
r = Refresher(lambda: "Hello", get_mock_input(), get_mock_output(), name=r_name)
r.refresh = lambda *args, **kwargs: None
# This test doesn't actually test whether the Refresher exits
# It only tests whether the in_foreground attribute is set
# Any ideas? Maybe use some kind of "timeout" library?
def scenario():
r.keymap["KEY_LEFT"]()
assert not r.in_foreground
# If the test fails, either the assert will trigger a test failure,
# or the idle loop will just run indefinitely
# The exception thrown should protect from the latter
raise KeyboardInterrupt
with patch.object(r, 'idle_loop', side_effect=scenario) as p:
try:
r.activate()
except KeyboardInterrupt:
pass #Test succeeded
def test_shows_data_on_screen(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name)
def scenario():
r.refresh()
r.deactivate()
with patch.object(r, 'idle_loop', side_effect=scenario) as p:
r.activate()
#The scenario should only be called once
assert r.idle_loop.called
assert r.idle_loop.call_count == 1
assert o.display_data.called
assert o.display_data.call_count == 2 #One in to_foreground, and one in patched idle_loop
assert o.display_data.call_args_list[0][0] == ("Hello", )
assert o.display_data.call_args_list[1][0] == ("Hello", )
def test_pause_resume(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
#refresh_interval is 0.1 so that _counter always stays 0
#and idle_loop always refreshes
#Doing what an activate() would do, but without a loop
r.to_foreground()
assert o.display_data.called
assert o.display_data.call_count == 1 #to_foreground calls refresh()
r.idle_loop()
assert o.display_data.call_count == 2 #not paused
r.pause()
r.idle_loop()
assert o.display_data.call_count == 2 #paused, so count shouldn't change
r.resume()
assert o.display_data.call_count == 3 #resume() refreshes the display
r.idle_loop()
assert o.display_data.call_count == 4 #should be refresh the display normally now
def test_keymap_restore_on_resume(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
r.refresh = lambda *args, **kwargs: None
r.to_foreground()
assert i.set_keymap.called
assert i.set_keymap.call_count == 1
assert i.set_keymap.call_args[0][0] == r.keymap
assert "KEY_LEFT" in r.keymap
r.pause()
assert i.set_keymap.call_count == 1 #paused, so count shouldn't change
i.set_keymap(None)
assert i.set_keymap.call_args[0][0] != r.keymap
r.resume()
assert i.set_keymap.call_count == 3 #one explicitly done in the test right beforehand
assert i.set_keymap.call_args[0][0] == r.keymap
def test_set_interval(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=1)
assert(r.refresh_interval == 1)
assert(r.sleep_time == 0.1)
assert(r.iterations_before_refresh == 10)
# Refresh intervals up until 0.1 don't change the sleep time
r.set_refresh_interval(0.1)
assert(r.refresh_interval == 0.1)
assert(r.sleep_time == 0.1)
assert(r.iterations_before_refresh == 1)
# Refresh intervals less than 0.1 change sleep_time to match refresh interval
r.set_refresh_interval(0.01)
assert(r.refresh_interval == 0.01)
assert(r.sleep_time == 0.01)
assert(r.iterations_before_refresh == 1)
# Now setting refresh_interval to a high value
r.set_refresh_interval(10)
assert(r.refresh_interval == 10)
assert(r.sleep_time == 0.1) # Back to normal
assert(r.iterations_before_refresh == 100)
def test_update_keymap(self):
i = get_mock_input()
o = get_mock_output()
r = Refresher(lambda: "Hello", i, o, name=r_name, refresh_interval=0.1)
r.refresh = lambda *args, **kwargs: None
# We need to patch "process_callback" because otherwise the keymap callbacks
# are wrapped and we can't test equivalence
with patch.object(r, 'process_callback', side_effect=lambda keymap:keymap) as p:
keymap1 = {"KEY_LEFT": lambda:1}
r.update_keymap(keymap1)
assert(r.keymap == keymap1)
keymap2 = {"KEY_RIGHT": lambda:2}
r.update_keymap(keymap2)
keymap2.update(keymap1)
assert(r.keymap == keymap2)"""
| 1.710938 | 2 |
swagger_server/models/data_utility.py | DITAS-Project/data-utility-evaluator | 0 | 12797128 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class DataUtility(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, url: str=None, accuracy: float=None, consistency: float=None, completeness: float=None, timeliness: float=None): # noqa: E501
"""DataUtility - a model defined in Swagger
:param url: The url of this DataUtility. # noqa: E501
:type url: str
:param accuracy: The accuracy of this DataUtility. # noqa: E501
:type accuracy: float
:param consistency: The consistency of this DataUtility. # noqa: E501
:type consistency: float
:param completeness: The completeness of this DataUtility. # noqa: E501
:type completeness: float
:param timeliness: The timeliness of this DataUtility. # noqa: E501
:type timeliness: float
"""
self.swagger_types = {
'url': str,
'accuracy': float,
'consistency': float,
'completeness': float,
'timeliness': float
}
self.attribute_map = {
'url': 'URL',
'accuracy': 'accuracy',
'consistency': 'consistency',
'completeness': 'completeness',
'timeliness': 'timeliness'
}
self._url = url
self._accuracy = accuracy
self._consistency = consistency
self._completeness = completeness
self._timeliness = timeliness
@classmethod
def from_dict(cls, dikt) -> 'DataUtility':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The DataUtility of this DataUtility. # noqa: E501
:rtype: DataUtility
"""
return util.deserialize_model(dikt, cls)
@property
def url(self) -> str:
"""Gets the url of this DataUtility.
:return: The url of this DataUtility.
:rtype: str
"""
return self._url
@url.setter
def url(self, url: str):
"""Sets the url of this DataUtility.
:param url: The url of this DataUtility.
:type url: str
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
@property
def accuracy(self) -> float:
"""Gets the accuracy of this DataUtility.
:return: The accuracy of this DataUtility.
:rtype: float
"""
return self._accuracy
@accuracy.setter
def accuracy(self, accuracy: float):
"""Sets the accuracy of this DataUtility.
:param accuracy: The accuracy of this DataUtility.
:type accuracy: float
"""
if accuracy is None:
raise ValueError("Invalid value for `accuracy`, must not be `None`") # noqa: E501
self._accuracy = accuracy
@property
def consistency(self) -> float:
"""Gets the consistency of this DataUtility.
:return: The consistency of this DataUtility.
:rtype: float
"""
return self._consistency
@consistency.setter
def consistency(self, consistency: float):
"""Sets the consistency of this DataUtility.
:param consistency: The consistency of this DataUtility.
:type consistency: float
"""
if consistency is None:
raise ValueError("Invalid value for `consistency`, must not be `None`") # noqa: E501
self._consistency = consistency
@property
def completeness(self) -> float:
"""Gets the completeness of this DataUtility.
:return: The completeness of this DataUtility.
:rtype: float
"""
return self._completeness
@completeness.setter
def completeness(self, completeness: float):
"""Sets the completeness of this DataUtility.
:param completeness: The completeness of this DataUtility.
:type completeness: float
"""
if completeness is None:
raise ValueError("Invalid value for `completeness`, must not be `None`") # noqa: E501
self._completeness = completeness
@property
def timeliness(self) -> float:
"""Gets the timeliness of this DataUtility.
:return: The timeliness of this DataUtility.
:rtype: float
"""
return self._timeliness
@timeliness.setter
def timeliness(self, timeliness: float):
"""Sets the timeliness of this DataUtility.
:param timeliness: The timeliness of this DataUtility.
:type timeliness: float
"""
if timeliness is None:
raise ValueError("Invalid value for `timeliness`, must not be `None`") # noqa: E501
self._timeliness = timeliness
| 1.40625 | 1 |
parentheses/0921_minimum_add_to_make_valid.py | MartinMa28/Algorithms_review | 0 | 12797136 | <reponame>MartinMa28/Algorithms_review
class Solution:
def minAddToMakeValid(self, S: str) -> int:
        stack = []      # indices of unmatched '(' seen so far
        violations = 0  # number of parentheses that must be added
        if S == '':
            return 0
        for idx, ch in enumerate(S):
            if ch == '(':
                stack.append(idx)
            elif ch == ')':
                if len(stack) == 0:
                    # ')' with no '(' to match: a '(' must be added
                    violations += 1
                else:
                    stack.pop()
        if len(stack) > 0:
            # every leftover '(' needs a closing ')'
            violations += len(stack)
return violations | 1.96875 | 2 |
game/views.py | lizheng3401/MetaStudio | 0 | 12797144 | import os
from django.shortcuts import render,get_object_or_404, redirect
from django.http import FileResponse
from .models import GameCategory, Game
from comment.forms import GameCommentForm,SubGCommentForm
from comment.models import SubGComment
from .forms import UploadGameForm
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def portfllio(request):
categories = GameCategory.objects.all().order_by("name")
gameList = []
for cate in categories:
games = Game.objects.filter(category = cate.pk).order_by("-createTime")
temp = (cate,games)
gameList.append(temp)
return render(request, 'home/portfolio.html', context={'gameList': gameList})
def gameInfo(request,pk):
game = get_object_or_404(Game, pk=pk)
form = GameCommentForm()
subForm = SubGCommentForm()
c = game.gamecomment_set.all()
comments = []
for comment in c:
subComment = SubGComment.objects.filter(parentComment=comment.pk).order_by("createTime")
temp = (comment,subComment)
comments.append(temp)
context = {
'game': game,
'form': form,
'subForm': subForm,
'comments': comments,
}
return render(request, 'game/game.html', context=context)
def downloadGame(request, pk):
gameObj = get_object_or_404(Game, pk=pk)
url = BASE_DIR+str(gameObj.game.url).replace('/', '\\')
name = str(gameObj.game)
file = open(url, 'rb')
response = FileResponse(file)
response['Content-Type'] = 'application/octet-stream'
response['Content-Disposition'] = 'attachment;filename="{0}"'.format(name)
gameObj.increase_times()
return response
def uploadGame(request):
categories = GameCategory.objects.all()
if request.method == 'POST':
form = UploadGameForm(request.POST)
        game_file = request.FILES['game']
        if form.is_valid():
            game = form.save(commit=False)
            game.game = game_file
if 'icon' not in request.POST:
game.icon = request.FILES['icon']
if 'foreImg' not in request.POST:
game.foreImg = request.FILES['foreImg']
game.save()
return redirect('/')
else:
form = UploadGameForm()
return render(request, 'game/upload.html', context={'form': form, 'categories': categories})
def deleteGame(request, pk):
Game.objects.filter(pk=pk).delete()
return redirect("/user/")
def editGame(request, pk):
categories = GameCategory.objects.all()
game = get_object_or_404(Game, pk=pk)
if request.method == 'POST':
content = request.POST
game.name = content['name']
game.version = content['version']
game.category.pk = content['category']
game.inTro = content['inTro']
if 'icon' not in request.POST:
game.icon = request.FILES['icon']
if 'foreImg' not in request.POST:
game.foreImg = request.FILES['foreImg']
if 'game' not in request.POST:
game.game = request.FILES['game']
game.save()
return redirect("/user/")
context = {'categories': categories,'game': game}
return render(request, 'game/edit.html',context=context) | 1.34375 | 1 |
character/models/__init__.py | SamusChief/myth-caster-api | 0 | 12797152 | <gh_stars>0
""" Models for Characters """
from .ancestry import Ancestry, SubAncestry
from .background import Background
from .character import Character, ClassAndLevel, InventoryAdventuringGear, \
InventoryArmor, InventoryTool, InventoryWeapon, InventoryWondrousItem, SkillProficiency
from .character_class import CharacterClass, Archetype, FeaturesAtLevel, \
SpellsKnownAtLevel, SpellSlotsAtLevel
from .feature import Feature
| 0.882813 | 1 |
src/packModules/filewrite.py | PauloHenriqueRCS/InterPy | 0 | 12797160 | <filename>src/packModules/filewrite.py
def filewrite(outcontent, filename):
    try:
        # A context manager closes the file even when writing fails; the
        # original finally-block close raised NameError if open() itself failed.
        with open("outFiles/outcontent.txt", mode="a", encoding="utf-8") as filecontent:
            filecontent.write("\n\n\n=========={}==========\n".format(filename))
            filecontent.write("\n".join(str(el) for el in outcontent))
    except IOError as identifier:
        print(str(identifier))
| 1.59375 | 2 |
ebel/validate.py | e-bel/ebel | 1 | 12797168 | <filename>ebel/validate.py
"""Collect of methods used for validating a BEL file."""
import os
import re
import csv
import difflib
import logging
from typing import Iterable, Union, Optional
from textwrap import fill
import numpy as np
import pandas as pd
import ebel.database
from ebel.parser import check_bel_script_line_by_line, check_bel_script, bel_to_json
logger = logging.getLogger(__name__)
def validate_bel_file(bel_script_path: str,
force_new_db: bool = False,
line_by_line: bool = False,
reports: Union[Iterable[str], str] = None,
bel_version: str = '2_1',
tree: bool = False,
sqlalchemy_connection_str: str = None,
json_file: bool = True,
force_json: bool = False,):
"""Validate BEL script for correct syntax following eBNF grammar.
Parameters
----------
bel_script_path: str
        Path to a BEL file or a directory containing BEL files.
force_new_db: bool
Delete current database of namespaces/values and generate a new one. Defaults to False.
line_by_line: bool
TODO: Write this.
reports: Iterable[str] or str
List of file paths to write reports to. Multiple formats of the report can be generated at once. Acceptable
formats include: CSV, TSV, TXT, XLS, XLSX, JSON, HTML, MD
bel_version: {'1', '2', '2_1'}
        Which BEL grammar version should be used for validating the BEL file. Currently available are 1.0, 2.0, and 2.1.
Defaults to the most recent version.
tree: bool
Generates a tree of relationships derived from the BEL file. Defaults to False.
sqlalchemy_connection_str: str
        Path to SQLite database to be used for storing/looking up used namespaces and values. If None is given, it uses
the generated e(BE:L) database (default).
json_file: bool
If True, generates a JSON file that can be used for importing BEL relationships into an e(BE:L) generated
OrientDB database. Only creates the JSON file when there are no grammar or syntax errors. Defaults to True.
force_json: bool
If True, will create an importable JSON file even if there are namespace/value errors. Defaults to False.
Returns
-------
dict
Dictionary of file paths and results for each BEL file processed.
Examples
--------
Task: Validate BEL script `my.bel` for BEL syntax 2.0, create error
reports in Markdown and JSON format. In case of no errors create a JSON file
for the import of BEL network into Cytoscape:
> ebel validate my.bel -v 2 -r error_report.md,error_report.json
"""
validation_results = dict()
if bel_script_path.startswith('"') and bel_script_path.endswith('"'):
bel_script_path = bel_script_path[1:-1]
if reports and reports.startswith('"') and reports.endswith('"'):
reports = reports[1:-1]
if line_by_line:
# TODO: This is perhaps not working
result = check_bel_script_line_by_line(bel_script_path,
error_report_file_path=reports,
bel_version=bel_version)
if reports:
logger.info("Wrote report to %s\n" % reports)
else:
logger.info("\n".join([x.to_string() for x in result]) + "\n")
else:
if sqlalchemy_connection_str:
ebel.database.set_connection(sqlalchemy_connection_str)
bel_files = _create_list_bel_files(bel_path=bel_script_path)
validation_results['bel_files_checked'] = bel_files
for bel_file in bel_files:
# Create dict to be filled for individual BEL files.
validation_results[bel_file] = dict()
logger.info(f"Processing {bel_file}")
result = check_bel_script(
bel_script_path=bel_file,
force_new_db=force_new_db,
bel_version=bel_version,
)
if json_file:
if not result['errors'] or force_json:
json_file = _write_odb_json(bel_path=bel_file, results=result, bel_version=bel_version)
validation_results[bel_file]['json'] = json_file
if tree:
if result['errors']:
logger.error("Tree can not be printed because errors still exists\n")
else:
logger.debug(result['tree'])
validation_results[bel_file]['tree'] = result['tree']
if result['warnings'] and reports:
report_paths = _write_report(reports, result, report_type='warnings')
validation_results[bel_file]['reports'] = report_paths
elif result['errors']:
if not reports:
logger.info('\n'.join([x.to_string() for x in result['errors']]) + "\n")
else:
                    _write_report(reports, result, report_type='errors')
    return validation_results
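# Example call (mirrors the CLI usage shown in the docstring; file names are
# placeholders, not values from this repository):
#
#     validate_bel_file('my.bel', reports='error_report.md,error_report.json')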
def repair_bel_file(bel_script_path: str, new_file_path: Optional[str] = None):
"""Repair a BEL document.
Parameters
----------
bel_script_path : str
Path to the BEL file.
new_file_path : str (optional)
Export repaired version of file to new path.
"""
# if evidence:
    # regular expression for missing continuation line (\ at the end of line)
with open(bel_script_path, "r", encoding="utf-8") as belfile:
content = belfile.read()
new_content = content
for regex_pattern in re.findall(r'\n((SET\s+(DOCUMENT\s+Description|Evidence|SupportingText)'
r'\s*=\s*)"(((?<=\\)"|[^"])+)"\s*\n*)',
content):
if regex_pattern[2].startswith("DOCUMENT"):
new_prefix = "SET DOCUMENT Description = "
else:
new_prefix = "SET Support = "
new_evidence_text = re.sub(r"(\\?[\r\n]+)|\\ ", " ", regex_pattern[3].strip())
new_evidence_text = re.sub(r"\s{2,}", " ", new_evidence_text)
new_evidence_text = re.sub(r'(\\)(\w)', r'\g<2>', new_evidence_text)
new_evidence_text = fill(new_evidence_text, break_long_words=False).replace("\n", " \\\n")
new_evidence = new_prefix + '"' + new_evidence_text + '"\n\n'
new_content = new_content.replace(regex_pattern[0], new_evidence)
if content != new_content:
if new_file_path:
with open(new_file_path + ".diff2repaired", "w") as new_file:
new_file.write('\n'.join(list(difflib.ndiff(content.split("\n"), new_content.split("\n")))))
else:
with open(bel_script_path, "w") as output_file:
output_file.write(new_content)
def _write_odb_json(bel_path: str, results: dict, bel_version: str) -> str:
json_path = bel_path + ".json"
if int(bel_version[0]) > 1:
json_tree = bel_to_json(results['tree'])
open(json_path, "w").write(json_tree)
return json_path
def _create_list_bel_files(bel_path: str) -> list:
"""Export all BEL files in directory as list. If single file is passed, returns a list with that path."""
if os.path.isdir(bel_path):
bel_files = []
for file in os.listdir(bel_path):
if file.endswith(".bel"):
bel_file_path = os.path.join(bel_path, file)
bel_files.append(bel_file_path)
else:
bel_files = [bel_path]
return bel_files
def _write_report(reports: Union[Iterable[str], str], result: dict, report_type: str) -> list:
"""Write report in different types depending on the file name suffix in reports.
Parameters
----------
reports : Iterable[str] or str
List of report formats or comma separated list of report file names.
result : dict
return value of check_bel_script methode.
report_type : str
`report_type` could be 'warnings' or 'errors'.
Returns
-------
list
List of file paths for the reports written.
"""
# TODO: report_type options should be constants
errors_or_warns_as_list_of_dicts = [x.to_dict() for x in result[report_type]]
columns = [report_type[:-1] + "_class", "url", "keyword", "entry", "line_number", "column", "hint"]
df = pd.DataFrame(data=errors_or_warns_as_list_of_dicts, columns=columns)
df.index += 1
if isinstance(reports, str):
reports = reports.split(",")
for report in reports:
if report.endswith('.csv'):
df.to_csv(report)
if report.endswith('.xls'):
df.to_excel(report)
if report.endswith('.xlsx'):
df.to_excel(report, engine='xlsxwriter')
if report.endswith('.tsv'):
df.to_csv(report, sep='\t')
if report.endswith('.json'):
df.to_json(report)
if report.endswith('.txt'):
open(report, "w").write(df.to_string())
if report.endswith('.html'):
df.to_html(report)
if report.endswith('.md'):
cols = df.columns
df2 = pd.DataFrame([['---', ] * len(cols)], columns=cols)
if df.hint.dtype == np.str:
df.hint = df.hint.str.replace(r'\|', '|')
if df.entry.dtype == np.str:
df.entry = df.entry.str.replace(r'\|', '|')
df.url = [("[url](" + str(x) + ")" if not pd.isna(x) else '') for x in df.url]
url_template = "[%s](" + report.split(".bel.")[0] + ".bel?expanded=true&viewer=simple#L%s)"
df.line_number = [url_template % (x, x) for x in df.line_number]
df3 = pd.concat([df2, df])
df3.to_csv(report, sep="|", index=False, quoting=csv.QUOTE_NONE, escapechar="\\")
return reports
| 1.835938 | 2 |
integreat_cms/api/v3/events.py | Integreat/cms-v2 | 21 | 12797176 | <reponame>Integreat/cms-v2
"""
This module includes functions related to the event API endpoint.
"""
from copy import deepcopy
from datetime import timedelta
from django.conf import settings
from django.http import JsonResponse
from django.utils import timezone
from django.utils.html import strip_tags
from ...cms.models.events.event_translation import EventTranslation
from ...cms.utils.slug_utils import generate_unique_slug
from ..decorators import json_response
from .locations import transform_poi
def transform_event(event):
"""
Function to create a JSON from a single event object.
:param event: The event which should be converted
:type event: ~integreat_cms.cms.models.events.event.Event
:return: data necessary for API
:rtype: dict
"""
return {
"id": event.id,
"start_date": event.start_date,
"end_date": event.end_date,
"all_day": event.is_all_day,
"start_time": event.start_time,
"end_time": event.end_time,
"recurrence_id": event.recurrence_rule.id if event.recurrence_rule else None,
"timezone": settings.CURRENT_TIME_ZONE,
}
def transform_event_translation(event_translation):
"""
Function to create a JSON from a single event_translation object.
:param event_translation: The event translation object which should be converted
:type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation
:return: data necessary for API
:rtype: dict
"""
event = event_translation.event
if event.location:
location_translation = (
event.location.get_public_translation(event_translation.language.slug)
or event.location.best_translation
)
else:
location_translation = None
absolute_url = event_translation.get_absolute_url()
return {
"id": event_translation.id,
"url": settings.BASE_URL + absolute_url,
"path": absolute_url,
"title": event_translation.title,
"modified_gmt": event_translation.last_updated.strftime("%Y-%m-%d %H:%M:%S"),
"excerpt": strip_tags(event_translation.content),
"content": event_translation.content,
"available_languages": event_translation.available_languages,
"thumbnail": event.icon.url if event.icon else None,
"location": transform_poi(event.location, location_translation),
"event": transform_event(event),
"hash": None,
}
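# Example of the returned dict (editor addition; all values are hypothetical):
#   {"id": 42, "url": "https://example.com/region/en/events/town-fair/",
#    "path": "/region/en/events/town-fair/", "title": "Town fair",
#    "event": {"id": 7, "start_date": ..., "timezone": ...}, "hash": None}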
def transform_event_recurrences(event_translation, today):
"""
Yield all future recurrences of the event.
:param event_translation: The event translation object which should be converted
:type event_translation: ~integreat_cms.cms.models.events.event_translation.EventTranslation
:param today: The first date at which event may be yielded
:type today: ~datetime.date
:return: An iterator over all future recurrences up to ``settings.API_EVENTS_MAX_TIME_SPAN_DAYS``
:rtype: Iterator[:class:`~datetime.date`]
"""
event = event_translation.event
recurrence_rule = event.recurrence_rule
if not recurrence_rule:
return
# In order to avoid unnecessary computations, check if any future event
# may be valid and return early if that is not the case
if (
recurrence_rule.recurrence_end_date
and recurrence_rule.recurrence_end_date < today
):
return
event_length = event.end_date - event.start_date
start_date = event.start_date
event_translation.id = None
# Store language and slug for usage in loop
current_language = event_translation.language
current_slug = event_translation.slug
# Calculate all recurrences of this event
for recurrence_date in recurrence_rule.iter_after(start_date):
if recurrence_date - max(start_date, today) > timedelta(
days=settings.API_EVENTS_MAX_TIME_SPAN_DAYS
):
break
if recurrence_date < today or recurrence_date == start_date:
continue
# Create all temporary translations of this recurrence
recurrence_translations = {}
if event.region.fallback_translations_enabled:
languages = event.region.active_languages
else:
languages = event.public_languages
for language in languages:
# Create copy in memory to make sure original translation is not affected by changes
event_translation = deepcopy(event_translation)
# Fake the requested language
event_translation.language = language
event_translation.slug = generate_unique_slug(
**{
"slug": f"{current_slug}-{recurrence_date}",
"manager": EventTranslation.objects,
"object_instance": event_translation,
"foreign_model": "event",
"region": event.region,
"language": language,
}
)
# Reset id to make sure id does not conflict with existing event translation
event_translation.event.id = None
# Set date to recurrence date
event_translation.event.start_date = recurrence_date
event_translation.event.end_date = recurrence_date + event_length
# Clear cached property in case url with different language was already calculated before
try:
del event_translation.url_prefix
except AttributeError:
pass
recurrence_translations[language.slug] = event_translation
# Set the prefetched public translations to make sure the recurrence translations are correctly listed in available languages
for recurrence_translation in recurrence_translations.values():
recurrence_translation.event.prefetched_public_translations_by_language_slug = (
recurrence_translations
)
# Update translation object with the one with prefetched temporary translations
event_translation = recurrence_translations[current_language.slug]
# Clear cached property in case available languages with different recurrence was already calculated before
try:
del event_translation.available_languages
except AttributeError:
pass
yield transform_event_translation(event_translation)
@json_response
# pylint: disable=unused-argument
def events(request, region_slug, language_slug):
"""
List all events of the region and transform result into JSON
:param request: The current request
:type request: ~django.http.HttpRequest
:param region_slug: The slug of the requested region
:type region_slug: str
:param language_slug: The slug of the requested language
:type language_slug: str
:return: JSON object according to APIv3 events endpoint definition
:rtype: ~django.http.JsonResponse
"""
region = request.region
# Throw a 404 error when the language does not exist or is disabled
region.get_language_or_404(language_slug, only_active=True)
result = []
now = timezone.now().date()
for event in region.events.prefetch_public_translations().filter(archived=False):
event_translation = event.get_public_translation(language_slug)
if event_translation:
if event.end_date >= now:
result.append(transform_event_translation(event_translation))
for future_event in transform_event_recurrences(event_translation, now):
result.append(future_event)
return JsonResponse(
result, safe=False
) # Turn off Safe-Mode to allow serializing arrays
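# Illustrative sketch (editor addition): how the window check in
# transform_event_recurrences() bounds the yielded dates. The values below are
# hypothetical stand-ins for settings.API_EVENTS_MAX_TIME_SPAN_DAYS and the
# recurrence rule iterator.
#
#     from datetime import date, timedelta
#     max_span = timedelta(days=365)
#     start, today = date(2022, 1, 3), date(2022, 6, 1)
#     for d in (date(2022, 1, 3), date(2022, 7, 4), date(2024, 8, 1)):
#         if d - max(start, today) > max_span:
#             break      # beyond the API window -> stop iterating
#         if d < today or d == start:
#             continue   # past dates and the original occurrence are skipped
#         print(d)       # only 2022-07-04 survives both filters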
| 1.203125 | 1 |
etoLib/etoLib/Attic/s3_func.py | tonybutzer/eto-draft | 0 | 12797184 | <gh_stars>0
def s3_hello(person_name):
print('Hello There Person:', person_name)
import os
import boto3  # assumption: boto3 is the S3 client intended by this helper module

def s3_push_delete_local(local_file, bucket, bucket_filepath):
    """Upload local_file to s3://bucket/bucket_filepath, then delete the local copy."""
    boto3.client('s3').upload_file(local_file, bucket, bucket_filepath)
    os.remove(local_file)
| 1.898438 | 2 |
addons/mrp_byproduct/wizard/change_production_qty.py | jjiege/odoo | 0 | 12797192 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models
class ChangeProductionQty(models.TransientModel):
_inherit = 'change.production.qty'
@api.model
def _update_product_to_produce(self, prod, qty, old_qty):
modification = super(ChangeProductionQty, self)._update_product_to_produce(prod, qty, old_qty)
for sub_product_line in prod.bom_id.sub_products:
move = prod.move_finished_ids.filtered(lambda x: x.subproduct_id == sub_product_line and x.state not in ('done', 'cancel'))
if move:
product_uom_factor = prod.product_uom_id._compute_quantity(prod.product_qty - prod.qty_produced, prod.bom_id.product_uom_id)
qty1 = sub_product_line.product_qty
qty1 *= product_uom_factor / prod.bom_id.product_qty
modification[move[0]] = (qty1, move[0].product_uom_qty)
move[0].write({'product_uom_qty': qty1})
else:
move = prod._create_byproduct_move(sub_product_line)
modification[move] = (move.product_uom_qty, 0)
return modification
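# Worked example (editor addition; values are hypothetical): if the BoM produces
# 2.0 units per run with a byproduct line of product_qty 3.0, and the order still
# has 4.0 units left to produce, then product_uom_factor = 4.0 and the byproduct
# move is updated to 3.0 * 4.0 / 2.0 = 6.0 units.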
| 1.507813 | 2 |
03-DataWranglingWithMongoDB/P02-WrangleOpenStreetMapData/handler.py | ccampguilhem/Udacity-DataAnalyst | 1 | 12797200 | <gh_stars>1-10
import xml.sax
from collections import Counter, defaultdict
"""
Custom handler for parsing OpenStreetMap XML files.
While parsing the XML file, the handler keeps track of:
- tags count
- tags ancestors
It is possible to register callback functions for start or end events.
The callbacks for start event will be called passing the following arguments:
- stack
- locator
The callbacks for end event will be called passing the following arguments:
- element name
- element children
- locator
Return value of callbacks is ignored by the handler class.
This makes it possible to enhance the parser with on-the-fly data quality audits or exports.
"""
class OpenStreetMapXmlHandler(xml.sax.ContentHandler):
def __init__(self):
"""
Constructor.
This class is intended to be used as a context manager.
        The object's state keeps a trace of the stack while parsing. This makes it possible to collect
        information from children. The stack entry is destroyed when the end event occurs, which limits
        memory usage while parsing.
        The _stack internal variable stores tuples of:
- element unique identifier
- element name (as provided by start event)
- element attributes (as provided by start event)
"""
        xml.sax.ContentHandler.__init__(self)  # super() fails here, likely because ContentHandler is an old-style class under Python 2
def __enter__(self):
"""
Context manager entry point.
"""
        self._id = 0 #unique identifier, incremented for each new element
self._stack = [ ] #current stack of element being read
self._element_tags = Counter() #counter of element tags
self._element_ancestors = defaultdict(set) #collection of ancestors per tag
self._start_callbacks = [ ] #start event callbacks
self._end_callbacks = [ ] #end event callbacks
self._children = { } #children elements of elements being read
return self
def __exit__(self, *args):
"""
Context manager exit point.
"""
pass
def startElement(self, name, attrs):
"""
Method invoked when starting to read an element in XML dataset.
        This method is part of the xml.sax.ContentHandler interface and is overridden here.
- name: tag of element being read
- attrs: element attributes
"""
#Get identifier for current element
identifier = self._requestUniqueIdentifier()
        #Does the element have a parent? If yes, get its id.
try:
parent_tuple = self._stack[-1]
if parent_tuple[1] == 'osm':
#We ignore osm element as it has too many children
parent = None
else:
parent = parent_tuple[0]
except IndexError:
parent = None
#Exploit current stack to get ancestor
ancestor = ".".join([s[1] for s in self._stack])
self._element_ancestors[name].add(ancestor)
#Update tag counter
self._element_tags[name] += 1
#Update parent children (if any)
if parent is not None:
self._children[parent].append((name, attrs))
#Initialisation of own children
self._children[identifier] = [ ]
#Update stack
self._stack.append((identifier, name, attrs))
#Use registered callbacks
for callback in self._start_callbacks:
callback(self._stack, self._locator)
def endElement(self, name):
"""
Method invoked when ending to read an element in XML dataset.
        This method is part of the xml.sax.ContentHandler interface and is overridden here.
- name: tag of element being read
"""
#Get identifier
identifier = self._stack[-1][0]
#Use registered callbacks before element is cleaned
for callback in self._end_callbacks:
callback(name, self._children[identifier], self._locator)
#Cleaning
identifier, name, attrs = self._stack.pop(-1)
del self._children[identifier]
def getTagsCount(self):
"""
        Get a dictionary with tag counts.
        - return: dictionary where keys are tags and values are counts
"""
return dict(self._element_tags)
def getTagsAncestors(self):
"""
        Get a dictionary with tag ancestors.
        - return: dictionary where keys are tags and values are sets of all distinct ancestor paths
"""
return dict(self._element_ancestors)
def registerStartEventCallback(self, func):
"""
Register a callback for start event.
        Note that the return value of the callback is ignored. Exceptions raised by the callback are not
        caught by the handler, so you should take care of catching all exceptions within the callback itself.
- func: a callable object taking stack and locator as arguments.
"""
self._start_callbacks.append(func)
def registerEndEventCallback(self, func):
"""
Register a callback for end event.
        Note that the return value of the callback is ignored. Exceptions raised by the callback are not
        caught by the handler, so you should take care of catching all exceptions within the callback itself.
- func: a callable object taking element name, element children and locator as arguments.
"""
self._end_callbacks.append(func)
def clearCallbacks(self):
"""
Remove all registered callbacks.
"""
self._end_callbacks = [ ]
self._start_callbacks = [ ]
def _requestUniqueIdentifier(self):
"""
Return a unique identifier used at parsing time.
- return: identifier
"""
self._id += 1
return self._id | 2.203125 | 2 |
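# Illustrative usage sketch (editor addition): parsing a file with a registered
# end-event callback. The file name 'map.osm' is hypothetical.
if __name__ == '__main__':
    def count_node_children(name, children, locator):
        if name == 'node':
            print('node with %d children at line %d' % (len(children), locator.getLineNumber()))
    with OpenStreetMapXmlHandler() as handler:
        handler.registerEndEventCallback(count_node_children)
        xml.sax.parse('map.osm', handler)
        print(handler.getTagsCount())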