max_stars_repo_path
stringlengths 4
305
| max_stars_repo_name
stringlengths 4
130
| max_stars_count
int64 0
191k
| id
stringlengths 1
8
| content
stringlengths 6
1.02M
| score
float64 -1.16
4.16
| int_score
int64 0
4
|
---|---|---|---|---|---|---|
slash/core/runnable_test_factory.py | omergertel/slash | 0 | 12798008 | from .metadata import Metadata
class RunnableTestFactory(object):
    """Base class for factories that produce runnable test objects.

    Subclasses implement :func:`_generate_tests`; callers iterate
    :func:`generate_tests`, which attaches a ``Metadata`` object to every
    produced test before yielding it.
    """

    def __init__(self, file_path='', module_name='', factory_name=''):
        super(RunnableTestFactory, self).__init__()
        # Location/identity of this factory, consumed when building metadata.
        self.file_path = file_path
        self.module_name = module_name
        self.factory_name = factory_name

    def generate_tests(self, fixture_store):
        """
        Generates :class:`.RunnableTest` instances to run

        Do not override this method directly. Use :func:`.RunnableTestFactory._generate_tests` instead.
        """
        for factory_address, generated_test in self._generate_tests(fixture_store):
            # A freshly generated test must not have been tagged yet;
            # it gets exactly one Metadata object here.
            assert generated_test.__slash__ is None
            generated_test.__slash__ = Metadata(self, generated_test, factory_address)
            yield generated_test

    def _generate_tests(self, fixture_store):
        # Subclass hook: yield (address_in_factory, test) pairs.
        raise NotImplementedError()  # pragma: no cover
| 1.515625 | 2 |
src/setting.py | willyii/CarND-Advanced-Lane-Lines | 0 | 12798016 | <reponame>willyii/CarND-Advanced-Lane-Lines<gh_stars>0
# Path to the stored camera-calibration parameters (NumPy ``.npz`` archive).
CALIBRATION_PATH = "./param/calibration_param.npz"
| -0.275391 | 0 |
flask_occam/converters.py | bprinty/Flask-Occam | 2 | 12798024 |
# imports
# -------
import re
from werkzeug.routing import BaseConverter
from werkzeug.exceptions import NotFound
# helpers
# -------

# Lazily-populated global map: model name (ClassName, lowercase and
# snake_case variants) -> model class. Filled by gather_models().
MODELS = dict()
def class_registry(cls):
    """
    Function for dynamically getting class
    registry dictionary from specified model.

    Supports both newer SQLAlchemy (``_sa_registry._class_registry``) and
    older releases (``_decl_class_registry`` directly on the base).

    Args:
        cls: Declarative base class to read the registry from.

    Returns:
        dict: A copy of the name -> class registry mapping.
    """
    try:
        return dict(cls._sa_registry._class_registry)
    except AttributeError:
        # Older SQLAlchemy keeps the registry directly on the base class.
        # (Was a bare `except:`, which also hid unrelated errors; the
        # unreachable trailing `return` has been removed.)
        return dict(cls._decl_class_registry)
def gather_models():
    """
    Inspect sqlalchemy models from current context and set global
    dictionary to be used in url conversion.
    """
    global MODELS
    from flask import current_app, has_app_context

    # Nothing to inspect without an app context or Flask-SQLAlchemy.
    if not has_app_context():
        return
    if 'sqlalchemy' not in current_app.extensions:
        return

    # inspect current models and add to map
    db = current_app.extensions['sqlalchemy'].db
    for model in class_registry(db.Model).values():
        if not (isinstance(model, type) and issubclass(model, db.Model)):
            continue
        # class name
        MODELS[model.__name__] = model
        # lowercase name
        MODELS[model.__name__.lower()] = model
        # snake_case name (only when the class name has several words)
        parts = re.findall(r'([A-Z][0-9a-z]+)', model.__name__)
        if len(parts) > 1:
            MODELS['_'.join(part.lower() for part in parts)] = model
    return
# converters
# ----------
class ModelConverter(BaseConverter):
    """
    For url inputs containing a model identifier, look
    up the model and return the object.

    This method simplifies a lot of the boilerplate needed
    to do model look ups in REST apis.

    Examples:

    .. code-block:: python

        @app.route('/users/<id(User):user>')
        def get_user(user):
            return jsonify(user.json())

    In addition, this class can be inherited and used
    for other custom parameter url converters. For instance,
    here is how you might use it to create a name converter:

    .. code-block:: python

        class NameConverter(ModelConverter):
            __param__ = 'name'
        app.url_map.converters['name'] = NameConverter

        # ... handlers ...

        @app.route('/users/<name(User):user>')
        def get_user(user):
            return jsonify(user.json())
    """
    # Attribute used to look the model up (and to build URLs from objects).
    __param__ = 'id'

    def __init__(self, map, model):
        # `map` is the werkzeug Map; `model` is the model name from the URL rule.
        self.map = map
        self.model = model
        return

    @property
    def models(self):
        # Lazily populate the module-level model map on first access.
        global MODELS
        if not MODELS:
            gather_models()
        return MODELS

    def to_python(self, value):
        """Convert a URL fragment into a model instance (or raise 404)."""
        mapper = self.models
        # make sure model exists
        if self.model not in mapper:
            raise AssertionError(
                'Specified model `{}` in url converter '
                'not part of application models.'.format(self.model))
        # set up class for conversion
        cls = mapper[self.model]
        # search for the object
        # NOTE(review): assumes models expose a `get(**filters)` classmethod
        # returning an instance or None -- confirm against the Occam mixin.
        model = cls.get(**{self.__param__: value})
        if model is None:
            raise NotFound
        return model

    def to_url(self, value):
        # Reverse conversion: serialise the configured attribute (id by default).
        return super(ModelConverter, self).to_url(getattr(value, self.__param__))
| 1.78125 | 2 |
kgcnn/ops/scatter.py | thegodone/gcnn_keras | 47 | 12798032 | import tensorflow as tf
@tf.function
def tensor_scatter_nd_ops_by_name(segment_name, tensor, indices, updates, name=None):
    """Scatter operation chosen by name that pick tensor_scatter_nd functions.

    Args:
        segment_name (str): Operation to update scattered updates. Either 'sum' or 'min' etc.
        tensor (tf.Tensor): Tensor to scatter updates into.
        indices (tf.Tensor): Indices to for updates.
        updates (tf.Tensor): Updates of new entries for tensor.
        name (str): Name of the tensor.

    Returns:
        tf.Tensor: Updates scattered into tensor with different update rules.

    Raises:
        TypeError: If ``segment_name`` does not name a supported scatter op.
    """
    if segment_name in ["segment_sum", "sum", "reduce_sum", "add"]:
        pool = tf.tensor_scatter_nd_add(tensor, indices, updates, name=name)
    elif segment_name in ["segment_max", "max", "reduce_max"]:
        pool = tf.tensor_scatter_nd_max(tensor, indices, updates, name=name)
    elif segment_name in ["segment_min", "min", "reduce_min"]:
        pool = tf.tensor_scatter_nd_min(tensor, indices, updates, name=name)
    else:
        # BUG FIX: the old message suggested 'mean', which is not implemented;
        # list only the operations that are actually supported.
        raise TypeError("Unknown scatter operation, choose: 'sum', 'max', 'min'.")
    return pool
| 2.109375 | 2 |
pybpodapi/com/messaging/debug.py | ckarageorgkaneen/pybpod-api | 1 | 12798040 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
from pybpodapi.com.messaging.base_message import BaseMessage
class DebugMessage(BaseMessage):
    """ Information line for things like experiment name , task name, board id, etc. """

    # Identifier used when tagging/filtering messages by type.
    MESSAGE_TYPE_ALIAS = "debug"
    # Light-grey RGB triple; presumably used when rendering this message
    # type in the GUI -- confirm against the renderer.
    MESSAGE_COLOR = (200, 200, 200)
| 1.203125 | 1 |
instance_data/problem_printer.py | LuddeWessen/assembly-robot-manager-minizinc | 3 | 12798048 | # MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
from problem_instance import Problem
class ProblemPrinter:
    """Renders a :class:`Problem` instance as a MiniZinc ``.dzn`` data file."""

    # Mapping from raw fixture codes to their one-letter labels
    # (0 = gripper "G", 1 = suction "S", -1 = unused "_").
    _FIXTURE_CHARS = {0: "G", 1: "S", -1: "_"}

    def __init__(self, fixture_order_raw, duration_rnd_seed, **kwargs):
        self.p = Problem(fixture_order_raw=fixture_order_raw, **kwargs)
        self.fixture_order_raw = fixture_order_raw
        self.no_grip = None
        self.no_suction = None
        self.duration_rnd_seed = duration_rnd_seed

    def GetFixtureOrderString(self):
        """Return the fixture order as a compact string such as ``"GS_"``.

        Unknown codes map to the literal text ``"ERROR"`` so bad input is
        plainly visible in the generated file name.
        """
        return "".join(self._FIXTURE_CHARS.get(code, "ERROR")
                       for code in self.fixture_order_raw)

    def GetNoComp(self):
        """Total number of components (gripper + suction) in the problem."""
        return self.p.no_grip + self.p.no_suction

    def FilePrint(self, file_name_prefix):
        """Write the ``.dzn`` setup file and return its file name.

        The name encodes the component count and fixture order,
        e.g. ``prefix7_GS_.dzn``.
        """
        setup_file_name = (file_name_prefix + str(self.GetNoComp()) + "_"
                           + self.GetFixtureOrderString() + ".dzn")
        # BUG FIX: use a context manager so the handle is closed even when a
        # write raises (the original `open`/`close` pair leaked on errors).
        with open(setup_file_name, "w") as out:
            # File header
            out.write("%-----------------------------------------------------------------------------%\n")
            out.write("% Dual Arm Multi Capacity Multi Tool Scheduling / Routing\n")
            out.write("% Assembly Configuration\n")
            out.write("% Auto Generated by python script, authored by <NAME> \n")
            out.write("%-----------------------------------------------------------------------------%\n\n\n")
            # Durations (randomised with the configured seed for repeatability)
            self.p.RandomizeTaskDurations(self.duration_rnd_seed)
            out.write("task_durations = ")
            out.write(self.p.GetDurationsOfTasksString(str_offset=len("task_durations = ") + 1))
            out.write('\n\n\n')
            # Task sets
            out.write("TRAY_TASKS = " + self.p.TrayTasksToString() + ";\n")
            out.write("CAMERA_TASKS = " + self.p.CameraTasksToString() + ";\n")
            out.write("OUTPUT_TASKS = " + self.p.OutputTasksToString() + ";\n")
            out.write('\n\n')
            out.write("empty_gripper_tasks = " + self.p.PressTasksToString() + ";\n")
            out.write('\n\n')
            # Tool pick-and-place orders
            # TODO: last row does not seem to have press - which it does no, since it is output
            # However, we assume it does!
            # Fix!
            out.write("gripper_pick_tasks_orders = " + self.p.GetPickTaskOrderString(0) + ";\n\n")
            out.write("suction_pick_tasks_orders = " + self.p.GetPickTaskOrderString(1) + ";\n\n")
            out.write("fixture_task_orders = " + self.p.GetFixtureTaskOrderString() + ";\n\n")
        return setup_file_name
| 1.859375 | 2 |
docker_registry_frontend/manifest.py | cschaba/docker-registry-frontend | 21 | 12798056 | <filename>docker_registry_frontend/manifest.py
import abc
import functools
import json
import operator
class DockerRegistryManifest(abc.ABC):
    """Common interface for Docker registry image manifests.

    Concrete schema implementations override the ``get_*`` accessors;
    the base versions simply raise :class:`NotImplementedError`.
    """

    def __init__(self, content):
        # Raw (already parsed) manifest document.
        self._content = content

    def get_created_date(self):
        raise NotImplementedError

    def get_entrypoint(self):
        raise NotImplementedError

    def get_exposed_ports(self):
        raise NotImplementedError

    def get_docker_version(self):
        raise NotImplementedError

    def get_volumes(self):
        raise NotImplementedError
class DockerRegistrySchema1Manifest(DockerRegistryManifest):
    """Accessors for schema-1 manifests, where image metadata lives in
    JSON-encoded ``v1Compatibility`` blobs inside ``history`` entries."""

    def __history_newest_first(self):
        # Decode every v1Compatibility blob and order by creation time,
        # most recent entry first.
        entries = [json.loads(item['v1Compatibility'])
                   for item in self._content['history']]
        entries.sort(key=lambda entry: entry['created'], reverse=True)
        return entries

    def __lookup(self, *keys):
        # Walk the nested ``keys`` path in each history entry (newest first)
        # and return the first value found; None when no entry has the path.
        for entry in self.__history_newest_first():
            try:
                return functools.reduce(operator.getitem, keys, entry)
            except KeyError:
                pass
        return None

    def get_created_date(self):
        return self.__lookup('created')

    def get_docker_version(self):
        return self.__lookup('docker_version')

    def get_entrypoint(self):
        return self.__lookup('config', 'Entrypoint')

    def get_exposed_ports(self):
        return self.__lookup('config', 'ExposedPorts')

    def get_layer_ids(self):
        # Layers are content-addressed, so duplicates collapse into the set.
        return {layer['blobSum'] for layer in self._content['fsLayers']}

    def get_volumes(self):
        return self.__lookup('config', 'Volumes')
def makeManifest(content):
    """Build the manifest wrapper matching ``content['schemaVersion']``.

    Args:
        content: Parsed manifest document (must carry ``schemaVersion``).

    Returns:
        DockerRegistryManifest: Concrete accessor for the manifest.

    Raises:
        ValueError: If the schema version is unsupported. (Previously raised
            a bare ``ValueError`` with no message, hiding the actual version.)
    """
    if content['schemaVersion'] == 1:
        return DockerRegistrySchema1Manifest(content)
    else:
        raise ValueError(
            'Unsupported manifest schema version: %r' % content['schemaVersion'])
| 1.578125 | 2 |
vimfiles/bundle/vim-python/submodules/pylint/tests/functional/n/non/non_ascii_name.py | ciskoinch8/vimrc | 463 | 12798064 | """ Tests for non-ascii-name checker. """
áéíóú = 4444 # [non-ascii-name]
def úóíéá(): # [non-ascii-name]
"""yo"""
| 0.972656 | 1 |
docs/conf.py | arthurazs/dotapatch | 12 | 12798072 | <filename>docs/conf.py
#!/usr/bin/env python3
# coding: utf-8
# dotapatch documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 4 11:19:55 2018.
from os.path import abspath
from sys import path
import sphinx_rtd_theme
# Make the project root importable so autodoc can find the package.
path.insert(0, abspath('..'))
# Minimum Sphinx version required to build these docs.
needs_sphinx = '1.6.5'
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
    'sphinx.ext.napoleon']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# -- Project information ------------------------------------------------------
project = 'dotapatch'
copyright = '2016, <NAME>'
author = '<NAME>'
version = '2.4'
release = '2.4.4'
pygments_style = 'sphinx'
# Cross-reference the Python standard library documentation.
intersphinx_mapping = {'https://docs.python.org/3': None}
htmlhelp_basename = 'dotapatchdoc'
# -- LaTeX / man / Texinfo output ---------------------------------------------
latex_documents = [
    (master_doc, 'dotapatch.tex', 'dotapatch Documentation',
     '<NAME>', 'manual'),
]
man_pages = [
    (master_doc, 'dotapatch', 'dotapatch Documentation',
     [author], 1)
]
exclude_patterns = []
language = None
gettext_compact = False
texinfo_documents = [
    (master_doc, 'dotapatch', 'dotapatch Documentation',
     author, 'dotapatch', 'Parses Dota 2 text patches to html format.',
     'Miscellaneous'),
]
# -- HTML output (Read the Docs theme) ----------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'display_version': False
}
| 1.171875 | 1 |
Round #585 (Div 2)/C.py | julianferres/Codeforces | 4 | 12798080 | SI = lambda : input()
from collections import Counter
n = int(input())
a = SI()
b = SI()
def solve(n, a, b):
    """Print swap operations that make strings ``a`` and ``b`` equal.

    Competitive-programming solver: prints ``-1`` when impossible (some
    letter has an odd combined count), otherwise prints the number of
    swaps followed by one 1-based ``i j`` position pair per swap.

    Args:
        n: Length of both strings.
        a: First string (letters 'a'/'b').
        b: Second string, same length as ``a``.
    """
    # A letter with an odd combined count can never be split evenly.
    counts = Counter(a) + Counter(b)
    for letter in counts:
        if counts[letter] & 1:
            print(-1)
            return
    # (Removed dead line `xa = d[a]//2`: it indexed the Counter with the
    # whole string and its result was never used.)
    # Keep only the mismatching positions; matching ones need no work.
    diff_a = []
    diff_b = []
    for i in range(n):
        if a[i] != b[i]:
            diff_a.append((a[i], i))
            diff_b.append((b[i], i))
    a, b = diff_a, diff_b
    aux = len(a)
    if aux == 0:
        print(0)
        return
    # Count mismatched 'a's on the first string's side.
    canta = sum(1 for ch, _ in a if ch == 'a')
    if canta & 1:
        # Odd split: one extra self-swap on the first position balances it.
        print(len(a) // 2 + 1)
        print(a[0][1] + 1, a[0][1] + 1)
        a[0], b[0] = b[0], a[0]
    else:
        print(len(a) // 2)
    # Pair up equal letters on a's side; each pair costs one swap.
    lastA, lastB = -1, -1
    for i in range(aux):
        if a[i][0] == 'a':
            if lastA == -1:
                lastA = a[i][1]
            else:
                print(lastA + 1, a[i][1] + 1)
                lastA = -1
        else:
            if lastB == -1:
                lastB = a[i][1]
            else:
                print(lastB + 1, a[i][1] + 1)
                lastB = -1
# Solve the single test case read at module import time.
solve(n,a,b)
| 2.25 | 2 |
help.py | Fxcilities/KEKWBot | 2 | 12798088 | import discord
from discord.ext import commands
from discord.ext import *
from discord.ext.commands import *
import asyncio
class help(commands.Cog):
    """Cog providing the custom ``kekw!help`` command."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def help(self, ctx):
        """Send an embed describing the bot's commands, then clean up."""
        embed = discord.Embed(
            title="KEKW Bot Help",
            description="_ _\nThank you for inviting KEKW bot!\nCheck out our other bot, [Essentials](https://essentialsbot.xyz)\n\n[Setup the bot](https://github.com/Fxcilities/KEKWBot/blob/main/README.md)",
            color=discord.Color.dark_gold()
        )
        embed.add_field(name="Main commands:", value="**```kekw!start (amount, defaults to 50)```**\n**```kekw!emojis```**", inline=False)
        embed.set_footer(text="Requested by: " + str(ctx.author), icon_url=str(ctx.author.avatar_url))
        # Delete the invoking message and auto-expire the help embed after 30s.
        await ctx.message.delete()
        await ctx.send(embed=embed, delete_after=30)

def setup(bot):
    # Extension entry point used by bot.load_extension().
    bot.add_cog(help(bot))
| 1.835938 | 2 |
abtools/core/models.py | i1bgv/abtools | 3 | 12798096 | <reponame>i1bgv/abtools
# -*- coding: utf-8 -*-
import numpy as np
from .base import Distribution
from .distributions import Bernoulli, Lognormal
class BLModel(Distribution):
    """Bernoulli-Lognormal compound model.

    Combines a Bernoulli component fit on the indicator ``x > 0`` with a
    Lognormal component fit on the strictly positive samples; parent
    samples are combined by element-wise product.
    """

    def __init__(self, x=None, mu=None, std=None,
                 alpha=None, n=None, k_b=None, k_l=None):
        # Bernoulli part: indicator of positivity when raw data is given;
        # alpha/beta act as successes/failures when given analytically.
        self.bernoulli = Bernoulli(
            x=(x > 0) * 1 if x is not None else None,
            alpha=alpha if alpha is not None else None,
            beta=n - alpha if n is not None and alpha is not None else None
        )
        # Lognormal part: only the positive samples carry magnitude info.
        # NOTE(review): `x[x > 0]` implies x is an array-like supporting
        # boolean masking (e.g. numpy array) -- confirm expected input type.
        self.lognormal = Lognormal(
            x=x[x > 0] if x is not None else None,
            mu=mu if mu is not None else None,
            std=std if std is not None else None,
            n=alpha if alpha is not None else None
        )
        super(BLModel, self).__init__()
        self._set_parents(self.bernoulli, self.lognormal)

        def prod(args):
            # Combine component samples by element-wise product.
            return np.prod(args, axis=0)

        self._set_parent_operation(prod, 'Product')
        # NOTE(review): _set_k comes from Distribution (not shown here);
        # presumably a per-component weight -- confirm.
        self.bernoulli.k = self._set_k(k_b)
        self.lognormal.k = self._set_k(k_l)

    def __rshift__(self, dist):
        """Combine two BLModels by combining their components pairwise
        with ``>>`` and building a new analytic BLModel from the results."""
        if not isinstance(dist, BLModel):
            raise TypeError
        new_b_model = self.bernoulli >> dist.bernoulli
        new_l_model = self.lognormal >> dist.lognormal
        new_bl = BLModel(
            mu=new_l_model.mu,
            std=new_l_model.std,
            alpha=new_b_model.alpha,
            n=new_b_model.n
        )
        return new_bl

    def __mul__(self, k):
        """Set component weights from a two-element list ``[k_b, k_l]``."""
        if not isinstance(k, list):
            raise TypeError
        self.bernoulli.k = self._set_k(k[0])
        self.lognormal.k = self._set_k(k[1])
        return self
| 1.984375 | 2 |
src/diamond/collectors/hacheck/test/testhacheck.py | rohangulati/fullerite | 0 | 12798104 | <reponame>rohangulati/fullerite
#!/usr/bin/python
# coding=utf-8
################################################################################
from mock import Mock
from mock import patch
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from urllib2 import HTTPError
from diamond.collector import Collector
from hacheck import HacheckCollector
################################################################################
class TestHacheckCollector(CollectorTestCase):
    """Unit tests for HacheckCollector (Python 2 codebase: mock / urllib2)."""

    def setUp(self):
        config = get_collector_config('HacheckCollector', {})
        self.collector = HacheckCollector(config, None)

    @patch.object(Collector, 'publish')
    @patch('urllib2.urlopen')
    def test_works_with_real_data(self, urlopen_mock, publish_mock):
        # Feed a canned JSON fixture through the mocked HTTP layer and check
        # the expected metric names/values are published.
        urlopen_mock.return_value = self.getFixture('metrics')
        self.collector.collect()
        self.assertPublishedMany(
            publish_mock,
            {
                'hacheck.cache.expirations': 2692,
                'hacheck.cache.sets': 2713,
                'hacheck.cache.gets': 28460,
                'hacheck.cache.hits': 25747,
                'hacheck.cache.misses': 2713,
                'hacheck.outbound_request_queue_size': 12
            },
        )

    @patch.object(Collector, 'publish')
    @patch('urllib2.urlopen')
    def test_graceful_failure_on_http_error(self, urlopen_mock, publish_mock):
        # An HTTP failure must not publish anything (and must not raise).
        urlopen_mock.side_effect = HTTPError(
            Mock(), Mock(), Mock(), Mock(), Mock())
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})

    @patch.object(Collector, 'publish')
    @patch('urllib2.urlopen')
    def test_graceful_failure_on_json_error(self, urlopen_mock, publish_mock):
        # Malformed JSON must likewise be swallowed without publishing.
        urlopen_mock.return_value = self.getFixture('bad_metrics')
        self.collector.collect()
        self.assertPublishedMany(publish_mock, {})

################################################################################
if __name__ == "__main__":
    unittest.main()
| 1.351563 | 1 |
run_files.py | OmarOsman/Arabic_Text_Summarization | 0 | 12798112 | <filename>run_files.py<gh_stars>0
import os, pickle, re
import document
import preprocess
import argparse
import pdb
def get_summary(input_text):
    """Run the full preprocessing pipeline over ``input_text`` and return
    the summary produced by :class:`document.Doc`.

    NOTE(review): the call order below is preserved from the original;
    confirm ``Preprocess`` methods are pure before reordering.
    """
    preprocessor = preprocess.Preprocess()
    cleaned_text = preprocessor.get_clean_article(input_text)
    cleaned_sentences = preprocessor.get_article_sentences(cleaned_text)
    raw_sentences = preprocessor.get_article_sentences(input_text)
    cleaned_paragraphs = preprocessor.get_cleaned_article_paragraphes(cleaned_text)
    paragraph_sentences = preprocessor.get_para_sentences(cleaned_paragraphs)
    word_tokens = preprocessor.get_tokenized_word_sentences(cleaned_sentences)
    doc = document.Doc(
        original_text=input_text,
        original_sentences=raw_sentences,
        # 'ppp' appears to be an internal paragraph marker inserted upstream
        # -- it is stripped before handing the text to the summariser.
        preprocessed_text=cleaned_text.replace('ppp', ""),
        sentences=cleaned_sentences,
        paragraphs=cleaned_paragraphs,
        para_sent_list=paragraph_sentences,
        tokenized_word_sentences=word_tokens)
    return doc.summarize()
def run():
    """Command-line entry point: summarise the file named by ``-i`` from
    the ``input/`` directory and write the result under ``output/`` with
    the same file name."""
    input_dir = "input"
    output_dir = "output"
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--input", required=True,help="path to input text document")
    #ap.add_argument("-o", "--output", required=True,help="path to output Summarized Document")
    args = vars(ap.parse_args())
    # The -i argument is a bare file name, resolved inside input/ and output/.
    input_path = os.path.join(input_dir,args['input'])
    output_path = os.path.join(output_dir,args['input'])
    pr = preprocess.Preprocess()
    input_text = pr.get_article_content(input_path)
    summary = get_summary(input_text)
    #pdb.set_trace()
    with open(output_path,'w' ,encoding = "utf-8") as f: f.write(summary)

if __name__ == "__main__":
    run()
| 1.578125 | 2 |
rx/core/operators/catch.py | mmpio/RxPY | 4,342 | 12798120 | from typing import Callable, Union
import rx
from rx.core import Observable, typing
from rx.disposable import SingleAssignmentDisposable, SerialDisposable
from rx.internal.utils import is_future
def catch_handler(source: Observable, handler: Callable[[Exception, Observable], Observable]) -> Observable:
    """Continue ``source`` with the observable produced by ``handler`` when
    ``source`` terminates with an error.

    Args:
        source: Observable whose errors should be handled.
        handler: Called with ``(exception, source)``; returns the observable
            (or future) to continue with. If the handler itself raises, that
            error is forwarded to the observer.

    Returns:
        Observable: Sequence emitting the source's elements, switching to
        the handler's result on error.
    """
    def subscribe(observer, scheduler=None):
        d1 = SingleAssignmentDisposable()
        subscription = SerialDisposable()
        subscription.disposable = d1

        def on_error(exception):
            try:
                result = handler(exception, source)
            except Exception as ex:  # By design. pylint: disable=W0703
                observer.on_error(ex)
                return

            # Handlers may return a Future; adapt it into an observable.
            result = rx.from_future(result) if is_future(result) else result
            d = SingleAssignmentDisposable()
            # Swapping the serial disposable drops the source subscription.
            subscription.disposable = d
            d.disposable = result.subscribe(observer, scheduler=scheduler)

        d1.disposable = source.subscribe_(
            observer.on_next,
            on_error,
            observer.on_completed,
            scheduler
        )
        return subscription
    return Observable(subscribe)
def _catch(handler: Union[Observable, Callable[[Exception, Observable], Observable]]
           ) -> Callable[[Observable], Observable]:
    """Partially applied ``catch`` operator: returns a function mapping a
    source observable to one that continues with ``handler`` on error."""
    def catch(source: Observable) -> Observable:
        """Continues an observable sequence that is terminated by an
        exception with the next observable sequence.

        Examples:
            >>> op = catch(ys)
            >>> op = catch(lambda ex, src: ys(ex))

        Args:
            handler: Second observable sequence used to produce
                results when an error occurred in the first sequence, or an
                exception handler function that returns an observable sequence
                given the error and source observable that occurred in the
                first sequence.

        Returns:
            An observable sequence containing the first sequence's
            elements, followed by the elements of the handler sequence
            in case an exception occurred.
        """
        if callable(handler):
            return catch_handler(source, handler)
        elif isinstance(handler, typing.Observable):
            return rx.catch(source, handler)
        else:
            # BUG FIX: the message previously read "takes whether an
            # Observable", which was garbled English.
            raise TypeError('catch operator takes either an Observable or a callable handler as argument.')
    return catch
| 2.59375 | 3 |
delivery/pdf_processor/preprocessor/__init__.py | sidmishraw/scp | 2 | 12798128 | # __init__.py
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2017-04-05 20:29:06
# @Last Modified by: <NAME>
# @Last Modified time: 2017-04-05 23:13:28
'''
Houses the core logic used to build the reverse-indices for the words extracted from the PDFs.
The preprocessor module.
'''
# CS 267 specific imports
from preprocessor.build_tables import read_input_files
from preprocessor.build_tables import determine_word_positions
from preprocessor.build_tables import determine_doc_frequency
__all__ = ['read_input_files', 'determine_word_positions', 'determine_doc_frequency']
| 2 | 2 |
franki/docker/compose.py | cr0hn/franki | 1 | 12798136 | import argparse
from typing import List
import yaml
from ..dal.services import ServiceConfig
TAB = " " * 2
def cli_docker_compose(parser: argparse._SubParsersAction):
sub_parser = parser.add_parser("docker-compose",
help="build a docker-compose")
sub_parser.add_argument("-v", "--compose-version",
default="3.7",
help="minimum docker-compose format")
sub_parser.add_argument("PATH", nargs="+")
def build_docker_compose(parsed: argparse.Namespace,
services_config: List[ServiceConfig]) -> str:
#
# NOT USE YAML LIBRARY BECAUSE IT DOESN'T GUARANTEES ORDER ON KEYS
#
data_service = [
f"version: {parsed.compose_version}",
f"services:\n",
]
for serv in services_config:
service = serv.service
# -------------------------------------------------------------------------
# Service config
# -------------------------------------------------------------------------
data_service.extend([
f"{TAB}#{'-' * 40}",
f"{TAB}# Service: '{service.name}'",
f"{TAB}#{'-' * 40}",
f"{TAB}{service.name.lower()}:",
f"{TAB}{TAB}image: {service.name.lower()}:{service.version}"
])
if service.environment:
data_service.append(f"{TAB}{TAB}environment:")
for e in service.environment:
data_service.append(f"{TAB}{TAB}{TAB}- {e}={e}")
if service.port:
data_service.append(f"{TAB}{TAB}ports:")
data_service.append(f"{TAB}{TAB}{TAB}- {service.port}:{service.port}")
if service.command:
data_service.append(f"{TAB}{TAB}command: {service.command}")
if service.entrypoint:
data_service.append(f"{TAB}{TAB}command: {service.entrypoint}")
data_service.append("")
# -------------------------------------------------------------------------
# Dependencies
# -------------------------------------------------------------------------
for dep in service.dependencies:
data_service.append(f"{TAB}{dep.name}:")
data_service.append(f"{TAB}{TAB}image: {dep.image}")
data_service.append(f"{TAB}{TAB}environment:")
for e in dep.environment:
data_service.append(f"{TAB}{TAB}{TAB} - {e}={e}")
# TODO: importar de catálogo
if dep.command:
data_service.append(f"{TAB}{TAB}command: {dep.command}")
# if dep.ports:
# data_service.append(f"{TAB}{TAB}ports: {dep.environment}")
data_service.append("")
data_service.extend([
f"{TAB}#{'-' * 40}",
f"{TAB}# END '{service.name}'",
f"{TAB}#{'-' * 40}"
])
return "\n".join(data_service)
__all__ = ("cli_docker_compose", "build_docker_compose")
| 1.65625 | 2 |
calculate_diff.py | yhc45/IP-Spoof | 0 | 12798144 | <filename>calculate_diff.py<gh_stars>0
#!/usr/bin/python2
from spoof_struct import send_packet
import dpkt, socket, subprocess
from collections import defaultdict
import time
import cPickle as pickle
import itertools
# Real local source address/port used for probe traffic.
src_ip = '192.168.100.128'
# Address we forge as the sender in spoofed probes.
spoof_ip = '192.168.22.21'
src_port = 54024
# Capture file of replies addressed to src_port.
pcap_name = "filter.pcap"
# Maps source IP -> list of observed values. NOTE(review): main() treats the
# pickled form as [port, then alternating ip-id / timestamp entries] --
# confirm against the capture stage.
ipid_map={}
def parse_pcap(file_n):
    """Collect IP-ID values per source address from captured replies.

    Reads ``file_n`` with dpkt and, for every IPv4 packet whose transport
    destination port equals ``src_port``, appends the packet's IP-ID to
    the module-level ``ipid_map`` under the sender's address.
    """
    # 'with' guarantees the capture file is closed even if parsing fails.
    with open(file_n) as f:
        pcap = dpkt.pcap.Reader(f)
        for ts, buf in pcap:
            eth = dpkt.ethernet.Ethernet(buf)
            # BUG FIX: check the frame type *before* touching payload
            # attributes; non-IP frames carry raw bytes with no `.data`.
            if eth.type != dpkt.ethernet.ETH_TYPE_IP:
                continue
            ip = eth.data
            tcp = ip.data
            #src_addr = socket.inet_ntoa(ip.src)
            if tcp.dport == src_port:  # and tcp.sport == port
                # BUG FIX: ipid_map is a plain dict, so the first packet from
                # a new source used to raise KeyError; create the list lazily.
                ipid_map.setdefault(socket.inet_ntoa(ip.src), []).append(ip.id)
    return
def parse_candidate(file_name):
    """Load and return the pickled object stored in ``file_name``.

    Uses a context manager so the handle is closed even when unpickling
    raises (the original open/close pair leaked on errors).
    """
    with open(file_name, 'rb') as f:
        return pickle.load(f)
def main():
    """Cross-reference probed hosts against captured replies and write out
    reflector candidates.

    Reads two pickles produced by earlier stages, reports hosts that never
    answered (``not_found``) or answered from a different port
    (``diff_port``), then keeps hosts whose IP-ID behaviour looks usable
    and pickles them as ``reflector_candidate.pickle``.
    """
    parse_ip = parse_candidate("ip_port_record.pickle")
    ipid_map = parse_candidate("save.p")
    f1 = open("not_found","w+")
    f2 = open("diff_port","w+")
    for ip,port in parse_ip.items():
        if ip not in ipid_map:
            f1.write(ip+"\n")
        elif port != ipid_map[ip][0]:
            f2.write("request to ip: "+ip+ " port: "+str(port)+"\n")
            f2.write("respond to ip: "+ip+ " port: "+str(ipid_map[ip][0])+"\n")
    f1.close()
    f2.close()
    f4 = open("cand","w+")
    reflector_candidate = {}
    for ip,lists in ipid_map.items():
        # Deltas between successive IP-IDs (odd slots) and between their
        # timestamps (even slots). NOTE(review): assumes the list layout is
        # [port, id, ts, id, ts, ...] -- confirm against the capture stage.
        result = [j-i for j, i in zip(lists[3::2],lists[1:-2:2])]
        timestamp = [j-i for j, i in zip(lists[4::2],lists[2:-1:2])]
        # Keep hosts that replied steadily (>0.8s apart), enough times
        # (21-29 deltas), with small positive average IP-ID increments.
        if all(time > 0.8 for time in timestamp) and len(result) <= 29 and len(result) >20 and (sum(result)/len(result))>0 and (sum(result)/len(result)) < 6:
            reflector_candidate[ip] = lists[0]
            f4.write("respond to ip: "+ip)
    f4.close()
    f3 = open( "reflector_candidate.pickle", "wb" )
    pickle.dump( reflector_candidate, f3)
    f3.close()
    print(reflector_candidate)
    # (historic probing / pcap-parsing code kept below for reference)
    #for i in range(30):
    #    for ip, port in parse_ip.items():
    #send_packet(src_ip,src_port,ip,port,1,1)
    #        send_packet(spoof_ip,src_port,ip,port,1,1)
    #        print("ip: "+ip+" id: "+str(port)+"\n")
    #        exit(1)
    #    time.sleep(1)
    #p.send_signal(subprocess.signal.SIGTERM)
    #time.sleep(1)
    #parse_pcap(pcap_name)
    #f = open("result_measure.txt","w+")
    #for ip, id_list in ipid_map.iteritems():
    #    f.write("ip: "+ip+" id: "+str(id_list)+"\n")
    #f.close()

if __name__ == "__main__":
    main()
| 1.570313 | 2 |
email_utils/email_verification.py | Aayush-hub/Bulk-Mailer | 0 | 12798152 | <reponame>Aayush-hub/Bulk-Mailer<gh_stars>0
from json import load

from itsdangerous import BadSignature, SignatureExpired, URLSafeTimedSerializer
# Load verification settings from import.json at import time; each required
# property is validated and surfaced with a descriptive error if absent.
config = None
with open("import.json", "r") as f:
    config = load(f)["jsondata"]

# Token is valid for 1 day
if len(config["email_verification_timeout"]) != 0:
    MAX_TIME = int(config["email_verification_timeout"])
else:
    raise Exception("Property 'email_verification_timeout' not set in 'import.json' file")

# Salt
# BUG FIX: this check previously tested the *timeout* property, so a missing
# salt went undetected (and an empty salt passed validation).
if len(config["email_verification_salt"]) != 0:
    VERIFICATION_SALT = config["email_verification_salt"]
else:
    raise Exception("Property 'email_verification_salt' not set in 'import.json' file")

# Secret Key
# BUG FIX: same copy-paste error -- previously tested the timeout property.
if len(config["email_verification_secret"]) != 0:
    SECRET = config["email_verification_secret"]
else:
    raise Exception("Property 'email_verification_secret' not set in 'import.json' file")
def validate_token(token=None):
    """Helps in confirming the Email Address with the help of the token, sent on the registered email address.\n
    Keyword Arguments:
    token -- Token passed in the user's email
    """
    try:
        URLSafeTimedSerializer(SECRET).loads(token, salt=VERIFICATION_SALT, max_age=MAX_TIME)
    except (SignatureExpired, BadSignature):
        # BUG FIX: a tampered or malformed token raises BadSignature, which
        # previously escaped and crashed the caller; both expired and invalid
        # tokens are now rejected uniformly.
        return False
    # Token was successfully validated
    return True
def generate_token(email=None):
    """Return a signed email-verification token for ``email``.

    Keyword Arguments
    email -- Email address for which the token is to be generated; must be
             a non-empty string, otherwise None is returned.
    """
    # Reject anything that is not a non-empty string.
    if not isinstance(email, str) or not email:
        print("Error: Invalid Email address passed")
        return None
    # The token embeds the email plus a timestamp, signed with SECRET.
    return URLSafeTimedSerializer(SECRET).dumps(email, salt=VERIFICATION_SALT)
leetcode/add_binary.py | zhangao0086/Python-Algorithm | 3 | 12798160 | <gh_stars>1-10
#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary numerals given as strings and return the sum as a
        binary string (grade-school addition, least-significant bit first)."""
        bits = []
        carry = 0
        width = max(len(a), len(b))
        for pos in range(width):
            total = carry
            if pos < len(a):
                total += ord(a[len(a) - pos - 1]) - ord('0')
            if pos < len(b):
                total += ord(b[len(b) - pos - 1]) - ord('0')
            bits.append(chr(total % 2 + ord('0')))
            carry = total // 2
        result = ''.join(reversed(bits))
        return result if carry == 0 else '1' + result
# Smoke tests executed when the module is run directly.
if __name__ == '__main__':
    assert Solution().addBinary("11", "1") == "100"
    assert Solution().addBinary("1010", "1011") == "10101"
examples/spatially-varying-anisotropy/run.py | davidcortesortuno/finmag | 10 | 12798168 | <filename>examples/spatially-varying-anisotropy/run.py<gh_stars>1-10
"""
Demonstrating spatially varying anisotropy. Example with anisotropy vectors as follows:
-----------------------------------
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
-----------------------------------
^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
| | | | | | | | | | | |
| | | | | | | | | | | |
-----------------------------------
"""
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import pylab
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.field import Field
from finmag.energies import UniaxialAnisotropy, Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def run_simulation(plot=False):
    """Relax a toy exchange-spring film and optionally plot the easy-axis
    and magnetisation profiles along z.

    Keyword Arguments:
        plot -- when True, save profile.png and always save a ParaView
                file of the relaxed magnetisation.
    """
    mu0 = 4.0 * np.pi * 10**-7  # vacuum permeability N/A^2
    Ms = 1.0e6  # saturation magnetisation A/m
    A = 13.0e-12  # exchange coupling strength J/m
    Km = 0.5 * mu0 * Ms**2  # magnetostatic energy density scale kg/ms^2
    lexch = (A/Km)**0.5  # exchange length m
    unit_length = 1e-9
    K1 = Km
    # Mesh dimensions are expressed in exchange lengths (nm units).
    L = lexch / unit_length
    nx = 10
    Lx = nx * L
    ny = 1
    Ly = ny * L
    nz = 30
    Lz = nz * L
    mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(Lx, Ly, Lz), nx, ny, nz)
    # Anisotropy easy axis is (0, 0, 1) in the lower half of the film and
    # (1, 0, 0) in the upper half. This is a toy model of the exchange spring
    # systems that <NAME> is working on.
    boundary = Lz / 2.0
    expr_a = df.Expression(("x[2] <= b ? 0 : 1", "0", "x[2] <= b ? 1 : 0"), b=boundary, degree=1)
    V = df.VectorFunctionSpace(mesh, "DG", 0, dim=3)
    a = Field(V, expr_a)
    sim = Simulation(mesh, Ms, unit_length)
    sim.set_m((1, 0, 1))
    sim.add(UniaxialAnisotropy(K1, a))
    sim.add(Exchange(A))
    sim.relax()
    if plot:
        # Sample easy axis and relaxed magnetisation along the film's
        # centre line (x = Lx/2, y = Ly/2).
        points = 200
        zs = np.linspace(0, Lz, points)
        axis_zs = np.zeros((points, 3))  # easy axis probed along z-axis
        m_zs = np.zeros((points, 3))  # magnetisation probed along z-axis
        for i, z in enumerate(zs):
            axis_zs[i] = a((Lx/2.0, Ly/2.0, z))
            m_zs[i] = sim.m_field((Lx/2.0, Ly/2.0, z))
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(zs, axis_zs[:, 0], "-o", label="a_x")
        ax.plot(zs, axis_zs[:, 2], "-x", label="a_z")
        ax.plot(zs, m_zs[:, 0], "-", label="m_x")
        ax.plot(zs, m_zs[:, 2], "-", label="m_z")
        ax.set_xlabel("z (nm)")
        ax.legend(loc="upper left")
        plt.savefig(os.path.join(MODULE_DIR, "profile.png"))
    sim.m_field.save_pvd(os.path.join(MODULE_DIR, 'exchangespring.pvd'))

if __name__ == "__main__":
    run_simulation(plot=True)
| 2.125 | 2 |
pagi_api.py | RAIRLab/PAGIapi-python | 0 | 12798176 | <reponame>RAIRLab/PAGIapi-python<gh_stars>0
"""
Python PAGIworld API
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, RAIR Lab"
__credits__ = ["<NAME>"]
__license__ = "MIT"
import math
import os
import socket
import time
# When True, send_message validates command/sensor/force names client-side
# before anything is written to the socket.
ERROR_CHECK = True
VALID_COMMANDS = ["sensorRequest", "addForce", "loadTask", "print", "findObj", "setState",
                  "getActiveStates", "setReflex", "removeReflex", "getActiveReflexes"]
VALID_SENSORS = ["S", "BP", "LP", "RP", "A", "MDN", "MPN"]
# L0..L4 / R0..R4 -- presumably left/right hand sensor cells; confirm
# against the PAGIworld API documentation.
for i in range(5):
    VALID_SENSORS.append("L%d" % i)
    VALID_SENSORS.append("R%d" % i)
# V0.0 .. V30.20 -- 31x21 grid of "V" sensors (vision field, presumably).
for i in range(0, 31):
    for j in range(0, 21):
        VALID_SENSORS.append("V%d.%d" % (i, j))
# P0.0 .. P15.10 -- 16x11 grid of "P" sensors (peripheral field, presumably).
for i in range(0, 16):
    for j in range(0, 11):
        VALID_SENSORS.append("P%d.%d" % (i, j))
VALID_FORCES = ["RHvec", "LHvec", "BMvec", "RHH", "LHH", "RHV", "LHV", "BMH", "BMV", "J", "BR",
                "RHG", "LHG", "RHR", "LHR"]
# pylint: disable=too-many-instance-attributes
class PAGIWorld(object):
"""
:type pagi_socket: socket.socket
:type __ip_address: str
:type __port: int
:type __timeout: float
:type __message_fragment: str
:type __task_file: str
:type message_stack: list
"""
    def __init__(self, ip_address="", port=42209, timeout=3):
        """Connect to a running PAGIworld instance and build the agent wrapper.

        :param ip_address: host to connect to; "" means this machine's address
        :param port: PAGIworld API port
        :param timeout: socket timeout in seconds
        """
        self.pagi_socket = None
        self.__ip_address = ip_address
        self.__port = port
        self.__timeout = timeout
        # Holds a partial line received from the socket between reads.
        self.__message_fragment = ""
        self.__task_file = ""
        self.message_stack = list()
        self.connect(ip_address, port, timeout)
        self.agent = PAGIAgent(self)
def connect(self, ip_address="", port=42209, timeout=3):
"""
Create a socket to the given
:param ip:
:param port:
:return:
:raises: ConnectionRefusedError
"""
if ip_address == "":
ip_address = socket.gethostbyname(socket.gethostname())
self.__ip_address = ip_address
self.__port = port
self.__timeout = timeout
self.__message_fragment = ""
self.__task_file = ""
self.message_stack = list()
self.pagi_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.pagi_socket.connect((ip_address, port))
self.pagi_socket.setblocking(False)
self.pagi_socket.settimeout(timeout)
def disconnect(self):
"""
Close the socket to PAGIWorld and then reset internal variables (in case we just use
connect directly without creating new PAGIWorld instance)
:return:
"""
self.pagi_socket.close()
def __assert_open_socket(self):
"""
Make sure that we have an existing socket connection. If we don't, exception will be raised.
:return:
:raises: RuntimeError
"""
if self.pagi_socket is None:
raise RuntimeError("No open socket. Use connect() to open a new socket connection")
def send_message(self, message):
"""
Send a message to the socket. We make sure that the message is a valid action type, as well
verify that if the message is for a sensor or action, that it's a valid sensor or action
to prevent bad calls.
:param message:
:type message: str
:return:
:raises: RuntimeError
"""
self.__assert_open_socket()
if ERROR_CHECK:
command = message[:message.find(",")]
if command == "" or command not in VALID_COMMANDS:
raise RuntimeError("Invalid command found in the message '%s'" % message)
end = message[len(command)+1:].find(",")
if end == -1:
secondary = message[len(command)+1:]
else:
secondary = message[len(command)+1:end + len(command) + 1]
if command == "sensorRequest" and secondary not in VALID_SENSORS:
raise RuntimeError("Invalid sensor '%s' in message '%s'" % (secondary, message))
elif command == "addForce" and secondary not in VALID_FORCES:
raise RuntimeError("Invalid force '%s' in message '%s'" % (secondary, message))
# all messages must end with \n
if message[-1] != "\n":
message += "\n"
self.pagi_socket.send(message.encode())
def get_message(self, code="", block=False):
"""
Gets messages from the socket. If code is blank, then we just return the first message
from the socket, otherwise return the first matching message with that code, saving all
other messages to a stack. If block is set to False, and there's no response from the
socket, after self.__timeout seconds, function will raise socket.timeout exception. If
block is set to true, no exception will be thrown, but program will stop in this function
if socket doesn't return anything
:param code:
:type code: str
:param block:
:type block: bool
:return:
:raises: socket.timeout
"""
if block:
self.pagi_socket.setblocking(True)
response = self.__get_message_from_stack(code)
while True and response != "":
while "\n" not in self.__message_fragment:
self.__message_fragment += self.pagi_socket.recv(4096).decode()
message_index = self.__message_fragment.find("\n")
if message_index == -1:
break
else:
response = self.__message_fragment[:message_index]
self.__message_fragment = self.__message_fragment[message_index+1:]
if code == "" or (response[:len(code)] == code and response[len(code)] == ","):
break
else:
self.message_stack.append(response)
if block:
self.pagi_socket.setblocking(False)
self.pagi_socket.settimeout(self.__timeout)
return response
def __get_message_from_stack(self, code):
"""
Attempts to return a message from the stack if (1) the stack isn't empty and (2) either
code is blank or it matches something on the message stack
:param code:
:return: str
"""
if len(self.message_stack) > 0:
if code != "":
for index in range(len(self.message_stack)):
if self.message_stack[index][:len(code)] == code and \
self.message_stack[index][len(code)] == ",":
return self.message_stack.pop(0)
return None
else:
return self.message_stack.pop(0)
def load_task(self, task_file):
"""
Loads a task in PAGIworld. We additionally save the task file name so we can reset things
if necessary
:param task_file:
:type task_file: str
:raises: FileNotFoundError
"""
if not os.path.isfile(task_file):
raise RuntimeError("Task file at '%s' was not found" % task_file)
self.__task_file = task_file
self.send_message("loadTask,%s" % task_file)
def reset_task(self):
"""
Resets the task to the one that was loaded in self.load_task. If one wasn't loaded, then
a RuntimeError will be raised.
:raises: RuntimeError
"""
if self.__task_file == "" or self.__task_file is None:
raise RuntimeError("Cannot reset task, no previous task file found")
self.load_task(self.__task_file)
def print_text(self, text):
"""
Print text to the PAGIworld console window.
:param text:
:type text: str
:return:
"""
text = str(text)
self.send_message("print,%s" % text)
self.get_message(code="print")
def set_state(self, name, length):
"""
Set a state within PAGIworld.
:param name:
:type name: str
:param length:
:type length: int
:return:
"""
self.send_message("setState,%s,%d" % (name, length))
self.get_message(code="setState")
def remove_state(self, name):
"""
"Removes" states from PAGIworld by just setting it's duration to zero (so that can't ever
really be in a state)
:param name:
:return:
"""
self.send_message("setState,%s,0" % name)
self.get_message(code="setState")
def get_all_states(self):
"""
Returns a list of all states that are currently in PAGIworld.
:return: list
"""
self.send_message("getActiveStates")
states = self.get_message(code="activeStates").split(",")
return states[1:]
def set_reflex(self, name, conditions, actions=None):
"""
Sets a reflex in PAGIworld to be carried out on conditions.
:param name:
:param conditions:
:param actions:
:return:
"""
if actions is not None:
self.send_message("setReflex,%s,%s,%s" % (name, conditions, actions))
else:
self.send_message("setReflex,%s,%s" % (name, conditions))
self.get_message(code="setReflex")
def remove_reflex(self, name):
"""
Removes a reflex completely from PAGIworld
:param name:
:return:
"""
self.send_message("removeReflex,%s" % name)
self.get_message(code="removeReflex")
def get_all_reflexes(self):
"""
Returns a list of all the active reflexes in PAGIworld
:return: list
"""
self.send_message("getActiveReflexes")
reflexes = self.get_message(code="activeReflexes").split(",")
return reflexes[1:]
def drop_item(self, name, x_coord, y_coord, description=None):
"""
Creates an item and drops into into PAGIworld. These items are the ones pre-built into
PAGIworld.
:param name:
:param x:
:param y:
:param n:
:return:
"""
if description is None or description == "":
self.send_message("dropItem,%s,%f,%f" % (name, x_coord, y_coord))
else:
self.send_message("dropItem,%s,%f,%f,%s" % (name, x_coord, y_coord, description))
self.get_message(code="dropItem")
# pylint: disable=too-many-arguments
def create_item(self, name, image_file, x, y, m, ph, r, e, k, degrees=True):
"""
Creates a new item in PAGIworld with the specified properties
:param name:
:param image_file:
:param x:
:param y:
:param m:
:param ph:
:param r:
:param e:
:param k:
:param degrees:
:return:
"""
if degrees:
r = r * math.pi / 180.
self.send_message("createItem,%s,%s,%f,%f,%f,%d,%f,%f,%d" % (name, image_file,
x, y, m, ph, r, e, k))
self.get_message(code="createItem")
class PAGIAgent(object):
    """
    High-level control of the PAGI agent's body: jumping, rotation, movement,
    vision sensors, and the two hands.

    :type pagi_world: PAGIWorld
    :type left_hand: PAGIAgentHand
    :type right_hand: PAGIAgentHand
    """
    def __init__(self, pagi_world):
        if not isinstance(pagi_world, PAGIWorld):
            raise ValueError("You must pass in a valid PagiWorld variable to PagiAgent")
        self.pagi_world = pagi_world
        self.left_hand = PAGIAgentHand('l', pagi_world)
        self.right_hand = PAGIAgentHand('r', pagi_world)

    def jump(self):
        """
        Causes the agent to try and jump. He will only be able to if his bottom edge is touching
        something solid, otherwise he'll do nothing.

        :return: bool True if agent has jumped (his bottom is touching something solid) otherwise
            False
        """
        self.pagi_world.send_message("addForce,J,1000")
        response = self.pagi_world.get_message(code="J").split(",")
        return int(response[1]) == 1

    def reset_agent(self):
        """
        Resets agent state back to a starting position (looking upward with hands in starting
        position)

        NOTE(review): only the rotation is reset here; the hands are not moved.
        Confirm whether hand repositioning was intended.
        :return:
        """
        self.reset_rotation()

    def reset_rotation(self):
        """
        Resets the agent's rotation back to 0 degrees (looking upward)
        :return:
        """
        self.rotate(0, absolute=True)

    def rotate(self, val, degrees=True, absolute=False):
        """
        Rotate the agent some number of degrees/radians. If absolute is True, then we rotate to
        position specified from 0 (looking up), otherwise rotate him relative to where he's looking.
        Therefore, if he's looking down at 180 degrees, and we tell him to rotate 90 degrees, if
        absolute is True, he'll be looking to the left at 90 degrees and if absolute is False,
        he'll be looking to the right at 270 degrees
                    0
                90 agent 270
                   180
        :param val:
        :type val: float
        :param degrees:
        :type degrees: bool
        :param absolute:
        :type absolute: bool
        :return:
        """
        if not degrees:
            # Radians in -> degrees on the wire.
            val = val * 180. / math.pi
        if absolute:
            # Turn the absolute target into a relative delta from the current
            # heading (get_rotation() returns degrees here).
            val %= 360.
            val -= self.get_rotation()
        self.pagi_world.send_message("addForce,BR,%f" % val)
        self.pagi_world.get_message(code="BR")

    def get_rotation(self, degrees=True):
        """
        Returns rotation in either degrees (0 - 359) or radians (0 - 2*pi) of agent (0 is looking
        upward)

        :param degrees:
        :type degrees: bool
        :return:
        """
        self.pagi_world.send_message("sensorRequest,A")
        response = self.pagi_world.get_message(code="A").split(",")
        rotation = float(response[-1])
        # NOTE(review): the 180/pi multiplication below implies the sensor
        # reports radians, which makes this `%= 360` effectively a no-op; if the
        # sensor reports degrees instead, the conversion is inverted. Confirm
        # the sensor's units.
        rotation %= 360
        if degrees:
            rotation = rotation * 180 / math.pi
        return rotation

    def move_paces(self, paces, direction='L'):
        """
        Attempts to move the agent some number of paces (defined as one width of his body) to
        either the left or right.

        :param paces:
        :type paces: int
        :param direction: "L"/"left" or "R"/"right" (case-insensitive)
        :type direction: str
        :return:
        """
        assert_left_or_right(direction)
        # +x pushes rightward, -x leftward (absolute frame used below).
        val = 1 if direction[0].upper() == "R" else -1
        cnt = 0
        while cnt < paces:
            self.send_force(x=(val * 1000), absolute=True)
            # Pause between pushes, presumably to let the physics play out a
            # full pace -- confirm timing against PAGIworld.
            time.sleep(2)
            cnt += 1

    def send_force(self, x=0, y=0, absolute=False):
        """
        Sends a vector force to the agent to move his body. If absolute is False, then vectors are
        relative to the direction agent is looking, thus +y is always in direction of top of agent,
        -y is bottom, +x is towards his right side, -x is his left side. If absolute is true, then
        vector +y is world up, -y is world bottom, +x is world right and -x is world left.

        :param x:
        :type x: float
        :param y:
        :type y: float
        :param absolute:
        :type absolute: bool
        :return:
        """
        x = float(x)
        y = float(y)
        if not absolute or (x == 0 and y == 0):
            # Relative forces (and the zero vector) can be sent as-is.
            self.pagi_world.send_message("addForce,BMvec,%f,%f" % (x, y))
        else:
            # Absolute forces must be re-projected into the agent's rotated frame.
            rotation = self.get_rotation()
            if x != 0 and y != 0:
                # NOTE(review): z is presumably meant to be the force magnitude,
                # but sin(acos(ay/hyp)) * ay does not equal hyp -- confirm this
                # math before relying on diagonal absolute forces.
                ax = math.fabs(x)
                ay = math.fabs(y)
                hyp = math.sqrt(ax ** 2 + ay ** 2)
                angle = math.acos(ay / hyp)
                z = math.sin(angle) * ay
            else:
                # Axis-aligned force: the magnitude is just the nonzero component.
                if x != 0:
                    z = math.fabs(x)
                else:
                    z = math.fabs(y)
            nx, ny = PAGIAgent.__get_relative_vector(x, y, z, rotation)
            # NOTE(review): looks like leftover debug output -- confirm removal.
            print(nx, ny)
            self.pagi_world.send_message("addForce,BMvec,%f,%f" % (nx, ny))
        self.pagi_world.get_message(code="BMvec")

    @staticmethod
    def __get_relative_vector(x, y, z, rotation):
        """
        TODO: Finish and simplify

        Re-projects an absolute force of magnitude ``z`` pointing along (x, y)
        into the agent's frame, given his current ``rotation`` (degrees).

        :param x:
        :param y:
        :param z:
        :param rotation:
        :return:
        """
        # Recover the absolute angle of the (x, y) direction: 0 = up, 90 = left,
        # 180 = down, 270 = right (matching the diagram in rotate()).
        if x == 0:
            if y < 0:
                angle = 180
            else:
                angle = 0
        elif y == 0:
            if x > 0:
                angle = 270
            else:
                angle = 90
        elif x < 0:
            if y > 0:
                angle = math.acos(z / y) * 180 / math.pi
            else:
                angle = math.acos(z / x) * 180 / math.pi + 90
        else:
            if y < 0:
                angle = math.acos(z / y) * 180 / math.pi + 180
            else:
                angle = math.acos(z / x) * 180 / math.pi + 270
        # Angle of the requested force relative to the agent's own heading.
        adjusted = rotation - angle
        radjusted = adjusted * math.pi / 180
        # Axis-aligned relative angles have exact projections.
        if adjusted == 0:
            return 0, z
        elif adjusted == 180 or adjusted == -180:
            return 0, (-1 * z)
        elif adjusted == 90 or adjusted == -270:
            return z, 0
        elif adjusted == 270 or adjusted == -90:
            return (-1 * z), 0
        else:
            # Re-project magnitude z onto the agent's axes, quadrant by quadrant.
            if adjusted > 0:
                if adjusted < 90:
                    ny = math.cos(radjusted) * z
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2))
                elif adjusted < 180:
                    nx = math.cos(radjusted - 90) * z
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2)) * -1
                elif adjusted < 270:
                    ny = math.cos(radjusted - 180) * z * -1
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2)) * -1
                else:
                    nx = math.cos(radjusted - 270) * z * -1
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2))
            else:
                if adjusted < -90:
                    ny = math.cos(radjusted * -1) * z
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2)) * -1
                elif adjusted < -180:
                    nx = math.cos(radjusted * -1 - 90) * z * -1
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2)) * -1
                elif adjusted < -270:
                    ny = math.cos(radjusted * -1 - 180) * z * -1
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2))
                else:
                    nx = math.cos(radjusted * -1 - 270) * z
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2))
            return nx, ny

    def get_position(self):
        """
        Gets x/y coordinates of the agent in the world

        :return: tuple(float, float) of coordinates of agent
        """
        self.pagi_world.send_message("sensorRequest,BP")
        response = self.pagi_world.get_message(code="BP").split(",")
        return float(response[1]), float(response[2])

    def get_periphal_vision(self):
        """
        Returns a list of 11 (rows) x 16 (columns) points which contains all of his periphal vision.
        vision[0][0] represents lower left of the vision field with vision[10][15] representing
        upper right

        :return: list of size 11 x 16
        """
        self.pagi_world.send_message("sensorRequest,MPN")
        response = self.pagi_world.get_message(code="MPN").split(",")
        return self.__process_vision(response, 16)

    def get_detailed_vision(self):
        """
        Returns the detailed vision field as a list of rows, 21 values per row.

        :return: list of rows of length 21
        """
        self.pagi_world.send_message("sensorRequest,MDN")
        response = self.pagi_world.get_message(code="MDN").split(",")
        return self.__process_vision(response, 21)

    @staticmethod
    def __process_vision(response, column_length):
        """
        Internal method to process returned vision repsonse. Splits the response into a list of
        lists where each inner list is the length of specified column_length.

        :param response: comma-split sensor reply; element 0 is the sensor code
        :param column_length: number of values per row
        :return:
        """
        vision = list()
        current = list()
        # Start at 1 to skip the sensor-code element of the reply.
        for j in range(1, len(response)):
            if (j - 1) % column_length == 0:
                if len(current) > 0:
                    vision.append(current)
                current = list()
            current.append(response[j])
        # Append the final (possibly partial) row.
        vision.append(current)
        return vision

    def center_hands(self):
        """
        Moves both of the agent's hands to the center of his body

        :return:
        """
        raise NotImplementedError
class PAGIAgentHand(object):
    """
    Controls a single hand (left or right) of the PAGI agent.

    :type pagi_world: PAGIWorld
    """
    def __init__(self, hand, pagi_world):
        assert_left_or_right(hand)
        # Store just the single-letter prefix ("L" or "R") used in commands.
        self.hand = hand[0].upper()
        self.pagi_world = pagi_world

    def get_position(self):
        """
        Return the hand's position relative to the agent.

        :return: tuple(float, float) of the x, y coordinates of the hand
        """
        sensor = "%sP" % self.hand
        self.pagi_world.send_message("sensorRequest,%s" % sensor)
        parts = self.pagi_world.get_message(code=sensor).split(",")
        return float(parts[1]), float(parts[2])

    def release(self):
        """
        Open the hand, dropping anything it is currently holding.

        :return:
        """
        command = "%sHR" % self.hand
        self.pagi_world.send_message(command)
        self.pagi_world.get_message(code=command)

    def grab(self):
        """
        Close the hand, gripping anything it is currently touching.

        :return:
        """
        command = "%sHG" % self.hand
        self.pagi_world.send_message(command)
        self.pagi_world.get_message(code=command)

    def send_force(self, x, y, absolute=False):
        """
        Apply a force vector to the hand, moving it.

        :param x: horizontal force component
        :type x: float
        :param y: vertical force component
        :type y: float
        :param absolute: absolute-frame forces are not implemented yet
        :type absolute: bool
        :return:
        """
        if not absolute:
            self.pagi_world.send_message("%sHvec,%f,%f" % (self.hand, x, y))
        else:
            # Absolute-frame hand forces are not implemented.
            pass
        self.pagi_world.get_message(code="%sHvec" % self.hand)
def assert_left_or_right(direction):
    """
    Validate that ``direction`` names a hand: "L", "R", "LEFT" or "RIGHT"
    (case-insensitive). Raises ValueError otherwise.

    :param direction:
    :return:
    """
    if direction.upper() not in ("R", "L", "RIGHT", "LEFT"):
        raise ValueError("You can only use a L or R value for hands")
| 1.476563 | 1 |
lux_ai/lux_gym/reward_spaces.py | mrzhuzhe/Kaggle_Lux_AI_2021 | 44 | 12798184 | from abc import ABC, abstractmethod
import copy
import logging
import numpy as np
from scipy.stats import rankdata
from typing import Dict, NamedTuple, NoReturn, Tuple
from ..lux.game import Game
from ..lux.game_constants import GAME_CONSTANTS
from ..lux.game_objects import Player
def count_city_tiles(game_state: Game) -> np.ndarray:
    """Return an array with each player's total city-tile count."""
    tile_counts = [player.city_tile_count for player in game_state.players]
    return np.array(tile_counts)
def count_units(game_state: Game) -> np.ndarray:
    """Return an array with the number of units each player owns."""
    unit_counts = [len(player.units) for player in game_state.players]
    return np.array(unit_counts)
def count_total_fuel(game_state: Game) -> np.ndarray:
    """Return an array of each player's fuel summed over all their cities."""
    fuel_totals = [
        sum(city.fuel for city in player.cities.values())
        for player in game_state.players
    ]
    return np.array(fuel_totals)
def count_research_points(game_state: Game) -> np.ndarray:
    """Return an array of each player's accumulated research points."""
    points = [player.research_points for player in game_state.players]
    return np.array(points)
def should_early_stop(game_state: Game) -> bool:
    """
    Return True when the game is effectively decided: one player has no city
    tiles or no units left, or one player controls at least 75% of all city
    tiles or of all units.
    """
    ct_count = count_city_tiles(game_state)
    unit_count = count_units(game_state)
    # Guard the denominators against a 0/0 when both players have nothing.
    ct_pct = ct_count / max(ct_count.sum(), 1)
    unit_pct = unit_count / max(unit_count.sum(), 1)
    eliminated = (ct_count == 0).any() or (unit_count == 0).any()
    dominating = (ct_pct >= 0.75).any() or (unit_pct >= 0.75).any()
    return eliminated or dominating
class RewardSpec(NamedTuple):
    """Static description of a reward space's range and structure."""
    reward_min: float   # smallest reward the space can emit in one step
    reward_max: float   # largest reward the space can emit in one step
    zero_sum: bool      # True when the two players' rewards always sum to zero
    only_once: bool     # True when the reward is granted at most once (e.g. win/loss)
# All reward spaces defined below
class BaseRewardSpace(ABC):
    """
    A class used for defining a reward space and/or done state for either the full game or a sub-task
    """
    def __init__(self, **kwargs):
        # Subclasses strip the kwargs they understand before calling super();
        # anything left over here is unrecognized, so warn about it.
        if kwargs:
            logging.warning(f"RewardSpace received unexpected kwargs: {kwargs}")

    @staticmethod
    @abstractmethod
    def get_reward_spec() -> RewardSpec:
        # Return the static RewardSpec describing this space's reward range.
        pass

    @abstractmethod
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        # Return ((reward_p0, reward_p1), done) for the current step.
        pass

    def get_info(self) -> Dict[str, np.ndarray]:
        """Optional extra logging/diagnostic info; empty by default."""
        return {}
# Full game reward spaces defined below
class FullGameRewardSpace(BaseRewardSpace):
    """
    A class used for defining a reward space for the full game.
    Subclasses implement compute_rewards(); done passes through unchanged.
    """
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        return self.compute_rewards(game_state, done), done

    @abstractmethod
    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # Return the (reward_p0, reward_p1) pair for the current step.
        pass
class GameResultReward(FullGameRewardSpace):
    """Sparse win/loss reward: +1 to the winner, -1 to the loser at game end."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1.,
            reward_max=1.,
            zero_sum=True,
            only_once=True
        )

    def __init__(self, early_stop: bool = False, **kwargs):
        super(GameResultReward, self).__init__(**kwargs)
        self.early_stop = early_stop

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        if self.early_stop:
            # Optionally end the episode as soon as the outcome is decided.
            done = done or should_early_stop(game_state)
        return self.compute_rewards(game_state, done), done

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        if not done:
            return 0., 0.
        # Score each player (city tiles, units as tie-break), rank the scores,
        # then map the ranks onto {-1, +1}.
        scores = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
        ranked = (rankdata(scores) - 1.) * 2. - 1.
        return tuple(ranked)

    @staticmethod
    def compute_player_reward(player: Player):
        tiles = player.city_tile_count
        units = len(player.units)
        # max board size is 32 x 32 => 1024 max city tiles and units,
        # so this should keep it strictly so we break by city tiles then unit count
        return tiles * 10000 + units
class CityTileReward(FullGameRewardSpace):
    """Dense reward equal to each player's city-tile count scaled into [0, 1]."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=0.,
            reward_max=1.,
            zero_sum=False,
            only_once=False
        )

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # 1024 = 32 * 32 is the maximum possible number of city tiles.
        scaled = count_city_tiles(game_state) / 1024.
        return tuple(scaled)
class StatefulMultiReward(FullGameRewardSpace):
    """
    Dense shaped reward: a weighted sum of per-step *changes* in city tiles,
    units, research points and fuel, plus the final game result, with separate
    scaling for positive and negative components.
    """
    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            zero_sum=False,
            only_once=False
        )

    def __init__(
            self,
            positive_weight: float = 1.,
            negative_weight: float = 1.,
            early_stop: bool = False,
            **kwargs
    ):
        assert positive_weight > 0.
        assert negative_weight > 0.
        self.positive_weight = positive_weight
        self.negative_weight = negative_weight
        self.early_stop = early_stop

        # Per-player counters from the previous step, used to compute deltas.
        self.city_count = np.empty((2,), dtype=float)
        self.unit_count = np.empty_like(self.city_count)
        self.research_points = np.empty_like(self.city_count)
        self.total_fuel = np.empty_like(self.city_count)

        self.weights = {
            "game_result": 10.,
            "city": 1.,
            "unit": 0.5,
            "research": 0.1,
            "fuel": 0.005,
            # Penalize workers each step that their cargo remains full
            # "full_workers": -0.01,
            "full_workers": 0.,
            # A reward given each step
            "step": 0.,
        }
        # Allow any weight to be overridden via kwargs, then strip those keys
        # so the base class does not warn about them.
        self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
        for key in copy.copy(kwargs).keys():
            if key in self.weights.keys():
                del kwargs[key]
        super(StatefulMultiReward, self).__init__(**kwargs)
        self._reset()

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        if self.early_stop:
            done = done or should_early_stop(game_state)
        return self.compute_rewards(game_state, done), done

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)
        new_research_points = count_research_points(game_state)
        new_total_fuel = count_total_fuel(game_state)

        # Each entry is a per-player (length-2) array of this step's deltas.
        reward_items_dict = {
            "city": new_city_count - self.city_count,
            "unit": new_unit_count - self.unit_count,
            "research": new_research_points - self.research_points,
            # Don't penalize losing fuel at night
            "fuel": np.maximum(new_total_fuel - self.total_fuel, 0),
            "full_workers": np.array([
                sum(unit.get_cargo_space_left() > 0 for unit in player.units if unit.is_worker())
                for player in game_state.players
            ]),
            "step": np.ones(2, dtype=float)
        }

        if done:
            # Final step: rank players and map ranks onto {-1, +1}.
            game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
            game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
            self._reset()
        else:
            game_result_reward = np.array([0., 0.])
        # NOTE(review): these assignments run even when done, immediately
        # overwriting the counters that _reset() just re-initialized -- confirm
        # whether they were meant to sit in the else branch.
        self.city_count = new_city_count
        self.unit_count = new_unit_count
        self.research_points = new_research_points
        self.total_fuel = new_total_fuel
        reward_items_dict["game_result"] = game_result_reward

        assert self.weights.keys() == reward_items_dict.keys()
        reward = np.stack(
            [self.weight_rewards(reward_items_dict[key] * w) for key, w in self.weights.items()],
            axis=0
        ).sum(axis=0)

        # The 500 divisor scales the summed reward down into the spec's range;
        # it appears to be an empirical normalizer -- confirm.
        return tuple(reward / 500. / max(self.positive_weight, self.negative_weight))

    def weight_rewards(self, reward: np.ndarray) -> np.ndarray:
        # Scale positive and negative components asymmetrically.
        reward = np.where(
            reward > 0.,
            self.positive_weight * reward,
            reward
        )
        reward = np.where(
            reward < 0.,
            self.negative_weight * reward,
            reward
        )
        return reward

    def _reset(self) -> NoReturn:
        # Counters start at 1 (presumably matching each player's initial single
        # city tile and unit -- confirm) and 0 for research/fuel.
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
        self.research_points = np.zeros_like(self.research_points)
        self.total_fuel = np.zeros_like(self.total_fuel)
class ZeroSumStatefulMultiReward(StatefulMultiReward):
    """StatefulMultiReward re-centered so the two players' rewards sum to zero."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1.,
            reward_max=1.,
            zero_sum=True,
            only_once=False
        )

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        raw = super(ZeroSumStatefulMultiReward, self).compute_rewards(game_state, done)
        # Subtract the mean so the pair always sums to zero.
        centered = np.array(raw)
        return tuple(centered - centered.mean())
class PunishingExponentialReward(BaseRewardSpace):
    """
    Reward based on each player's *absolute* counts (city tiles, units,
    research, fuel) each step, with a hard -0.1 punishment -- and an immediate
    episode end -- as soon as a player loses any city tile or unit.
    """
    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            zero_sum=False,
            only_once=False
        )

    def __init__(
            self,
            **kwargs
    ):
        # Per-player counters from the previous step, used to detect losses.
        self.city_count = np.empty((2,), dtype=float)
        self.unit_count = np.empty_like(self.city_count)
        self.research_points = np.empty_like(self.city_count)
        self.total_fuel = np.empty_like(self.city_count)

        # NOTE: "game_result" is weighted 0, so the final result is tracked but
        # contributes nothing to the summed reward.
        self.weights = {
            "game_result": 0.,
            "city": 1.,
            "unit": 0.5,
            "research": 0.01,
            "fuel": 0.001,
        }
        # Allow any weight to be overridden via kwargs, then strip those keys
        # so the base class does not warn about them.
        self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
        for key in copy.copy(kwargs).keys():
            if key in self.weights.keys():
                del kwargs[key]
        super(PunishingExponentialReward, self).__init__(**kwargs)
        self._reset()

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)
        new_research_points = count_research_points(game_state)
        new_total_fuel = count_total_fuel(game_state)

        # Deltas are only used to detect losses below.
        city_diff = new_city_count - self.city_count
        unit_diff = new_unit_count - self.unit_count
        # Unlike StatefulMultiReward, these are absolute counts, not deltas.
        reward_items_dict = {
            "city": new_city_count,
            "unit": new_unit_count,
            "research": new_research_points,
            "fuel": new_total_fuel,
        }

        if done:
            # Final step: rank players and map ranks onto {-1, +1}.
            game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
            game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
            self._reset()
        else:
            game_result_reward = np.array([0., 0.])
        # NOTE(review): these assignments run even when done, immediately
        # overwriting the counters that _reset() just re-initialized -- confirm
        # whether they were meant to sit in the else branch.
        self.city_count = new_city_count
        self.unit_count = new_unit_count
        self.research_points = new_research_points
        self.total_fuel = new_total_fuel
        reward_items_dict["game_result"] = game_result_reward

        assert self.weights.keys() == reward_items_dict.keys()
        reward = np.stack(
            [reward_items_dict[key] * w for key, w in self.weights.items()],
            axis=0
        ).sum(axis=0)

        # Any lost city tile or unit overrides the reward with -0.1 and ends
        # the episode for both players.
        lost_unit_or_city = (city_diff < 0) | (unit_diff < 0)
        reward = np.where(
            lost_unit_or_city,
            -0.1,
            reward / 1_000.
        )

        return tuple(reward), done or lost_unit_or_city.any()

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # All logic lives in compute_rewards_and_done(); this space cannot be
        # used through the plain compute_rewards() interface.
        raise NotImplementedError

    def _reset(self) -> NoReturn:
        # Counters start at 1 (presumably matching each player's initial single
        # city tile and unit -- confirm) and 0 for research/fuel.
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
        self.research_points = np.zeros_like(self.research_points)
        self.total_fuel = np.zeros_like(self.total_fuel)
# Subtask reward spaces defined below
# NB: Subtasks that are "different enough" should be defined separately since each subtask gets its own embedding
# See obs_spaces.SUBTASK_ENCODING
# TODO: Somehow include target locations for subtasks?
class Subtask(BaseRewardSpace, ABC):
    """
    Base class for one-off subtask reward spaces: each player earns reward 1
    the first time its goal is met, after which the episode ends.
    """

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        """
        Don't override reward_spec or you risk breaking classes like multi_subtask.MultiSubtask
        """
        return RewardSpec(
            reward_min=0.,
            reward_max=1.,
            zero_sum=False,
            only_once=True
        )

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        achieved = self.completed_task(game_state)
        rewards = tuple(achieved.astype(float))
        # The episode ends as soon as either player reaches the goal.
        return rewards, achieved.any() or done

    @abstractmethod
    def completed_task(self, game_state: Game) -> np.ndarray:
        # Return a length-2 boolean array: whether each player met the goal.
        pass

    def get_subtask_encoding(self, subtask_encoding: dict) -> int:
        """Look up this subtask's integer encoding by its concrete type."""
        return subtask_encoding[type(self)]
class CollectNWood(Subtask):
    """Subtask: gather at least ``n`` wood across all of a player's units."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"], **kwargs):
        super(CollectNWood, self).__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        wood_totals = [
            sum(unit.cargo.wood for unit in player.units)
            for player in game_state.players
        ]
        return np.array(wood_totals) >= self.n
class CollectNCoal(Subtask):
    """Subtask: gather at least ``n`` coal across all of a player's units."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 2, **kwargs):
        super(CollectNCoal, self).__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        coal_totals = [
            sum(unit.cargo.coal for unit in player.units)
            for player in game_state.players
        ]
        return np.array(coal_totals) >= self.n
class CollectNUranium(Subtask):
    """Subtask: gather at least ``n`` uranium across all of a player's units."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 5, **kwargs):
        super(CollectNUranium, self).__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        uranium_totals = [
            sum(unit.cargo.uranium for unit in player.units)
            for player in game_state.players
        ]
        return np.array(uranium_totals) >= self.n
class MakeNCityTiles(Subtask):
    """Subtask: own at least ``n_city_tiles`` city tiles in total."""

    def __init__(self, n_city_tiles: int = 2, **kwargs):
        super(MakeNCityTiles, self).__init__(**kwargs)
        assert n_city_tiles > 1, "Players start with 1 city tile already"
        self.n_city_tiles = n_city_tiles

    def completed_task(self, game_state: Game) -> np.ndarray:
        tile_counts = count_city_tiles(game_state)
        return tile_counts >= self.n_city_tiles
class MakeNContiguousCityTiles(MakeNCityTiles):
    """Subtask: build a single city containing at least ``n_city_tiles`` tiles."""

    def completed_task(self, game_state: Game) -> np.ndarray:
        largest_city_sizes = []
        for player in game_state.players:
            # The appended 0 avoids max() on an empty sequence when a player
            # has no cities at all.
            sizes = [len(city.citytiles) for city in player.cities.values()]
            largest_city_sizes.append(max(sizes + [0]))
        return np.array(largest_city_sizes) >= self.n_city_tiles
class CollectNTotalFuel(Subtask):
    """
    Subtask: accumulate ``n_total_fuel`` fuel across all cities. The default is
    one city's upkeep for a full night.
    """

    def __init__(self, n_total_fuel: int = GAME_CONSTANTS["PARAMETERS"]["LIGHT_UPKEEP"]["CITY"] *
                                           GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"], **kwargs):
        super(CollectNTotalFuel, self).__init__(**kwargs)
        self.n_total_fuel = n_total_fuel

    def completed_task(self, game_state: Game) -> np.ndarray:
        fuel = count_total_fuel(game_state)
        return fuel >= self.n_total_fuel
class SurviveNNights(Subtask):
    """
    Subtask: survive ``n_nights`` full day/night cycles without losing any
    city tile or unit along the way.
    """
    def __init__(self, n_nights: int = 1, **kwargs):
        super(SurviveNNights, self).__init__(**kwargs)
        cycle_len = GAME_CONSTANTS["PARAMETERS"]["DAY_LENGTH"] + GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"]
        self.target_step = n_nights * cycle_len
        assert self.target_step <= GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"]

        # Last-seen per-player counts, used to detect a lost city tile or unit.
        self.city_count = np.empty((2,), dtype=int)
        self.unit_count = np.empty_like(self.city_count)

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        failed_task = self.failed_task(game_state)
        completed_task = self.completed_task(game_state)
        if failed_task.any():
            # A player that lost something scores 0; the other gets 0.5, plus
            # another 0.5 if the survival target was also reached this step.
            rewards = np.where(
                failed_task,
                0.,
                0.5 + 0.5 * completed_task.astype(float)
            )
        else:
            rewards = completed_task.astype(float)
        done = failed_task.any() or completed_task.any() or done
        if done:
            self._reset()
        return tuple(rewards), done

    def completed_task(self, game_state: Game) -> np.ndarray:
        # Both players share the same completion flag (turn count is global).
        return np.array([
            game_state.turn >= self.target_step
        ]).repeat(2)

    def failed_task(self, game_state: Game) -> np.ndarray:
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)

        # A drop in either count since the previous step means something died.
        failed = np.logical_or(
            new_city_count < self.city_count,
            new_unit_count < self.unit_count
        )
        self.city_count = new_city_count
        self.unit_count = new_unit_count
        return failed

    def _reset(self) -> NoReturn:
        # Counters start at 1 (presumably matching each player's initial single
        # city tile and unit -- confirm).
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
class GetNResearchPoints(Subtask):
    """
    Subtask: reach ``n_research_points`` research points. The default is the
    threshold required to mine coal.
    """

    def __init__(
            self,
            n_research_points: int = GAME_CONSTANTS["PARAMETERS"]["RESEARCH_REQUIREMENTS"]["COAL"],
            **kwargs
    ):
        super(GetNResearchPoints, self).__init__(**kwargs)
        self.n_research_points = n_research_points

    def completed_task(self, game_state: Game) -> np.ndarray:
        points = np.array([player.research_points for player in game_state.players])
        return points >= self.n_research_points
| 2.640625 | 3 |
Bank_loan_project/code.py | NehaBhojani/ga-learner-dsmp-repo | 0 | 12798192 | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# Load the loan dataset; `path` is supplied by the execution environment.
bank = pd.read_csv(path)

# Split the columns by dtype for a first look at the data.
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)

numerical_var = bank.select_dtypes(include='number')
print(numerical_var)

# code starts here

# code ends here


# --------------
# code starts here
# Drop the identifier column (no analytical value), then impute every missing
# value with its column's mode.
banks = bank.drop('Loan_ID', axis=1)
bank_mode = banks.mode(axis=0)
banks.fillna(bank_mode.loc[0, :], inplace=True)
# code ends here


# --------------
# Code starts here
# Average loan amount broken down by gender, marital status and self-employment.
avg_loan_amount = pd.pivot_table(banks, values='LoanAmount',
                                 index=['Gender', 'Married', 'Self_Employed'],
                                 aggfunc=np.mean)
# code ends here


# --------------
# code starts here
# Loan-approval percentages for self-employed vs. non-self-employed applicants.
self_emp_y = banks['Self_Employed'] == 'Yes'
loan_status = banks['Loan_Status'] == 'Y'
self_emp_n = banks['Self_Employed'] == 'No'
# Total number of loan applications in this dataset (fixed for the exercise).
Loan_Status = 614

# value_counts()[1] counts the True entries of each boolean mask.
loan_approved_se = (self_emp_y & loan_status).value_counts()[1]
loan_approved_nse = (self_emp_n & loan_status).value_counts()[1]
print(loan_approved_se, ' ', loan_approved_nse, Loan_Status)

percentage_se = (loan_approved_se / Loan_Status) * 100
percentage_nse = (loan_approved_nse / Loan_Status) * 100
print("Percent of Loan approval for Self employed people is : ", percentage_se)
print("Percent of Loan approval for people who are not self-employed is: ", percentage_nse)
# code ends here


# --------------
# code starts here
# Convert loan terms from months to years and count long (>= 25 year) loans.
loan_term = banks['Loan_Amount_Term'].apply(lambda x: x / 12)
big_loan_term = banks[loan_term >= 25].shape[0]
# code ends here


# --------------
# code starts here
# Mean applicant income and credit history, grouped by loan status.
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
print(mean_values)
# code ends here
| 2.234375 | 2 |
getimage.py | junerye/test | 5 | 12798200 |
#!/usr/local/bin/python3
#encoding:utf8
'''
作用:爬取京东商城手机分类下的的所有手机商品的展示图片。
url:为需要爬取的网址
page:页数
'''
import re
import urllib.request
def getimage(url, page):
    """Download all product thumbnails from one JD list page.

    Images are saved as ``Desktop/jd/<page>-<index>.jpg``. The index now
    advances exactly once per image; the original incremented it up to
    three times when a download failed, garbling the numbering.

    :param url: list-page URL to scrape.
    :param page: page number, used only in the output filename.
    """
    html = urllib.request.urlopen(url).read()
    # Decode the response instead of str(bytes), which would embed the
    # b'...' wrapper and escape sequences into the text being searched.
    html = html.decode('utf-8', errors='ignore')
    # Narrow the search to the product-list section of the page.
    pattern1 = r'<div id="plist".+? <div class="page clearfix">'
    rst1 = re.compile(pattern1).findall(html)[0]
    # Collect the <img> tags holding the 220x220 thumbnails.
    pattern2 = r'<img width="220" height="220" .+?//.+?\.jpg'
    imagelist = re.compile(pattern2).findall(rst1)
    x = 1
    for imageurl in imagelist:
        imagename = "Desktop/jd/" + str(page) + "-" + str(x) + ".jpg"
        # Extract the bare //host/path.jpg URL and add the scheme.
        pattern3 = r'//.+?\.jpg'
        imageurl = "http:" + re.compile(pattern3).findall(imageurl)[0]
        try:
            urllib.request.urlretrieve(imageurl, filename=imagename)
        except urllib.error.URLError:
            # Best effort: skip images that fail to download.
            pass
        x += 1
# Crawl page 1 only; widen the range to fetch more pages.
for page_no in range(1, 2):
    page_url = "https://list.jd.com/list.html?cat=9987,653,655&page=" + str(page_no)
    getimage(page_url, page_no)
| 1.953125 | 2 |
inferlo/base/graph_model.py | InferLO/inferlo | 1 | 12798208 | <filename>inferlo/base/graph_model.py<gh_stars>1-10
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
from __future__ import annotations
import abc
import itertools
from typing import TYPE_CHECKING, Iterable, Tuple, Dict, List
import networkx as nx
import numpy as np
from inferlo.base.factors import FunctionFactor
from inferlo.base.variable import Variable
if TYPE_CHECKING:
from inferlo.base import Domain, Factor
class GraphModel(abc.ABC):
    """Abstract class representing any graphical model."""

    def __init__(self, num_variables: int, domain: Domain):
        """
        :param num_variables: Number of variables in the model.
        :param domain: Default domain of each variable.
        """
        self.num_variables = num_variables
        self._default_domain = domain
        # Cache of Variable objects created on demand, keyed by index.
        self._vars = dict()

    def get_variable(self, idx: int) -> Variable:
        """Returns variable by its index."""
        if not 0 <= idx < self.num_variables:
            raise IndexError(
                "index %d is out of bounds for random vector of size %d" % (
                    idx, self.num_variables))
        if idx not in self._vars:
            # Create the Variable lazily on first access and cache it.
            v = Variable(self, idx, self._default_domain)
            self._vars[idx] = v
        return self._vars[idx]

    def get_variables(self) -> List[Variable]:
        """Returns all variables."""
        return [self.get_variable(i) for i in range(self.num_variables)]

    def __getitem__(self, idx: int) -> Variable:
        # Allow model[i] as shorthand for model.get_variable(i).
        return self.get_variable(idx)

    @abc.abstractmethod
    def add_factor(self, factor: Factor):
        """Adds a factor to the model."""

    def __imul__(self, other: Factor):
        # ``model *= factor`` adds the factor to the model.
        self.add_factor(other)
        return self

    def __len__(self):
        # Length of the model is its number of variables.
        return self.num_variables

    @abc.abstractmethod
    def infer(self, algorithm='auto', **kwargs):
        """Performs inference."""

    @abc.abstractmethod
    def max_likelihood(self, algorithm='auto', **kwargs) -> np.ndarray:
        """Finds the most probable state."""

    def sample(self, num_samples: int, algorithm='auto',
               **kwargs) -> np.ndarray:
        """Generates samples."""
        # NOTE(review): no implementation and not marked @abc.abstractmethod,
        # so calling this base implementation returns None -- confirm intent.

    @abc.abstractmethod
    def get_factors(self) -> Iterable[Factor]:
        """Returns all factors."""

    def get_symbolic_variables(self) -> List[FunctionFactor]:
        """Prepares variables for usage in expressions.

        Returns lists of trivial ``FunctionFactor`` s, each of them
        representing a factor on one variable with identity function.
        They can be used in mathematical expressions, which will result in
        another ``FunctionFactor``.
        """
        return [FunctionFactor(self, [i], lambda x: x[0]) for i in
                range(self.num_variables)]

    def get_factor_graph(self) -> Tuple[nx.Graph, Dict[int, str]]:
        """Builds factor graph for the model.

        Factor graph is a bipartite graph with variables in one part and
        factors in other graph. Edge denotes that factor depends on variable.
        """
        factors = list(self.get_factors())
        var_labels = [v.name for v in self.get_variables()]
        fact_labels = [f.get_name() for f in factors]
        labels = var_labels + fact_labels
        labels = {i: labels[i] for i in range(len(labels))}
        graph = nx.Graph()
        # Nodes 0..num_variables-1 are variables; the rest are factors.
        graph.add_nodes_from(range(self.num_variables), bipartite=0)
        graph.add_nodes_from(
            range(self.num_variables, self.num_variables + len(factors)),
            bipartite=1)
        for factor_id in range(len(factors)):
            for var_id in factors[factor_id].var_idx:
                graph.add_edge(var_id, self.num_variables + factor_id)
        return graph, labels

    def draw_factor_graph(self, ax):
        """Draws the factor graph."""
        graph, labels = self.get_factor_graph()
        top = nx.bipartite.sets(graph)[0]
        vc = self.num_variables
        fc = len(nx.bipartite.sets(graph)[1])
        pos = nx.bipartite_layout(graph, top)
        # Variables: round red-ish nodes.
        nx.draw_networkx(graph, pos, ax, labels=labels, node_shape='o',
                         nodelist=list(range(vc)),
                         node_color='#ffaaaa')
        # Draw factors in another color.
        nx.draw_networkx(graph, pos, ax, labels=labels,
                         nodelist=list(range(vc, vc + fc)),
                         node_shape='s',
                         edgelist=[],
                         node_color='lightgreen')

    def evaluate(self, x: np.ndarray) -> float:
        """Returns value of non-normalized pdf in point.

        In other words, just substitutes values into factors and multiplies
        them.
        """
        x = np.array(x)
        assert x.shape == (self.num_variables,)
        result = 1.0
        for factor in self.get_factors():
            result *= factor.value(x[factor.var_idx])
        return result

    def part_func_bruteforce(model):
        """Evaluates partition function in very inefficient way.

        Exponential in the number of variables; intended for tests only.
        """
        # NOTE(review): parameter is named `model` instead of `self`; when
        # called as a method the instance is bound to `model`, so behaviour
        # is unchanged, but the naming is unconventional.
        part_func = 0
        for x in itertools.product(
                *(v.domain.values for v in model.get_variables())):
            part_func += model.evaluate(np.array(x))
        return part_func

    def max_likelihood_bruteforce(model):
        """Evaluates most likely state in a very inefficient way.

        Exhaustively scores every assignment; ties resolve to the last
        maximal assignment visited (``>=`` comparison).
        """
        best_state = None
        best_prob = 0.0
        for x in itertools.product(
                *(v.domain.values for v in model.get_variables())):
            prob = model.evaluate(np.array(x))
            if prob >= best_prob:
                best_state = x
                best_prob = prob
        return best_state

    def get_max_domain_size(self):
        """Returns the biggest domain size over all variables."""
        return max([var.domain.size() for var in self.get_variables()])
| 1.992188 | 2 |
linuxOperation/app/security/views.py | zhouli121018/core | 0 | 12798216 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import copy
# import os
import json
# import ConfigParser
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template.response import TemplateResponse
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.db.models import Q
from django_redis import get_redis_connection
from django.utils.translation import ugettext_lazy as _
from app.core.models import Mailbox, DomainAttr, Domain
from app.utils.domain_session import get_domainid_bysession, get_session_domain
# from lib.tools import get_process_pid, restart_process, get_fail2ban_info, fail2ban_ip
from lib.licence import licence_required
from lib.tools import clear_redis_cache
from .forms import BanRuleForm, BanBlockListForm, Fail2BanTrustForm, SpamSetForm, \
SendFrequencyForm, PasswordWeakForm, PasswordWeakImportForm
from .models import Fail2Ban, Fail2BanTrust, Fail2BanBlock, PasswordWeakList
def clear_fail2ban_cache():
    """Drop every cached fail2ban entry from redis, then clear the shared cache."""
    conn = get_redis_connection()
    stale_keys = conn.keys("fail2ban_cache*")
    for key in stale_keys:
        conn.delete(key)
    clear_redis_cache()
###############################
# 禁用IP列表
@licence_required
def fail2ban_rulelist(request):
    """List fail2ban rules; a POST with status=delete removes one rule."""
    if request.method == "POST":
        id = request.POST.get('id', "")
        status = request.POST.get('status', "")
        if status == "delete":
            Fail2Ban.objects.filter(pk=id).delete()
            # Rules are cached in redis; invalidate after any change.
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    return render(request, "security/fail2ban_rulelist.html",context={})
@licence_required
def fail2ban_rulelist_ajax(request):
    """DataTables JSON endpoint for the fail2ban rule list.

    Applies search/ordering/pagination from the DataTables GET parameters
    and renders each row via a template fragment, extracting the <td>
    cells with a regex.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column order must match the table definition in the template.
    colums = ['id', 'name', 'proto', 'internal','block_fail', 'block_unexists', 'block_minute', 'update_time', 'disabled',]
    lists = Fail2Ban.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(proto__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE: relies on Python 2 integer division to get a whole page number.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # Pull the rendered <td> cells out of the row template.
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_rulelist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def fail2ban_rule_add(request):
    """Create a new fail2ban rule via BanRuleForm; re-render on invalid input."""
    form = BanRuleForm()
    if request.method == "POST":
        form = BanRuleForm(request.POST)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加规则成功'))
            return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    return render(request, "security/fail2ban_rule_add.html",context={"form":form})
@licence_required
def fail2ban_rule_modify(request, rule_id):
    """Edit an existing fail2ban rule identified by ``rule_id``."""
    obj = Fail2Ban.objects.get(id=rule_id)
    form = BanRuleForm(instance=obj)
    if request.method == "POST":
        form = BanRuleForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改规则成功'))
            return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    return render(request, "security/fail2ban_rule_add.html",context={"form":form})
###############################
# 屏蔽IP
@licence_required
def fail2ban_blocklist(request):
    """List blocked IPs; a POST with status=delete removes one entry."""
    if request.method == "POST":
        id = request.POST.get('id', "")
        status = request.POST.get('status', "")
        if status == "delete":
            Fail2BanBlock.objects.filter(pk=id).delete()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    return render(request, "security/fail2ban_blocklist.html",context={})
@licence_required
def fail2ban_blocklist_ajax(request):
    """DataTables JSON endpoint for the blocked-IP list.

    Same search/order/paginate/render pattern as fail2ban_rulelist_ajax.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column order must match the table definition in the template.
    colums = ['id', 'name', 'ip', 'expire_time', 'update_time', 'disabled',]
    lists = Fail2BanBlock.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(ip__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE: relies on Python 2 integer division to get a whole page number.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_blocklist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def fail2ban_block_add(request):
    """Add a blocked-IP entry via BanBlockListForm."""
    form = BanBlockListForm()
    if request.method == "POST":
        form = BanBlockListForm(request.POST)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
            return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    return render(request, "security/fail2ban_block_add.html",context={"form":form})
@licence_required
def fail2ban_block_modify(request, block_id):
    """Edit an existing blocked-IP entry identified by ``block_id``."""
    obj = Fail2BanBlock.objects.get(id=block_id)
    form = BanBlockListForm(instance=obj)
    if request.method == "POST":
        form = BanBlockListForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改成功'))
            return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    return render(request, "security/fail2ban_block_add.html",context={"form":form})
###############################
# 屏蔽白名单
@licence_required
def fail2ban_whitelist(request):
    """List whitelisted (trusted) IPs; a POST with status=delete removes one."""
    if request.method == "POST":
        id = request.POST.get('id', "")
        status = request.POST.get('status', "")
        if status == "delete":
            Fail2BanTrust.objects.filter(pk=id).delete()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        return HttpResponseRedirect(reverse('fail2ban_whitelist'))
    return render(request, "security/fail2ban_whitelist.html",context={})
@licence_required
def fail2ban_whitelist_add(request):
    """Add a trusted IP via Fail2BanTrustForm."""
    form = Fail2BanTrustForm()
    if request.method == "POST":
        form = Fail2BanTrustForm(request.POST)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
            return HttpResponseRedirect(reverse('fail2ban_whitelist'))
    return render(request, "security/fail2ban_whitelist_add.html",context={"form":form})
@licence_required
def fail2ban_whitelist_modify(request, white_id):
    """Edit an existing trusted-IP entry identified by ``white_id``."""
    obj = Fail2BanTrust.objects.get(id=white_id)
    form = Fail2BanTrustForm(instance=obj)
    if request.method == "POST":
        form = Fail2BanTrustForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改成功'))
            return HttpResponseRedirect(reverse('fail2ban_whitelist'))
    return render(request, "security/fail2ban_whitelist_add.html",context={"form":form})
@licence_required
def fail2ban_whitelist_ajax(request):
    """DataTables JSON endpoint for the trusted-IP list.

    Same search/order/paginate/render pattern as fail2ban_rulelist_ajax.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column order must match the table definition in the template.
    colums = ['id', 'ip', 'name', 'disabled',]
    lists = Fail2BanTrust.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(ip__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE: relies on Python 2 integer division to get a whole page number.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_whitelist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def security_antispam(request):
    """Edit the per-domain anti-spam / anti-virus settings (cf_antispam attr)."""
    domain_id = get_domainid_bysession(request)
    obj = Domain.objects.filter(id=domain_id).first()
    if not obj:
        # No domain selected in the session; reload the page.
        return HttpResponseRedirect(reverse('security_antispam'))
    spam_set = DomainAttr.objects.filter(domain_id=obj.id,type="system",item="cf_antispam").first()
    form = SpamSetForm(instance=spam_set, request=request, domain_id=obj.id)
    if request.method == "POST":
        form = SpamSetForm(instance=spam_set, post=request.POST, request=request, domain_id=obj.id)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'修改设置成功'))
            return HttpResponseRedirect(reverse('security_antispam'))
        else:
            messages.add_message(request, messages.ERROR, _(u'修改设置失败,请检查输入参数'))
    return render(request, "security/antispam.html", context={
        "form": form,
        "domain": obj,
        "spam_check_local_spam" : form.spam_check_local_spam.value,
        "spam_check_local_virus" : form.spam_check_local_virus.value,
        "spam_check_outside_spam" : form.spam_check_outside_spam.value,
        "spam_check_outside_virus" : form.spam_check_outside_virus.value,
    })
@licence_required
def security_frequency(request):
    """Edit the per-domain send-frequency limits (cf_sendlimit attr)."""
    domain_id = get_domainid_bysession(request)
    domain = Domain.objects.filter(id=domain_id).first()
    if not domain:
        # No domain selected in the session; reload the page.
        return HttpResponseRedirect(reverse('security_frequency'))
    frequency_set = DomainAttr.objects.filter(domain_id=domain.id,type="system",item="cf_sendlimit").first()
    form = SendFrequencyForm(instance=frequency_set)
    if request.method == "POST":
        form = SendFrequencyForm(instance=frequency_set, post=request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'修改设置成功'))
    return render(request, "security/frequency_setting.html", context={
        "form" : form,
        "domain" : domain,
    })
@licence_required
def password_weaklist(request):
    """List weak passwords; a POST with status=delete removes one entry."""
    if request.method == "POST":
        id = request.POST.get('id', "")
        status = request.POST.get('status', "")
        if status == "delete":
            PasswordWeakList.objects.filter(pk=id).delete()
            clear_redis_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
        return HttpResponseRedirect(reverse('password_weaklist'))
    return render(request, "security/password_weak_list.html",context={})
@licence_required
def password_weaklist_ajax(request):
    """DataTables JSON endpoint for the weak-password list (capped at 10000 rows)."""
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column order must match the table definition in the template.
    colums = ['id', 'password']
    if search:
        lists = PasswordWeakList.objects.filter( Q(password__contains=search) )
    else:
        lists = PasswordWeakList.objects.all()
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    # Hard cap to keep pagination over huge lists cheap.
    lists = lists[:10000]
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE: relies on Python 2 integer division to get a whole page number.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = lists.count()
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/password_weak_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
def _clean_password(value):
    """Normalise one raw password cell to a bare string (may become empty).

    Strips whitespace, newlines, NULs, spaces and tabs. xlrd returns
    numeric cells as floats, so non-string values are stringified first.
    """
    if not isinstance(value, basestring):
        value = str(value)
    return value.strip().replace('\n', '').replace('\r', '').replace('\000', '').replace(' ', '').replace('\t', '')


@licence_required
def password_weaklist_import(request):
    """Import weak passwords from an uploaded txt / csv / xls(x) file.

    Bug fixes vs. the original:
    - the csv branch sanitised the stale ``line`` variable instead of the
      csv row ``elem``;
    - the xls branch called string methods on the *list* returned by
      ``table.row_values()`` instead of on a cell value.
    Passwords are assumed to be in the first column of csv/xls files
    (TODO confirm against the expected upload format).
    """
    form = PasswordWeakImportForm()
    domain_id = get_domainid_bysession(request)
    domain = get_session_domain(domain_id)
    if request.method == "POST":
        form = PasswordWeakImportForm(data=request.POST, files=request.FILES)
        if form.is_valid():
            success, fail = 0, 0
            fail_list = []
            password_list = []
            if form.file_ext == 'txt':
                # One password per line.
                for line in form.file_obj.readlines():
                    password = _clean_password(line)
                    if not password:
                        continue
                    password_list.append(password)
            if form.file_ext == 'csv':
                import csv
                lines = list(csv.reader(form.file_obj))
                for elem in lines:
                    if not elem:
                        continue
                    password = _clean_password(elem[0])
                    if not password:
                        continue
                    password_list.append(password)
            if form.file_ext in ('xls', 'xlsx'):
                import xlrd
                content = form.file_obj.read()
                workbook = xlrd.open_workbook(filename=None, file_contents=content)
                table = workbook.sheets()[0]
                for line in xrange(table.nrows):
                    # Skip the first two (header) rows.
                    if line in (0, 1):
                        continue
                    cells = table.row_values(line)
                    if not cells:
                        continue
                    password = _clean_password(cells[0])
                    if not password:
                        continue
                    password_list.append(password)
            fail_list = form.save_password_list(password_list)
            fail = len(fail_list)
            success = len(password_list) - fail
            for line in fail_list:
                messages.add_message(request, messages.ERROR, _(u'批量添加失败 : %(fail)s') % {"fail": line})
            messages.add_message(request, messages.SUCCESS,
                _(u'批量添加成功%(success)s个, 失败%(fail)s个') % {"success": success, "fail": fail})
            return HttpResponseRedirect(reverse('password_weaklist'))
    return render(request, "security/password_weak_import.html", {'form': form,})
datatrans/fooddata/search/request.py | KooCook/datatrans | 1 | 12798224 | <reponame>KooCook/datatrans
"""
References:
https://fdc.nal.usda.gov/api-guide.html#food-search-endpoint
"""
from typing import Dict, Union
from datatrans import utils
from datatrans.utils.classes import JSONEnum as Enum
__all__ = ['FoodDataType', 'SortField', 'SortDirection', 'FoodSearchCriteria']
class FoodDataType(Enum):
    """FoodData Central data types; values match the API's ``dataType`` strings."""
    FOUNDATION = 'Foundation'
    SURVEY = 'Survey (FNDDS)'
    BRANDED = 'Branded'
    LEGACY = 'SR Legacy'
class SortField(Enum):
    """Sortable fields of the search endpoint; values are the API field names."""
    DESCRIPTION = 'lowercaseDescription.keyword'
    DATATYPE = 'dataType.keyword'
    PUBDATE = 'publishedDate'
    ID = 'fdcId'
class SortDirection(Enum):
    """Sort direction accepted by the search endpoint."""
    ASC = 'asc'
    DESC = 'desc'
def verify_included_data_types(d: Dict[Union[FoodDataType, bool], bool]):
    """Normalise an includedDataTypes mapping for the search request.

    Keys may be ``FoodDataType`` members or their string values; invalid
    keys raise ``ValueError`` via the enum constructor. The result always
    contains all four data types (as API strings), defaulting to False.
    """
    normalized = {FoodDataType(key): flag for key, flag in d.items()}
    members = (
        FoodDataType.FOUNDATION,
        FoodDataType.SURVEY,
        FoodDataType.BRANDED,
        FoodDataType.LEGACY,
    )
    return {member.value: normalized.get(member, False) for member in members}
class FoodSearchCriteria(utils.DataClass):
    """Represents a FoodData Central search criteria.

    Attributes:
        general_search_input (str): Search query (general text)
        included_data_types (Dict[str, bool]): Specific data types to include in search
        ingredients: The list of ingredients (as it appears on the product label)
        brand_owner (str): Brand owner for the food
        require_all_words (bool): When True, the search will only return foods
            contain all of the words that were entered in the search field
        page_number (int): The page of results to return
        sort_field (SortField): The name of the field by which to sort
        sort_direction (SortDirection): The direction of the sorting
    """
    __slots__ = (
        'general_search_input', 'included_data_types', 'ingredients', 'brand_owner', 'require_all_words', 'page_number',
        'sort_field', 'sort_direction')

    __attr__ = (
        ('general_search_input', str),
        ('included_data_types', dict,
         verify_included_data_types),
        ('ingredients', str),
        ('brand_owner', str),
        ('require_all_words', bool),
        ('page_number', int),
        ('sort_field', SortField),
        ('sort_direction', SortDirection),
    )

    def __init__(self, _dict_: dict = None, **kwargs):
        """Build criteria either from a raw API dict or from keyword args.

        Snake_case keyword names listed in ``__slots__`` are converted to
        the camelCase names the API expects before delegating to DataClass.
        """
        if _dict_ is not None:
            super().__init__(_dict_=_dict_)
            return
        # Iterate over a snapshot of the keys: the original iterated
        # kwargs.items() while popping and inserting, which is unsafe
        # dict mutation during iteration in Python 3.
        for k in list(kwargs):
            if k in self.__slots__:
                kwargs[utils.snake_to_camel(k)] = kwargs.pop(k)
        super().__init__(_dict_=kwargs)
| 1.648438 | 2 |
botforces/utils/discord_common.py | coniferousdyer/Botforces | 0 | 12798232 | """
Contains functions related to Discord-specific features, such as embeds.
"""
import discord
import datetime
import time
from botforces.utils.constants import (
NUMBER_OF_ACS,
USER_WEBSITE_URL,
PROBLEM_WEBSITE_URL,
)
from botforces.utils.services import enclose_tags_in_spoilers
"""
User embeds.
"""
async def create_user_embed(user, author, color):
    """
    Creates an embed with user information.

    :param user: Codeforces user-info dict (handle, avatar; optionally
        firstName/lastName, city/country, rank, rating).
    :param author: Discord user who invoked the command (shown in the footer).
    :param color: embed colour value.
    :return: a ``discord.Embed`` ready to be sent by the caller.
    """
    Embed = discord.Embed(
        title=user["handle"],
        url=f"{USER_WEBSITE_URL}{user['handle']}",
        color=color,
    )
    Embed.set_thumbnail(url=user["avatar"])
    # The remaining fields are optional in the Codeforces API response.
    if "firstName" in user and "lastName" in user:
        Embed.add_field(
            name="Name",
            value=f"{user['firstName']} {user['lastName']}",
            inline=False,
        )
    if "city" in user and "country" in user:
        Embed.add_field(
            name="City",
            value=f"{user['city']}, {user['country']}",
            inline=False,
        )
    if "rank" in user:
        Embed.add_field(
            name="Rank",
            value=user["rank"].title(),
            inline=False,
        )
    else:
        Embed.add_field(name="Rank", value="Unranked", inline=False)
    if "rating" in user:
        Embed.add_field(
            name="Rating",
            value=user["rating"],
            inline=False,
        )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Problem embeds.
"""
async def create_problem_embed(problem, author):
    """
    Creates an embed with problem information.

    :param problem: problem record; accessed both by key (``problem['name']``)
        and by index (``problem[4]`` for the rating) -- presumably a DB row
        object supporting both. TODO confirm.
    :param author: Discord user who invoked the command (shown in the footer).
    :return: a ``discord.Embed`` ready to be sent by the caller.
    """
    Embed = discord.Embed(
        title=f"{problem['contestId']}{problem['contestIndex']}. {problem['name']}",
        url=f"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}",
        color=0xFF0000,
    )
    Embed.add_field(name="Rating", value=problem[4], inline=False)
    # Printing the tags in spoilers
    if problem["tags"] != "[]":
        tags = await enclose_tags_in_spoilers(problem["tags"])
        Embed.add_field(name="Tags", value=tags)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Upcoming contests embeds.
"""
async def create_contest_embed(contestList, author):
    """
    Creates an embed with contest information.

    :param contestList: iterable of Codeforces contest dicts
        (name, id, startTimeSeconds, durationSeconds).
    :param author: Discord user who invoked the command (shown in the footer).
    :return: a ``discord.Embed`` listing each upcoming contest.
    """
    Embed = discord.Embed(title="List of upcoming contests", color=0xFF0000)
    # Adding each contest as a field to the embed
    for contest in contestList:
        # Obtaining the start time of the contest (local time zone).
        date = datetime.datetime.fromtimestamp(contest["startTimeSeconds"])
        dateString = date.strftime("%b %d, %Y, %H:%M")
        # Obtaining contest duration
        duration = datetime.timedelta(seconds=contest["durationSeconds"])
        hours = duration.seconds // 3600
        minutes = (duration.seconds // 60) % 60
        Embed.add_field(
            name=contest["name"],
            value=f"{contest['id']} - {dateString} {time.tzname[0]} - {hours} hrs, {minutes} mins",
            inline=False,
        )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Stalk embeds.
"""
async def create_submissions_embed(submissions, count, handle, author):
    """
    Creates an embed with information about a user's last n solved problems.

    :param submissions: pre-formatted text listing the submissions.
    :param count: number of submissions shown (used in the title).
    :param handle: Codeforces handle of the user.
    :param author: Discord user who invoked the command (shown in the footer).
    :return: a ``discord.Embed`` ready to be sent by the caller.
    """
    Embed = discord.Embed(
        title=f"Last {count} solved by {handle}",
        description=submissions,
        color=0xFF0000,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Graph embeds.
"""
async def create_rating_plot_embed(handle, author):
    """Build the embed wrapping a user's rating-distribution plot.

    The plot itself is attached separately by the caller as ``figure.png``.
    """
    embed = discord.Embed(
        title=f"{handle}'s solved problems",
        description="Note: ? refers to problems that do not have a rating on Codeforces.",
        color=0xFF0000,
    )
    embed.set_image(url="attachment://figure.png")
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
async def create_index_plot_embed(handle, author):
    """Build the embed wrapping a user's contest-index plot.

    The plot itself is attached separately by the caller as ``figure.png``.
    """
    embed = discord.Embed(title=f"{handle}'s solved problems", color=0xFF0000)
    embed.set_image(url="attachment://figure.png")
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
async def create_tags_plot_embed(handle, author):
    """Build the embed wrapping a user's tags plot.

    The plot itself is attached separately by the caller as ``figure.png``.
    """
    embed = discord.Embed(title=f"{handle}'s solved problems", color=0xFF0000)
    embed.set_image(url="attachment://figure.png")
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
"""
Help embeds.
"""
async def create_general_help_embed(author):
    """
    Builds (does not send) the top-level help-menu embed listing all commands.
    """
    Embed = discord.Embed(
        title="Help Menu",
        description="Type `-help command` to learn about a specific command.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="user", value="Displays information about a user.", inline=False
    )
    Embed.add_field(
        name="stalk",
        value="Displays the last n problems solved by a user.",
        inline=False,
    )
    Embed.add_field(name="problem", value="Displays a random problem.", inline=False)
    Embed.add_field(
        name="upcoming",
        value="Displays the list of upcoming Codeforces contests.",
        inline=False,
    )
    Embed.add_field(
        name="duel",
        value="Challenges another user to a duel over a problem.",
        inline=False,
    )
    Embed.add_field(
        name="plotrating",
        value="Plots the problems done by a user, grouped by rating.",
        inline=False,
    )
    Embed.add_field(
        name="plotindex",
        value="Plots the problems done by a user, grouped by contest index.",
        inline=False,
    )
    Embed.add_field(
        name="plottags",
        value="Plots the problems done by a user, grouped by tags.",
        inline=False,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_user_help_embed(author):
    """
    Builds (does not send) the help embed for the `user` command.
    """
    Embed = discord.Embed(
        title="user", description="Displays information about a user.", color=0xFF0000
    )
    Embed.add_field(name="Syntax", value="`-user <codeforces_handle>`", inline=False)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_stalk_help_embed(author):
    """
    Builds (does not send) the help embed for the `stalk` command.
    """
    Embed = discord.Embed(
        title="stalk",
        description=f"Displays the last n problems solved by a user ({NUMBER_OF_ACS} by default).",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax",
        value=f"`-stalk <codeforces_handle>` - Displays last {NUMBER_OF_ACS} submissions of the user\n`-stalk <codeforces_handle> <n>` - Displays last n submissions of the user",
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_problem_help_embed(author):
    """
    Builds (does not send) the help embed for the `problem` command.
    """
    Embed = discord.Embed(
        title="problem",
        description="Displays a random problem of optional rating and/or tags.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax",
        value='`-problem` - Displays a random problem.\n`-problem <rating>` - Displays a random problem of that rating.\n`-problem <list_of_tags>` - Displays a random problem of those tags (multiple tags are allowed).\n`-problem <rating> <list_of_tags>` - Displays a random problem of those tags and rating (order does not matter).\n\nNote: For tags like "binary search", enclose the tag in double quotes.',
        inline=False,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_upcoming_help_embed(author):
    """
    Builds (does not send) the help embed for the `upcoming` command.
    """
    Embed = discord.Embed(
        title="upcoming",
        description="Displays information about upcoming contests.",
        color=0xFF0000,
    )
    Embed.add_field(name="Syntax", value="`-upcoming`", inline=False)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_duel_help_embed(author):
    """Build the help embed describing the ``-duel`` command."""
    syntax = "`-duel @<discord_user> <optional_rating> <optional_tags>` - To challenge a user\n`-endduel` - To end a duel and decide the result (only if a duel is in progress)."
    embed = discord.Embed(
        title="duel",
        description="Challenges another user to a duel over a problem.",
        color=0xFF0000,
    )
    embed.add_field(name="Syntax", value=syntax, inline=False)
    # Footer identifies who invoked the help command.
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
async def create_plotrating_help_embed(author):
    """Build the help embed describing the ``-plotrating`` command."""
    embed = discord.Embed(
        title="plotrating",
        description="Plots the problems done by a user, grouped by rating.",
        color=0xFF0000,
    )
    embed.add_field(name="Syntax", value="`-plotrating <codeforces_handle>`", inline=False)
    # Footer identifies who invoked the help command.
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
async def create_plotindex_help_embed(author):
    """Build the help embed describing the ``-plotindex`` command."""
    embed = discord.Embed(
        title="plotindex",
        description="Plots the problems done by a user, grouped by contest index.",
        color=0xFF0000,
    )
    embed.add_field(name="Syntax", value="`-plotindex <codeforces_handle>`", inline=False)
    # Footer identifies who invoked the help command.
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
async def create_plottags_help_embed(author):
    """Build the help embed describing the ``-plottags`` command."""
    embed = discord.Embed(
        title="plottags",
        description="Plots the problems done by a user, grouped by tags.",
        color=0xFF0000,
    )
    embed.add_field(name="Syntax", value="`-plottags <codeforces_handle>`", inline=False)
    # Footer identifies who invoked the help command.
    embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return embed
"""
Duel embeds.
"""
async def create_duel_begin_embed(problem, author, opponent):
    """Build the embed announcing the start of a duel over *problem*."""
    embed = discord.Embed(
        title=f"{problem['contestId']}{problem['contestIndex']}. {problem['name']}",
        url=f"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}",
        description="The duel starts now!",
        color=0xFF0000,
    )
    embed.add_field(name="Rating", value=problem["rating"], inline=False)
    if problem["tags"] != "[]":
        # Tags go behind spoiler bars so they don't give the approach away.
        spoiler_tags = await enclose_tags_in_spoilers(problem["tags"])
        embed.add_field(name="Tags", value=spoiler_tags)
    embed.add_field(
        name="Duel",
        value=f"{author.display_name} vs {opponent.display_name}",
        inline=False,
    )
    return embed
async def create_duels_embed(duels):
    """Build the embed listing every duel currently in progress."""
    embed = discord.Embed(
        title="Ongoing duels",
        color=0xFF0000,
    )
    for duel in duels:
        # Re-format the stored timestamp into a human-readable form.
        started = datetime.datetime.strptime(
            duel["startTime"], "%Y-%m-%d %H:%M:%S.%f"
        ).strftime("%b %d, %Y %H:%M:%S")
        embed.add_field(
            name=f"{duel['handle_1']} vs {duel['handle_2']}",
            value=f"Problem: {PROBLEM_WEBSITE_URL}{duel['contestId']}/{duel['contestIndex']}\nStart Time: {started} {time.tzname[0]}",
            inline=False,
        )
    return embed
| 1.78125 | 2 |
danlp/datasets/ddisco.py | alexandrainst/DaNLP | 1 | 12798240 | <gh_stars>1-10
import os
import pandas as pd
from danlp.download import DEFAULT_CACHE_DIR, download_dataset, _unzip_process_func, DATASETS
class DDisco:
    """
    Loader for the DDisco dataset.

    DDisco contains user-generated texts from Reddit and Wikipedia,
    each annotated for discourse coherence:

    * 1: low coherence
    * 2: medium coherence
    * 3: high coherence

    :param str cache_dir: the directory for storing cached models
    :param bool verbose: `True` to increase verbosity
    """

    def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
        self.dataset_name = 'ddisco'
        self.file_extension = DATASETS[self.dataset_name]['file_extension']
        self.dataset_dir = download_dataset(self.dataset_name,
                                            process_func=_unzip_process_func,
                                            cache_dir=cache_dir)

    def _read_split(self, split):
        # Each split lives in a tab-separated file, e.g. 'ddisco.train.tsv';
        # rows with missing values are dropped.
        path = os.path.join(self.dataset_dir,
                            self.dataset_name + '.' + split + self.file_extension)
        return pd.read_csv(path, sep='\t', index_col=0, encoding='utf-8').dropna()

    def load_with_pandas(self):
        """
        Loads the DDisco dataset in dataframes with pandas.

        :return: 2 dataframes -- train, test
        """
        return self._read_split('train'), self._read_split('test')
| 1.4375 | 1 |
Hackerearth Set/TheOldMonk.py | Siddharth2016/PYTHON3_prog | 2 | 12798248 | <gh_stars>1-10
# THE OLD MONK
def longest_reach(A, B, N):
    """Return the maximum j - i such that A[i] <= B[k] for every k in i..j.

    The inner scan stops at the first k where A[i] > B[k], matching the
    original break semantics; 0 is returned when no pair qualifies.
    """
    best = 0
    for i in range(N):
        reach = 0  # reset per i (the old code reused a stale value harmlessly)
        for j in range(i, N, 1):
            if A[i] > B[j]:
                break
            reach = j - i
        if reach > best:
            best = reach
    return best


def main():
    """Read T test cases from stdin and print the answer for each."""
    for _ in range(int(input())):
        N = int(input())
        A = [int(a) for a in input().split()]
        B = [int(a) for a in input().split()]
        print(longest_reach(A, B, N))


# Guarding the entry point makes the module importable (and testable)
# without consuming stdin; direct execution is unchanged.
if __name__ == "__main__":
    main()
| 2.203125 | 2 |
ch16/app.py | rauhaanrizvi/code | 10 | 12798256 | from pyreact import setTitle, useEffect, useState, render, createElement as el
def App():
    """ToDo-list component built with pyreact (React-style hooks).

    Renders an add/edit form, a filter (all/open/closed), a live task count
    and the task list itself.  Hook call order must stay fixed, so the
    useState calls below must not be reordered.
    """
    # newTask: text in the input box; editTask: the task dict being edited
    # (None when adding); taskList: list of {'name', 'status'} dicts.
    newTask, setNewTask = useState("")
    editTask, setEditTask = useState(None)
    taskList, setTaskList = useState([])
    taskCount, setTaskCount = useState(0)
    taskFilter, setTaskFilter = useState("all")
    def handleSubmit(event):
        # Form submit: either commit an in-progress edit or append a new task.
        event.preventDefault()
        new_list = list(taskList) # Make a copy
        if editTask is not None: # In edit mode
            taskIndex = new_list.index(editTask) # Get list position
            new_list[taskIndex].update({'name': newTask}) # Update name
        else: # In add mode
            new_list.append({'name': newTask, 'status': False}) # Add new item
        setTaskList(new_list) # Update our state
        setNewTask("") # Clear the new item value
        setEditTask(None) # Clear the edit item value
    def handleEdit(task):
        # Pre-fill the input box and switch the form into edit mode.
        setNewTask(task['name']) # Set the new item value
        setEditTask(task) # Set the edit item value
    def handleDelete(task):
        new_list = list(taskList) # Make a copy
        new_list.remove(task) # Remove the specified item
        setTaskList(new_list) # Update our state
    def handleChange(event):
        # Shared change handler for both the filter radios and the text input.
        target = event['target']
        if target['name'] == 'taskFilter':
            setTaskFilter(target['value'])
        else:
            setNewTask(target['value'])
    def handleChangeStatus(event, task):
        # Checkbox toggled: record the task's completed/open status.
        target = event['target']
        new_list = list(taskList) # Make a copy
        taskIndex = new_list.index(task) # Get list position
        new_list[taskIndex].update({'status': target['checked']}) # Update
        setTaskList(new_list) # Update our state
    def ListItem(props):
        # Render one task row, or nothing if it is filtered out.
        task = props['task']
        if taskFilter == "all" or \
                (taskFilter == "open" and not task['status']) or \
                (taskFilter == "closed" and task['status']):
            return el('li', None,
                      task['name'] + " ",
                      el('button',
                         {'type': 'button',
                          'onClick': lambda: handleDelete(task)
                          }, "Delete"
                         ),
                      el('button',
                         {'type': 'button',
                          'onClick': lambda: handleEdit(task)
                          }, "Edit"
                         ),
                      el('label', {'htmlFor': 'status'}, " Completed:"),
                      el('input',
                         {'type': 'checkbox',
                          'id': 'status',
                          'onChange': lambda e: handleChangeStatus(e, task),
                          'checked': task['status']
                          }
                         ),
                      )
        else:
            return None
    def ListItems():
        return [el(ListItem, {'key': task['name'], 'task': task}) for task in taskList]
    def updateCount():
        # Recompute the visible-task count whenever the list or filter changes.
        if taskFilter == 'open':
            new_list = [task for task in taskList if not task['status']]
        elif taskFilter == 'closed':
            new_list = [task for task in taskList if task['status']]
        else:
            new_list = [task for task in taskList]
        setTaskCount(len(new_list))
    useEffect(lambda: setTitle("ToDo List"), [])
    useEffect(updateCount, [taskList, taskFilter])
    return el('form', {'onSubmit': handleSubmit},
              el('div', None, f"Number of Tasks: {taskCount}"),
              el('div', None,
                 el('label', {'htmlFor': 'all'}, "All Tasks:"),
                 el('input', {'type': 'radio',
                              'name': 'taskFilter',
                              'id': 'all',
                              'value': 'all',
                              'onChange': handleChange,
                              'checked': taskFilter == 'all'
                              }
                    ),
                 el('label', {'htmlFor': 'open'}, " Active:"),
                 el('input', {'type': 'radio',
                              'name': 'taskFilter',
                              'id': 'open',
                              'value': 'open',
                              'onChange': handleChange,
                              'checked': taskFilter == 'open'
                              }
                    ),
                 el('label', {'htmlFor': 'closed'}, " Completed:"),
                 el('input', {'type': 'radio',
                              'name': 'taskFilter',
                              'id': 'closed',
                              'value': 'closed',
                              'onChange': handleChange,
                              'checked': taskFilter == 'closed'
                              }
                    ),
                 ),
              el('label', {'htmlFor': 'editBox'},
                 "Edit Task: " if editTask is not None else "Add Task: "
                 ),
              el('input', {'id': 'editBox',
                           'onChange': handleChange,
                           'value': newTask
                           }
                 ),
              el('input', {'type': 'submit'}),
              el('ol', None,
                 el(ListItems, None)
                 ),
              )
# Mount the App component into the DOM node with id 'root'.
render(App, None, 'root')
| 1.789063 | 2 |
features/steps/levenshtein_steps.py | clibc/howabout | 2 | 12798264 | <filename>features/steps/levenshtein_steps.py
import random
from behave import given, when, then
from howabout import get_levenshtein
@given('two long strings')
def step_two_long_strings(context):
    """Generate two random 1024-character lowercase strings.

    Bug fix: the old ``random_str`` lambda returned a *list* of characters,
    not a string, despite the step's name.
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'

    def random_str(size):
        return ''.join(random.choice(alphabet) for _ in range(size))

    context.first = random_str(1024)
    context.second = random_str(1024)
@given('two empty strings')
def step_two_empty_strings(context):
    # Degenerate case: both inputs empty; expected distance is 0.
    context.first = ''
    context.second = ''
@when('we compare them')
def step_compare_two_strings(context):
    # Store the result so the following "then" steps can assert on it.
    context.distance = get_levenshtein(context.first, context.second)
@then('the interpreter should not overflow')
def step_assert_no_overflow(context):
assert not context.failed
@given('"{string}" and the empty string')
def step_a_string_and_the_emtpy_string(context, string):
context.first = string
context.second = ''
@given('a string "{string}"')
def step_a_string(context, string):
    # Only one string is stored; self-comparison steps reuse it.
    context.first = string
@when('we compare it to itself')
def step_compare_string_to_itself(context):
    """Compare the stored string with itself; the distance must be 0.

    Bug fix: the old code built a tuple ``(first, first)`` and passed the
    tuple (twice) to get_levenshtein instead of the string itself.
    """
    context.distance = get_levenshtein(context.first, context.first)
@then('the distance is {distance:d}')
def step_assert_distance(context, distance):
    # Compare against the distance computed in the preceding "when" step.
    assert context.distance == distance
@given('the first string "{first}" and the second string "{second}" starting with "{prefix}"')
def step_impl2(context, first, second, prefix):
    """
    Store both strings on the context; *prefix* is captured only for step
    matching and is not used in the body.

    :type context behave.runner.Context
    :type first str
    :type second str
    :type prefix str
    """
    context.first = first
    context.second = second
hplusminus/sid.py | bio-phys/hplusminus | 1 | 12798272 | <filename>hplusminus/sid.py
# Copyright (c) 2020 <NAME>, Max Planck Institute of Biophysics, Frankfurt am Main, Germany
# Released under the MIT Licence, see the file LICENSE.txt.
import os
import numpy as np
from scipy.stats import gamma as gamma_dist
import scipy
def _get_package_gsp():
    """
    Return the directory path containing gamma spline parameter files that come bundled with the package.

    -------
    gsp_dir: str
        directory path containing gamma spline parameter files

    Raises RuntimeError when the bundled "gsp" directory is missing.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    gsp_dir = os.path.join(here, "gsp")
    if not os.path.exists(gsp_dir):
        raise RuntimeError("gamma spline parameter directory not found at " + gsp_dir)
    return gsp_dir
def load_spline_parameters(ipath, tests=['h', 'both', 'h_simple', 'both_simple']):
"""
Load knots and coefficients for B-splines representing :math:`\alpha`, :math:`\beta`, :math:`\matcal{I}_o` paramters of the shifted gamma disributions as functions of :math:`\log_{10} N`, where :math:`N` is the number of data points.
Parameters
----------
ipath: str
Input path.
tests: List of str (optional)
Names of tests, for which paramaters are read in. Names identify the corresponding files.
Returns
-------
spline_par: dict
Dictionary containing knots and coefficients of B-splines for all tests and parameters of the shifted gamma disributions.
"""
spline_par = {}
for k in tests:
spline_par[k] = {}
for na in ["alpha", "beta", "I0"]:
spline_par[k][na] = {}
for tmp in ["knots", "coeffs"]:
iname = "%s_%s_%s.npy" % (tmp, k, na)
spline_par[k][na][tmp] = np.load(os.path.join(ipath, iname))
return spline_par
def cumulative_SID_gamma(SI, alpha, beta, I0):
    """
    Survival function (1 - CDF) of the Shannon information under a shifted
    gamma distribution.

    Parameters
    ----------
    SI: float or array-like
        Shannon information
    alpha: float
        Shape parameter of the gamma distribution.
    beta: float
        Inverse scale parameter of the gamma distribution.
    I0: float
        Shift (location) parameter of the gamma distribution.

    Returns
    -------
    cdf: float
        Probability of observing a Shannon information larger than SI.
    """
    return 1. - gamma_dist.cdf(SI, alpha, scale=1. / beta, loc=I0)
def get_spline(spline_par, tests=['h', 'both', 'h_simple', 'both_simple']):
    """
    Build B-spline function objects for the data-size dependence of the gamma
    distribution parameters (alpha, beta, I0) of each test.

    Parameters
    ----------
    spline_par: dict
        Knots and coefficients per test and parameter, as returned by
        load_spline_parameters().
    tests: List of str (optional)
        Names of tests.

    Returns
    -------
    spline_func: dict
        spline_func[test][param] is a cubic scipy.interpolate.BSpline.
    """
    spline_func = {}
    for test in tests:
        spline_func[test] = {
            par_name: scipy.interpolate.BSpline(
                t=spline_par[test][par_name]["knots"],
                c=spline_par[test][par_name]["coeffs"],
                k=3,
            )
            for par_name in ("alpha", "beta", "I0")
        }
    return spline_func
def get_gamma_parameters(Ns, test, spline_func):
    """
    Evaluate the shifted-gamma parameters for a given number of data points.

    Parameters
    ----------
    Ns: int
        Number of data points.
    test: str
        Name of test.
    spline_func: dict
        Dictionary of spline functions. Output of get_spline() or init().

    Returns
    -------
    alpha: float
        Shape parameter of the gamma distribution.
    beta: float
        Inverse scale parameter of the gamma distribution.
    I0: float
        Shift (location) parameter of the gamma distribution.
    """
    # The splines are parameterized in log10 of the data size.
    x = np.log10(Ns)
    funcs = spline_func[test]
    return funcs["alpha"](x), funcs["beta"](x), funcs["I0"](x)
def init(gamma_params_ipath=_get_package_gsp()):
    """
    Initialise the spline function objects from the bundled (or given)
    gamma spline parameter directory.

    Parameters
    ----------
    gamma_params_ipath: str
        Input path.

    Returns
    -------
    spline_func: dict
        Dictionary of spline functions, as produced by get_spline().
    """
    return get_spline(load_spline_parameters(gamma_params_ipath))
def cumulative(SI, number_data_points, test, spline_func):
    """
    Calculate p-values for given test using the gamma distribution
    approximation of the Shannon information distribution.

    Parameters
    ----------
    SI: float
        Shannon information value.
    number_data_points: int
        Number of data points.
    test: str
        Name of statistical test; one of
        'chi2', 'h', 'hpm', 'chi2_h', 'chi2_hpm'.
    spline_func: dict
        Dictionary of spline functions. Output of get_spline() or init().

    Returns
    -------
    p-value: float
        P-value for given test, or -1. for an unknown test name.
    """
    # Map each test name to the spline table holding its gamma parameters;
    # 'chi2' is handled analytically below.
    if test == "chi2":
        alpha = 0.5
        beta = 1.
        I0 = -np.log(scipy.stats.chi2.pdf(number_data_points - 2, number_data_points))
    elif test == "h":
        alpha, beta, I0 = get_gamma_parameters(number_data_points, "h_simple", spline_func)
    elif test == "hpm":
        alpha, beta, I0 = get_gamma_parameters(number_data_points, "h", spline_func)
    elif test == "chi2_h":
        alpha, beta, I0 = get_gamma_parameters(number_data_points, "both_simple", spline_func)
    elif test == "chi2_hpm":
        alpha, beta, I0 = get_gamma_parameters(number_data_points, "both", spline_func)
    else:
        # Bug fix: the original message lacked the '% test' argument and
        # printed the literal "%s" placeholder.
        print("Error: Test \"%s\" not available!" % test)
        print("Exiting. Returning -1.")
        return -1.
    return cumulative_SID_gamma(SI, alpha, beta, I0)
def get_p_value(SI, number_data_points, test, spline_func):
    """
    Thin wrapper around cumulative(): calculate the p-value for a given test
    using the gamma distribution approximation of the Shannon information
    distribution.

    Parameters
    ----------
    SI: float
        Shannon information value.
    number_data_points: int
        Number of data points.
    test: str
        Name of statistical test.
    spline_func: dict
        Dictionary of spline functions. Output of get_spline() or init().

    Returns
    -------
    p-value: float
        P-value for given test.
    """
    return cumulative(SI, number_data_points, test, spline_func)
| 1.515625 | 2 |
moulin/builders/android_kernel.py | Deedone/moulin | 0 | 12798280 | # SPDX-License-Identifier: Apache-2.0
# Copyright 2021 EPAM Systems
"""
Android kernel builder module
"""
import os.path
from typing import List
from moulin.yaml_wrapper import YamlValue
from moulin import ninja_syntax
def get_builder(conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],
                generator: ninja_syntax.Writer):
    """
    Return configured AndroidKernel class
    """
    # Factory entry point used by the moulin core to instantiate this builder.
    return AndroidKernel(conf, name, build_dir, src_stamps, generator)
def gen_build_rules(generator: ninja_syntax.Writer):
    """
    Generate the Android kernel build rule for ninja.

    $env and $build_dir are ninja variables substituted per build edge.
    """
    steps = [
        "export $env",
        "cd $build_dir",
        "build/build.sh",
    ]
    cmd = " && ".join(steps)
    generator.rule("android_kernel_build",
                   command=f'bash -c "{cmd}"',
                   description="Invoke Android Kernel build script",
                   pool="console")
    generator.newline()
class AndroidKernel:
    """
    Generates Ninja build rules for a given Android kernel build configuration.
    """
    def __init__(self, conf: YamlValue, name: str, build_dir: str, src_stamps: List[str],
                 generator: ninja_syntax.Writer):
        self.conf = conf
        self.name = name
        self.generator = generator
        self.src_stamps = src_stamps
        self.build_dir = build_dir

    def gen_build(self):
        """Generate ninja rules to build the Android kernel"""
        env_node = self.conf.get("env", None)
        # Optional "env" list from the YAML config becomes a space-separated
        # string exported before the build script runs.
        env = " ".join(x.as_str for x in env_node) if env_node else ""
        targets = self.get_targets()
        self.generator.build(targets, "android_kernel_build", self.src_stamps,
                             variables={"build_dir": self.build_dir, "env": env})
        self.generator.newline()
        return targets

    def get_targets(self):
        "Return list of targets that are generated by this build"
        return [os.path.join(self.build_dir, t.as_str) for t in self.conf["target_images"]]

    def capture_state(self):
        """
        Capture Android Kernel state for reproducible builds.

        Nothing to do here: the kernel state is controlled solely by its repo
        manifest, and that state is already captured by the repo fetcher.
        """
| 1.617188 | 2 |
ismore/invasive/sim_passive_movement.py | DerekYJC/bmi_python | 0 | 12798288 | import numpy as np
import socket, struct
from ismore import settings, udp_feedback_client
import time
from ismore import common_state_lists, ismore_bmi_lib
import pandas as pd
import pickle
import os
class Patient(object):
    """Streams assistive velocity commands to the ArmAssist/ReHand exoskeleton.

    Current plant state is read from the armassist/rehand log files; an
    IsMore assister computes velocities toward targets loaded from a pickled
    targets matrix, and the commands are sent over UDP.
    (Python 2 module -- note the print statements below.)
    """
    def __init__(self, targets_matrix_file):
        # One UDP address/socket pair per plant: [ArmAssist, ReHand].
        self.addrs = [settings.ARMASSIST_UDP_SERVER_ADDR, settings.REHAND_UDP_SERVER_ADDR]
        self.socks = [socket.socket(socket.AF_INET, socket.SOCK_DGRAM), socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
        # Velocity-command DoF indices: ArmAssist 0-2, ReHand 3-6.
        self.n_dofs = [range(3), range(3, 7)]
        self.plant_types = ['ArmAssist', 'ReHand']
        # Index ranges into each plant's feedback packet (positions/velocities).
        self.aa_p = range(3) #common_state_lists.aa_pos_states
        self.rh_p = range(4) #common_state_lists.rh_pos_states
        self.rh_v = range(4, 8) #common_state_lists.rh_vel_states
        self.aa_v = range(3, 6)
        #self.aa = udp_feedback_client.ArmAssistData()
        #self.rh = udp_feedback_client.ReHandData()
        #self.aa.start()
        #self.last_aa_pos = pd.Series(np.zeros((3, )), dtype=self.aa_p) #self.aa.get()['data'][self.aa_p]
        #self.last_aa_pos_t = time.time()
        #self.rh.start()
        assister_kwargs = {
            'call_rate': 20,
            'xy_cutoff': 5,
        }
        self.assister = ismore_bmi_lib.ASSISTER_CLS_DICT['IsMore'](**assister_kwargs)
        self.targets_matrix = pickle.load(open(targets_matrix_file))
    def send_vel(self, vel):
        # Split the 7-DoF velocity vector and send each plant its own slice.
        for i, (ia, sock, ndof, plant) in enumerate(zip(self.addrs, self.socks, self.n_dofs, self.plant_types)):
            self._send_command('SetSpeed %s %s\r' % (plant, self.pack_vel(vel[ndof], ndof)), ia, sock)
    def pack_vel(self, vel, n_dof):
        # Format the velocities as space-separated floats for the UDP command.
        format_str = "%f " * len(n_dof)
        return format_str % tuple(vel)
    def _send_command(self, command, addr, sock):
        sock.sendto(command, addr)
    def _get_current_state(self):
        # Read the most recent complete feedback line ([-2], since the last
        # line may still be partially written) from each plant's log file.
        #aa_data = self.aa.get()['data']
        with open(os.path.expandvars('$HOME/code/bmi3d/log/armassist.txt'), 'r') as f:
            lines = f.read().splitlines()
            last_line = lines[-2]
            aa_data = np.array([float(i) for i in last_line.split(',')])
        with open(os.path.expandvars('$HOME/code/bmi3d/log/rehand.txt'), 'r') as f:
            lines = f.read().splitlines()
            last_line = lines[-2]
            rh_data = np.array([float(i) for i in last_line.split(',')])
        #daa = np.array([aa_data[0][i] - self.last_aa_pos[0][i] for i in range(3)])
        #aa_vel = daa/(time.time() - self.last_aa_pos_t)
        #self.last_aa_pos = aa_data[self.aa_p]
        #rh_data = self.rh.get()['data']
        # Concatenate into a 14-element [positions, velocities] state vector.
        pos = np.hstack(( aa_data[self.aa_p], rh_data[self.rh_p] ))
        vel = np.hstack(( aa_data[self.aa_v], rh_data[self.rh_v] ))
        return np.hstack((pos, vel))
    def get_to_target(self, target_pos):
        # One assister step toward target_pos (7 positions); velocities of the
        # target state are zero.  Returns the squared state-space distance.
        current_state = np.mat(self._get_current_state()).T
        target_state = np.mat(np.hstack((target_pos, np.zeros((7, ))))).T
        assist_kwargs = self.assister(current_state, target_state, 1., mode=None)
        # Bu rows 7:14 hold the assistive velocity terms; scaled by 10 before
        # sending (gain chosen empirically -- TODO confirm units).
        self.send_vel(10*np.squeeze(np.array(assist_kwargs['Bu'][7:14])))
        return np.sum((np.array(current_state)-np.array(target_state))**2)
    def go_to_target(self, target_name, tix=0):
        # A targets-matrix entry may hold several sub-targets; tix selects one.
        if len(self.targets_matrix[target_name].shape) > 1:
            targ = self.targets_matrix[target_name][tix]
        else:
            targ = self.targets_matrix[target_name]
        # Iterate until the squared distance drops below the tolerance of 20.
        d = 100
        while d > 20:
            d = self.get_to_target(targ)
            print d
Python/hardware/Arms.py | marcostrullato/RoobertV2 | 0 | 12798296 | #!/usr/bin/env python
# Roobert V2 - second version of home robot project
# ________ ______ _____
# ___ __ \______________ /_______________ /_
# __ /_/ / __ \ __ \_ __ \ _ \_ ___/ __/
# _ _, _// /_/ / /_/ / /_/ / __/ / / /_
# /_/ |_| \____/\____//_.___/\___//_/ \__/
#
# Project website: http://roobert.springwald.de
#
# ########
# # Arms #
# ########
#
# Licensed under MIT License (MIT)
#
# Copyright (c) 2018 <NAME> | <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import division
import time, sys, os
my_file = os.path.abspath(__file__)
my_path ='/'.join(my_file.split('/')[0:-1])
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/multitasking")
sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/hardware")
from MultiProcessing import *
from array import array
from SharedInts import SharedInts
from SharedFloats import SharedFloats
from LX16AServos import LX16AServos
from SmartServoManager import SmartServoManager
import atexit
clear = lambda: os.system('cls' if os.name=='nt' else 'clear')
class Arms():
    """Controls Roobert's two arms via LX16A smart servos.

    Right-arm servos use IDs 1-8, left-arm servos IDs 11-18; the left arm
    mirrors the right, so left targets are computed by reflecting right-arm
    values around each servo's centered value.  Gestures are lists of
    [servoId, position] pairs expressed in right-arm coordinates.
    """
    _servoManager = None;
    _released = False;
    # Predefined right-arm gestures ([servoId, target] pairs).
    _armHanging = [[1,185],[3,273],[5,501],[6,541],[7,495],[8,499]]
    _lookAtHand = [[1,226],[3,680],[5,346],[6,802],[7,830],[8,499]]
    _wink1 = [[1,476],[3,770],[5,396],[6,866],[7,542],[8,499]]
    _wink2 = [[1,459],[3,639],[5,396],[6,739],[7,601],[8,499]]
    _stretchSide = [[1,335],[3,442],[5,542],[6,593],[7,770],[8,499]]
    #_rightCenteredValues = [[1,370],[3,685],[5,510],[6,460],[7,495],[8,500]]
    def __init__(self, smartServoManager, leftHandOpen=480, leftHandClose=580, rightHandOpen=540, rightHandClose=430):
        # Hand open/close servo positions are calibration values per robot.
        self._servoManager = smartServoManager
        self._leftHandOpen = leftHandOpen
        self._leftHandClose = leftHandClose
        self._rightHandOpen = rightHandOpen
        self._rightHandClose = rightHandClose
        self.DefineArms()
        #self.SetArm(gesture=Arms._armHanging, left=False);
        #self.SetHand(opened=True, left=False);
        #self.SetArm(gesture=Arms._armHanging, left=True);
        #self.SetHand(opened=True, left=True);
        #self.WaitTillTargetsReached();
    def DefineArms(self):
        """Register all arm servos (masters and mirrored slaves) with their centered values."""
        # right arm
        self._servoManager.AddMasterServo(servoId=1, centeredValue=370);
        self._servoManager.AddSlaveServo(servoId=2, masterServoId=1, reverseToMaster=-1, centeredValue=608);
        self._servoManager.AddMasterServo(servoId=3, centeredValue=685);
        self._servoManager.AddSlaveServo(servoId=4, masterServoId=3, reverseToMaster=-1, centeredValue=352);
        self._servoManager.AddMasterServo(servoId=5, centeredValue=510);
        self._servoManager.AddMasterServo(servoId=6, centeredValue=460);
        self._servoManager.AddMasterServo(servoId=7, centeredValue=495);
        self._servoManager.AddMasterServo(servoId=8, centeredValue=500);
        # left arm
        self._servoManager.AddMasterServo(servoId=11, centeredValue=545);
        self._servoManager.AddSlaveServo(servoId=12, masterServoId=11, reverseToMaster=-1, centeredValue=459);
        self._servoManager.AddMasterServo(servoId=13, centeredValue=329);
        self._servoManager.AddSlaveServo(servoId=14, masterServoId=13, reverseToMaster=-1, centeredValue=700);
        self._servoManager.AddMasterServo(servoId=15, centeredValue=477);
        self._servoManager.AddMasterServo(servoId=16, centeredValue=486);
        self._servoManager.AddMasterServo(servoId=17, centeredValue=501);
        self._servoManager.AddMasterServo(servoId=18, centeredValue=503);
    def PrintRightArmValues(self):
        """Debug helper: continuously print right-arm servo readings (never returns)."""
        for id in range(1,8):
            self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
        self._servoManager.Start()
        while(True):
            self._servoManager.PrintReadOnlyServoValues()
            time.sleep(0.1)
    def PrintLeftArmValues(self):
        """Debug helper: continuously print left-arm servo readings (never returns)."""
        for id in range(11,18):
            self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
        self._servoManager.Start()
        while(True):
            self._servoManager.PrintReadOnlyServoValues(onlyMasterServos=False)
            time.sleep(0.1)
    def MirrorRightArmToLeftStart(self):
        # Put the right arm into read-only mode so it can be moved by hand.
        for id in range(1,8):
            self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=True);
        #self._servoManager.Start()
    def MirrorRightArmToLeftUpdate(self):
        # Copy each right master servo to its left counterpart (id+10),
        # reflecting the position around the centered values.
        for id in [1,3,5,6,7,8]:
            value = self._servoManager.ReadServo(id);
            #print (str(id) + ":" +str(value))
            value = -(value - self._servoManager.GetCenteredValue(id)) + self._servoManager.GetCenteredValue(id+10)
            self._servoManager.MoveServo(id+10, pos=value);
    def MirrorRightArmToLeftEnd(self):
        # Give the right arm back to normal (writable) control.
        for id in range(1,8):
            self._servoManager.SetIsReadOnly(servoId=id, isReadOnly=False);
    def SetArm(self, gesture, left):
        """Drive one arm to a gesture; left targets are mirrored from right-arm values."""
        for p in range(0,len(gesture)):
            id = gesture[p][0]
            value = gesture[p][1]
            if (left == True):
                id = id + 10;
                value = -(value - self._servoManager.GetCenteredValue(id-10)) + self._servoManager.GetCenteredValue(id)
                self._servoManager.MoveServo(id,value);
                #print ("left:" + str(id));
            else:
                self._servoManager.MoveServo(id,value);
                #print ("right:" + str(id))
    def WaitTillTargetsReached(self):
        """Block until the servo manager reports all move targets reached."""
        while (self._servoManager.allTargetsReached == False):
            time.sleep(0.1);
    def SetHand(self, opened, left):
        """Open or close one hand using the calibrated open/close positions."""
        if (left==True):
            if (opened==True):
                self._servoManager.MoveServo(18,self._leftHandOpen)
            else:
                self._servoManager.MoveServo(18,self._leftHandClose)
        else:
            if (opened==True):
                self._servoManager.MoveServo(8,self._rightHandOpen);
            else:
                self._servoManager.MoveServo(8,self._rightHandClose);
    def Release(self):
        """Return both arms to the hanging pose with open hands (idempotent)."""
        if (self._released == False):
            self._released = True;
            self.SetArm(gesture=Arms._armHanging, left=False);
            self.SetArm(gesture=Arms._armHanging, left=True);
            self.SetHand(opened=True, left=False);
            self.SetHand(opened=True, left=True);
            self.WaitTillTargetsReached();
    def __del__(self):
        self.Release()
def exit_handler():
    # atexit hook for the demo below: park the arms, then shut down the
    # servo manager and the serial servo bus (module-level globals).
    tester.Release()
    servoManager.Release()
    servos.Release()
if __name__ == "__main__":
atexit.register(exit_handler)
ended = False;
servos = LX16AServos()
servoManager = SmartServoManager(lX16AServos=servos, ramp=0, maxSpeed=1)
tester = Arms(servoManager)
#tester.MirrorRightArmToLeft();
#tester.PrintRightArmValues()
tester.PrintLeftArmValues();
servoManager.Start();
#time.sleep(1);
#tester.SetArm(gesture=Arms._rightCenteredValues, left=True);
#tester.WaitTillTargetsReached();
#while(True):
# print()
while(True):
tester.SetArm(gesture=Arms._armHanging, left=False);
tester.SetArm(gesture=Arms._armHanging, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._lookAtHand, left=False);
tester.WaitTillTargetsReached();
for i in range(1,4):
tester.SetArm(gesture=Arms._wink2, left=False);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._wink1, left=False);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._armHanging, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._lookAtHand, left=True);
tester.WaitTillTargetsReached();
for i in range(1,4):
tester.SetArm(gesture=Arms._wink2, left=True);
tester.WaitTillTargetsReached();
tester.SetArm(gesture=Arms._wink1, left=True);
tester.WaitTillTargetsReached();
#plus = 100
#servoManager.Start()
#while(True):
#plus = - plus
##tester._servoManager.MoveServo(1,400+plus)
#tester._servoManager.MoveServo(3,600+plus)
#while (tester._servoManager.allTargetsReached == False):
#time.sleep(0.1)
#tester.SetHand(opened=False, left= True);
#tester.SetHand(opened=False, left= False);
#tester.WaitTillTargetsReached();
#time.sleep(1);
#tester.SetHand(opened=True, left= True);
#tester.SetHand(opened=True, left= False);
#tester.WaitTillTargetsReached();
#time.sleep(1);
##while(True):
## time.sleep(1)
## print("sleep")
#tester.SetArm(gesture=Arms._strechSide, left=True);
#tester.WaitTillTargetsReached();
##tester.SetArm(gesture=Arms._lookHand, left=False);
##tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._strechSide, left=True);
#tester.SetArm(gesture=Arms._strechSide, left=False);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink1, left=True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink2, left= True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink1, left=True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._wink2, left= True);
#tester.WaitTillTargetsReached();
#tester.SetHand(opened=False, left= True);
#tester.SetArm(gesture=Arms._ghettoFist1, left= True);
#tester.WaitTillTargetsReached();
#tester.SetArm(gesture=Arms._ghettoFist2, left= True);
#tester.WaitTillTargetsReached();
print("done");
| 1.570313 | 2 |
12403/save_setu.py | sc458/uHunt-solutions | 0 | 12798304 | res = 0
T = int(input())
for i in range(0,T):
inp = input()
if(inp == 'report'):
print(res)
else:
inp_arr = inp.split(' ')
res += int(inp_arr[1])
| 1.820313 | 2 |
lib/fama/pe_functional_pipeline.py | aekazakov/FamaProfiling | 0 | 12798312 | <reponame>aekazakov/FamaProfiling
"""Runs Fama functional profiling pipeline"""
import os
import gzip
from fama.utils.const import ENDS, STATUS_GOOD
from fama.se_functional_pipeline import run_fastq_pipeline
from fama.utils.utils import run_external_program
from fama.project.sample import Sample
from fama.diamond_parser.diamond_parser import DiamondParser
from fama.output.report import generate_fastq_report, generate_sample_report
from fama.output.pdf_report import generate_pdf_report
from fama.output.krona_xml_writer import make_functions_chart
from fama.output.json_util import export_annotated_reads, export_sample
from fama.third_party.microbe_census import run_pipeline, report_results
from fama.diamond_parser.hit_utils import parse_fastq_seqid
def run_ref_search(parser, command):
    """Runs pre-selection DIAMOND search

    Args:
        parser (:obj:DiamondParser): parser object processing an input sequence file
        command (str): either 'blastx' or 'blastp' (see DIAMOND manual)
    """
    print('Starting DIAMOND')
    sample_id = parser.sample.sample_id
    collection = parser.options.get_collection(sample_id)
    output_path = os.path.join(
        parser.options.get_project_dir(sample_id),
        sample_id + '_' + parser.end + '_' + parser.options.ref_output_name
    )
    diamond_args = [
        parser.config.diamond_path,
        command,
        '--db', parser.config.get_reference_diamond_db(collection),
        '--query', parser.options.get_fastq_path(sample_id, parser.end),
        '--out', output_path,
        '--max-target-seqs', '50',
        '--evalue', str(parser.config.get_evalue_cutoff(collection)),
        # '--threads', parser.config.threads,
        '--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length',
        'mismatch', 'slen', 'qstart', 'qend', 'sstart', 'send',
        'evalue', 'bitscore',
    ]
    run_external_program(diamond_args)
    print('DIAMOND finished')
def run_bgr_search(parser, command):
"""Runs classification DIAMOND search
Args:
parser (:obj:DiamondParser): parser object processing an input sequence file
command (str): either 'blastx' or 'blastp' (see DIAMOND manual)
"""
print('Starting DIAMOND')
diamond_args = [parser.config.diamond_path,
command,
'--db',
parser.config.get_background_diamond_db(
parser.options.get_collection(parser.sample.sample_id)
),
'--query',
os.path.join(
parser.options.get_project_dir(parser.sample.sample_id),
parser.sample.sample_id + '_' + parser.end + '_'
+ parser.options.ref_hits_fastq_name
),
'--out',
os.path.join(
parser.options.get_project_dir(parser.sample.sample_id),
parser.sample.sample_id + '_' + parser.end + '_'
+ parser.options.background_output_name
),
'--max-target-seqs',
'100',
'--evalue',
str(
parser.config.get_background_db_size(
parser.options.get_collection(parser.sample.sample_id)
) * parser.config.get_evalue_cutoff(
parser.options.get_collection(parser.sample.sample_id)
) / parser.config.get_reference_db_size(
parser.options.get_collection(parser.sample.sample_id)
)),
# '--threads',
# parser.config.threads,
'--outfmt', '6', 'qseqid', 'sseqid', 'pident', 'length', 'mismatch',
'slen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
run_external_program(diamond_args)
print('DIAMOND finished')
def run_microbecensus(sample, config):
"""Runs MicrobeCensus
Args:
sample (:obj:Sample): sample analyzed
config (:obj:ProgramConfig): program configuration object
"""
args = {}
if sample.is_paired_end:
args['seqfiles'] = [sample.fastq_fwd_path, sample.fastq_rev_path]
else:
args['seqfiles'] = [sample.fastq_fwd_path]
args['verbose'] = True
args['diamond'] = config.diamond_path
args['data_dir'] = config.microbecensus_datadir
args['outfile'] = os.path.join(sample.work_directory, 'microbecensus.out.txt')
args['threads'] = int(config.threads)
args['no_equivs'] = True
if sample.fastq_fwd_readcount < 1500000:
# MicrobeCensus subsamples 2M reads by default, but sequence library
# must have more reads as some reads are always discarded by filtering
args['nreads'] = sample.fastq_fwd_readcount // 2
elif sample.fastq_fwd_readcount < 3000000:
args['nreads'] = sample.fastq_fwd_readcount - 1000000
else:
args['nreads'] = 2000000
print(args)
est_ags, args = run_pipeline(args)
report_results(args, est_ags, None)
def import_fastq_pe(parser1, parser2):
"""Reads uncompressed or gzipped FASTQ file, finds sequences of
selected reads and stores them
Returns:
read_count (int): number of reads in the file
base_count (int): total number of bases in all reads
"""
fastq_file1 = parser1.options.get_fastq_path(parser1.sample.sample_id, parser1.end)
line_counter = 0
read_count1 = 0
base_count1 = 0
current_read = None
infile_handle = None
if fastq_file1.endswith('.gz'):
infile_handle = gzip.open(fastq_file1, 'rb')
else:
infile_handle = open(fastq_file1, 'rb')
for line in infile_handle:
# count lines as each FASTQ entry has exactly four lines
line_counter += 1
if line_counter == 5:
line_counter = 1
line = line.decode('utf8').rstrip('\n\r')
if line_counter == 1:
read_count1 += 1
(read_id, _) = parse_fastq_seqid(line)
current_read = read_id
if current_read in parser1.reads:
parser1.reads[current_read].read_id_line = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_id = line
elif line_counter == 2:
base_count1 += len(line)
if current_read in parser1.reads:
parser1.reads[current_read].sequence = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_sequence = line
elif line_counter == 3:
if current_read in parser1.reads:
parser1.reads[current_read].line3 = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_line3 = line
elif line_counter == 4:
if current_read in parser1.reads:
parser1.reads[current_read].quality = line
if current_read in parser2.reads:
parser2.reads[current_read].pe_quality = line
infile_handle.close()
fastq_file2 = parser1.options.get_fastq_path(parser2.sample.sample_id, parser2.end)
line_counter = 0
read_count2 = 0
base_count2 = 0
current_read = None
if fastq_file2.endswith('.gz'):
infile_handle = gzip.open(fastq_file2, 'rb')
else:
infile_handle = open(fastq_file2, 'rb')
for line in infile_handle:
# count lines as each FASTQ entry has exactly four lines
line_counter += 1
if line_counter == 5:
line_counter = 1
line = line.decode('utf8').rstrip('\n\r')
if line_counter == 1:
read_count2 += 1
(read_id, _) = parse_fastq_seqid(line)
current_read = read_id
if current_read in parser1.reads:
parser1.reads[current_read].pe_id = line
if current_read in parser2.reads:
parser2.reads[current_read].read_id_line = line
elif line_counter == 2:
base_count2 += len(line)
if current_read in parser1.reads:
parser1.reads[current_read].pe_sequence = line
if current_read in parser2.reads:
parser2.reads[current_read].sequence = line
elif line_counter == 3:
if current_read in parser1.reads:
parser1.reads[current_read].pe_line3 = line
if current_read in parser2.reads:
parser2.reads[current_read].line3 = line
elif line_counter == 4:
if current_read in parser1.reads:
parser1.reads[current_read].pe_quality = line
if current_read in parser2.reads:
parser2.reads[current_read].quality = line
infile_handle.close()
return (parser1, parser2, read_count1, read_count2, base_count1, base_count2)
def export_paired_end_reads_fastq(parser):
""" For paired-end sequence reads, write paired-end reads for pre-selected
reads into a separate FASTQ file
"""
outdir = parser.sample.work_directory
read_ids = {}
for read_id in sorted(parser.reads.keys()):
read_ids[read_id] = read_id
fastq_outfile = os.path.join(outdir,
parser.sample.sample_id + '_'
+ parser.end + '_'
+ parser.options.pe_reads_fastq_name + '.gz')
with gzip.open(fastq_outfile, 'wt') as outfile:
for read_id in sorted(parser.reads.keys()):
outfile.write(parser.reads[read_id].pe_id + '\n')
outfile.write(parser.reads[read_id].pe_sequence + '\n')
outfile.write(parser.reads[read_id].pe_line3 + '\n')
outfile.write(parser.reads[read_id].pe_quality + '\n')
def fastq_pe_pipeline(project, sample_identifier=None, end_identifier=None):
"""Functional profiling pipeline for entire project
Args:
project (:obj:Project): current project
sample_identifier (str, optional): sample identifier
end_identifier (str, optional): end identifier
"""
for sample_id in project.list_samples():
if sample_identifier and sample_identifier != sample_id:
continue
sample = Sample(sample_id)
sample.load_sample(project.options)
project.samples[sample_id] = sample
if end_identifier:
project.samples[sample_id].reads[end_identifier] = \
run_fastq_pipeline(project,
sample=project.samples[sample_id],
end_id=end_identifier)
else:
project.samples[sample_id].reads = \
run_pe_fastq_pipeline(project,
sample=project.samples[sample_id])
export_sample(project.samples[sample_id])
# Generate output for the sample or delete sample from memory
project.options.set_sample_data(project.samples[sample_id])
metric = None
for sample_id in project.list_samples():
if project.is_paired_end():
metric = 'efpkg'
for sample_id in project.list_samples():
if project.samples[sample_id].rpkg_scaling_factor == 0.0:
metric = 'fragmentcount'
else:
metric = 'erpkg'
for sample_id in project.list_samples():
if project.samples[sample_id].rpkg_scaling_factor == 0.0:
metric = 'readcount'
# Generate output for all samples
for sample_id in project.list_samples():
generate_sample_report(project, sample_id, metric=metric)
# Generate output for the project
if sample_identifier is None:
# Skip project report if the pipeline is running for only one sample
project.generate_report()
# Rename existing project file and save current version
project.save_project_options()
return project
def run_pe_fastq_pipeline(project, sample):
"""Functional profiling pipeline for single FASTQ file processing
Args:
project (:obj:Project): current project
sample (:obj:Sample): current sample
"""
result = {}
parser1 = DiamondParser(config=project.config,
options=project.options,
taxonomy_data=project.taxonomy_data,
ref_data=project.ref_data,
sample=sample,
end=ENDS[0])
parser2 = DiamondParser(config=project.config,
options=project.options,
taxonomy_data=project.taxonomy_data,
ref_data=project.ref_data,
sample=sample,
end=ENDS[1])
if not os.path.isdir(project.options.get_project_dir(sample.sample_id)):
os.makedirs(project.options.get_project_dir(sample.sample_id), exist_ok=True)
if not os.path.isdir(os.path.join(project.options.get_project_dir(sample.sample_id),
project.options.get_output_subdir(sample.sample_id))):
os.mkdir(os.path.join(project.options.get_project_dir(sample.sample_id),
project.options.get_output_subdir(sample.sample_id)))
# Search in reference database
if not os.path.exists(
os.path.join(
parser1.options.get_project_dir(parser1.sample.sample_id),
parser1.sample.sample_id + '_' + parser1.end + '_' + parser1.options.ref_output_name
)
):
run_ref_search(parser1, 'blastx')
if not os.path.exists(
os.path.join(
parser2.options.get_project_dir(parser2.sample.sample_id),
parser2.sample.sample_id + '_' + parser2.end + '_' + parser2.options.ref_output_name
)
):
run_ref_search(parser2, 'blastx')
# Process output of reference DB search
parser1.parse_reference_output()
parser2.parse_reference_output()
# Import sequence data for selected sequence reads
print('Reading FASTQ file')
(parser1, parser2, read_count1, read_count2, base_count1, base_count2) = import_fastq_pe(
parser1, parser2
)
if sample.fastq_fwd_readcount == 0:
sample.fastq_fwd_readcount = read_count1
if sample.fastq_fwd_basecount == 0:
sample.fastq_fwd_basecount = base_count1
if sample.fastq_rev_readcount == 0:
sample.fastq_rev_readcount = read_count2
if sample.fastq_rev_basecount == 0:
sample.fastq_rev_basecount = base_count2
if sample.rpkg_scaling_factor == 0.0:
sample.import_rpkg_scaling_factor()
if sample.rpkg_scaling_factor == 0.0:
run_microbecensus(sample=sample, config=project.config)
sample.import_rpkg_scaling_factor()
project.options.set_sample_data(sample)
if parser1.reads:
parser1.export_hit_fastq()
print('Hits for forward end reads exported in FASTQ format')
parser1.export_hit_list()
print('List of hits fo forward end reads exported')
if not os.path.exists(
os.path.join(
parser1.options.get_project_dir(parser1.sample.sample_id),
parser1.sample.sample_id + '_' + parser1.end + '_'
+ parser1.options.background_output_name
)
):
run_bgr_search(parser1, 'blastx')
print('Classification DB search finished')
parser1.parse_background_output()
print('Classification DB search results imported')
parser1.export_read_fastq()
print('Classified forward end reads exported in FASTQ format')
export_paired_end_reads_fastq(parser1)
print('Paired reads for classified forward end reads exported')
export_annotated_reads(parser1)
print('Classified forward end reads exported in JSON format')
generate_fastq_report(parser1)
print('Text report for forward end reads created')
generate_pdf_report(parser1)
print('PDF report for forward end reads created')
make_functions_chart(parser1)
print('Krona chart for forward end reads created')
result[ENDS[0]] = {read_id: read for (read_id, read) in
parser1.reads.items() if read.status == STATUS_GOOD}
else:
# No hits found
print('Pre-selection search did not find any hits for forward end reads')
result[ENDS[0]] = {}
if parser2.reads:
parser2.export_hit_fastq()
print('Hits for reverse end reads exported in FASTQ format')
parser2.export_hit_list()
print('List of hits for reverse end reads exported')
if not os.path.exists(
os.path.join(
parser2.options.get_project_dir(parser2.sample.sample_id),
parser2.sample.sample_id + '_' + parser2.end + '_'
+ parser2.options.background_output_name
)
):
run_bgr_search(parser2, 'blastx')
print('Classification DB search for reverse end reads finished')
parser2.parse_background_output()
print('Classification DB search results for reverse end reads imported')
parser2.export_read_fastq()
print('Classified reverse end reads exported in FASTQ format')
export_paired_end_reads_fastq(parser2)
print('Paired reads for classified reverse end reads exported')
export_annotated_reads(parser2)
print('Classified reverse end reads exported in JSON format')
generate_fastq_report(parser2)
print('Text report for reverse end reads created')
generate_pdf_report(parser2)
print('PDF report for reverse end reads created')
make_functions_chart(parser2)
print('Krona chart for reverse end reads created')
result[ENDS[1]] = {read_id: read for (read_id, read) in
parser2.reads.items() if read.status == STATUS_GOOD}
else:
# No hits found
print('Pre-selection search did not find any hits for reverse end reads')
result[ENDS[1]] = {}
return result
def main():
"""Main function"""
print('This program is not intended to run directly.')
if __name__ == '__main__':
main()
| 1.859375 | 2 |
src/view/view_analisys.py | jcemelanda/PyGPA2.0 | 0 | 12798320 | <filename>src/view/view_analisys.py
from PyQt4 import QtGui, QtCore
from widgets.window_analisys import Analysis_Window
class Analise_View(QtGui.QMainWindow):
def __init__(self, controle):
QtGui.QMainWindow.__init__(self)
self.controle = controle
self.ui = Analysis_Window()
self.ui.setup(self)
self.count = 3
self.shortcut_right = QtGui.QShortcut(QtGui.QKeySequence("l"), self, self.controle.incrementa_view)
self.shortcut_left = QtGui.QShortcut(QtGui.QKeySequence("j"), self, self.controle.decrementa_view)
self.shortcut_end = QtGui.QShortcut(QtGui.QKeySequence("end"), self, self.controle.last_view)
self.shortcut_home = QtGui.QShortcut(QtGui.QKeySequence("home"), self, self.controle.first_view)
QtCore.QObject.connect(self.ui.actionAbrir_Conjunto_de_Matrizes, QtCore.SIGNAL(
'triggered()'), self.controle.abrir_arquivo)
QtCore.QObject.connect(self.ui.horizontalSlider, QtCore.SIGNAL(
'valueChanged(int)'), self.controle.set_view)
QtCore.QObject.connect(self.ui.Tabs, QtCore.SIGNAL(
'currentChanged(int)'), self.controle.set_current_tab)
if __name__ == '__main__':
import sys
app = QtGui.QApplication(sys.argv)
av = Analise_View()
av.add_widgets()
av.showMaximized()
sys.exit(app.exec_())
| 1.53125 | 2 |
database.py | Anve94/DiscordBot-public | 0 | 12798328 | <reponame>Anve94/DiscordBot-public
import os
import sys
from sqlalchemy import Column, ForeignKey, Integer, String, Boolean
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
Base = declarative_base()
# Abuser class to insert people abusing the bugreporting feature
class Abuser(Base):
__tablename__ = 'abusers'
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
# Entry class for voice-related giveaway entries
class Entry(Base):
__tablename__ = 'entries'
# Table column definitions
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
score = Column(Integer, nullable = False)
# EventMessage class for stuff_happening messages
class EventMessage(Base):
__tablename__ = 'message'
id = Column(Integer, primary_key = True)
token = Column(String, nullable = False)
content = Column(String, nullable = False)
image_url = Column(String, nullable = True)
position = Column(Integer, nullable = False)
# Giveaway class to keep track of community giveaways
class Giveaway(Base):
__tablename__ = 'giveaway'
id = Column(Integer, primary_key = True)
discord_id = Column(String, nullable = False)
# Create the engine to the sqlite database
engine = create_engine('sqlite:///database/database.sqlite')
# Handles the creation of tables (if none exist etc.)
Base.metadata.create_all(engine)
| 1.882813 | 2 |
tests/test_all.py | avidale/compress-fasttext | 111 | 12798336 | import os
import gensim
import pytest
import compress_fasttext
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from compress_fasttext.feature_extraction import FastTextTransformer
BIG_MODEL_FILE = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data/test_data/ft_leipzig_ru_mini.bin')
BASE_MODEL_URL = 'https://github.com/avidale/compress-fasttext/releases/download/'
def cosine_sim(x, y):
return sum(x * y) / (sum(x**2) * sum(y**2)) ** 0.5
@pytest.mark.parametrize('method, params', [
(compress_fasttext.quantize_ft, dict(qdim=32)),
(compress_fasttext.prune_ft_freq, dict(pq=False, new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.prune_ft_freq, dict(pq=True, new_ngrams_size=10_000, new_vocab_size=10_000, qdim=16)),
(compress_fasttext.prune_ft, dict(new_ngrams_size=10_000, new_vocab_size=10_000)),
(compress_fasttext.svd_ft, dict(n_components=32)),
])
def test_prune_save_load(method, params):
word1 = 'синий'
word2 = 'белый'
big_ft = gensim.models.fasttext.FastTextKeyedVectors.load(BIG_MODEL_FILE)
vec0 = big_ft[word1]
small_model = method(big_ft, **params)
assert cosine_sim(vec0, small_model[word1]) > 0.75
out1 = small_model.most_similar(word1)
assert word2 in {w for w, sim in out1}
small_model.save('tmp_small.bin')
small_model2 = compress_fasttext.models.CompressedFastTextKeyedVectors.load('tmp_small.bin')
assert cosine_sim(vec0, small_model2[word1]) > 0.75
out2 = small_model2.most_similar(word1)
assert word2 in {w for w, sim in out2}
assert out1[0][1] == pytest.approx(out2[0][1])
@pytest.mark.parametrize('word1, word2, model_name', [
('белый', 'черный', 'gensim-4-draft/geowac_tokens_sg_300_5_2020-100K-20K-100.bin'),
('white', 'black', 'gensim-4-draft/ft_cc.en.300_freqprune_50K_5K_pq_100.bin'),
('white', 'black', 'v0.0.4/cc.en.300.compressed.bin'),
])
def test_loading_existing_models(word1, word2, model_name):
ft = compress_fasttext.models.CompressedFastTextKeyedVectors.load(BASE_MODEL_URL + model_name)
out = ft.most_similar(word1)
assert word2 in {w for w, sim in out}
def test_sklearn_wrapper():
small_model = compress_fasttext.models.CompressedFastTextKeyedVectors.load(
'https://github.com/avidale/compress-fasttext/releases/download/v0.0.4/cc.en.300.compressed.bin'
)
classifier = make_pipeline(
FastTextTransformer(model=small_model),
LogisticRegression()
).fit(
['banana', 'soup', 'burger', 'car', 'tree', 'city'],
[1, 1, 1, 0, 0, 0]
)
assert (classifier.predict(['jet', 'train', 'cake', 'apple']) == [0, 0, 1, 1]).all()
| 1.773438 | 2 |
get_positions.py | gregorytadams/Model_UN | 0 | 12798344 | # get_positions.py
import pandas as pd
from math import ceil
from sys import argv
'''
Current known problems:
- do schools at different times (ew)
- Bias towards double delegate committees
'''
class Team:
def __init__(self, name, num_delegates, preferences):
'''
num_delegats is an int of the total number of delegates
preferences is the ranked preferences as a list, in order (all committees must be present)
picks is the picks we assign to make the draft fair
assigned committees will be the committees assigned to be outputted
'''
self.name = name
self.num_delegates = num_delegates
self.preferences = preferences
self.picks = self._get_picks(list(range(len(preferences))), num_delegates)
self.assigned_committees = []
self.num_dels_assigned = 0
def _get_picks(self, sequence, num):
'''
Intersperses picks for small delegations.
Takes a list of possible rounds the number of picks and returns a list of picks that they get.
Thanks stack overflow!
http://stackoverflow.com/questions/9873626/choose-m-evenly-spaced-elements-from-a-sequence-of-length-n
'''
picks = []
length = float(len(sequence))
for i in range(num):
picks.append(sequence[int(ceil(i * length / num))])
return picks
class Committee:
def __init__(self, name, num_spots, delegation_size):
'''
name: name of committee
num_spots: maximum number of delegates that can be assigned to that committee
delegation size: 1 for single, 2 for double, and so on
assigned schools: the schools who have a spot on the committee
'''
self.name = name
self.num_spots = num_spots
self.delegation_size = delegation_size
self.assigned_schools = []
def read_info(school_info_filename, committee_info_filename):
'''
Takes the filepaths and returns the dataframes
'''
schools = pd.read_csv(school_info_filename)
comms = pd.read_csv(committee_info_filename)
return schools, comms
def format_for_main(schools, comms):
'''
Creates all the objects and fills in the information from the dataframes
inputs:
schools, comms: pandas dataframes from read_info
outputs:
teams, a list of Team objects
committees, a dict mapping committee names to Committee objects
'''
teams = []
committees = {}
max_at_conf = 0
comms.columns = ['Committee', 'Number of Spots', 'Delegation Size']
schools.columns = ['School', 'Number of Delegates'] + \
["Preference {}".format(str(i)) for i in range(len(comms))]
for index, row in comms.iterrows():
comm = Committee(row['Committee'], row['Number of Spots'], row['Delegation Size'])
committees[row['Committee']] = comm
max_at_conf += row['Delegation Size']
for index, row in schools.iterrows():
prefs = [j for j in row[2:]]
for i in range(ceil(row['Number of Delegates'] / max_at_conf)): # handling more delegates requested
# than there are committees.
num_dels = row['Number of Delegates'] - i * max_at_conf
if num_dels > max_at_conf:
team = Team(row['School']+str(i+2), max_at_conf, prefs)
teams.append(team)
else:
team = Team(row['School'], row['Number of Delegates'], prefs)
teams.append(team)
return teams, committees
def assign(teams, committees):
'''
My algorithm! Draft-based assignment. Takes the teams' constraints/preferences and committees and
simulates a draft. Each team got picks assigned at initialization (first round, fourth round, etc.),
and it iterates through each round of the draft until either all delegates are assigned or all
committees are filled.
Inputs:
teams, a list of Team objects from format_for_main
committees, a dict of committees (name : Committee object) from format_for_main
Outputs:
teams, a list of Team objects with assignments
committees, a dict of committees (formatted the same) with assignments
'''
for r in range(len(committees)):
print("round {}".format(r))
for team in teams:
if r in team.picks and len(team.assigned_committees) < team.num_delegates:
# print(team.name, team.preferences)
for pref in team.preferences:
p = team.preferences.pop(team.preferences.index(pref))
c = committees[p]
if len(c.assigned_schools) < c.num_spots and team.num_dels_assigned < team.num_delegates \
- 1 + c.delegation_size:
c.assigned_schools.append(team.name)
team.assigned_committees.append(c.name)
team.num_dels_assigned += c.delegation_size
if team.num_dels_assigned > team.num_delegates:
for i, val in enumerate(team.assigned_committees):
if committees[val].delegation_size == 1:
index_to_drop = i #no break so I can grab the last value
c_to_drop = val
committees[c_to_drop].assigned_schools.pop(committees[c_to_drop]\
.assigned_schools.index(team.name))
team.assigned_committees.pop(index_to_drop)
print("assigned {} to {}".format(team.name, c.name))
break
else:
continue
else:
continue
return teams, committees
def output(teams, committees):
'''
Outputs the master documents.
Inputs from assign
'''
all_school_assignments = []
all_comm_assignments = []
for team in teams:
all_school_assignments.append([team.name, team.num_delegates] + team.assigned_committees)
for comm in committees:
all_comm_assignments.append([comm, committees[comm].num_spots, committees[comm].delegation_size] \
+ committees[comm].assigned_schools)
schools_df = pd.DataFrame(all_school_assignments)
schools_df.rename(columns = {0:'School', 1:'Number of Delegates'}, inplace = True)
comm_df = pd.DataFrame(all_comm_assignments)
schools_df.to_csv('all_school_assignments.csv')
comm_df.to_csv("all_committees_assignments.csv")
for index, row in schools_df.iterrows():
row.to_csv("school_assignments/{}'s_assignments.csv".format(row['School']))
def go(school_filename, committee_filename):
'''
Runs the whole darn thing.
'''
schools, comms = read_info(school_filename, committee_filename)
teams, committees = format_for_main(schools, comms)
teams, committees = assign(teams, committees)
output(teams, committees)
s = 0
for i in teams: s += i.num_delegates
s2 = 0
for key in committees: s2 += len(committees[key].assigned_schools)*committees[key].delegation_size
if s == s2:
print("It worked! :)")
else:
print("There's a bug. Bad computer. :(")
if __name__ == "__main__":
try:
go(argv[1], argv[2])
except:
print("Something went wrong. Please make sure your usage is correct and files are formatted correctly.")
print("Usage: python3 get_positions.py [school_info_filepath] [committee info filepath]")
| 2.984375 | 3 |
src/day6.py | blu3r4y/AdventOfCode2018 | 2 | 12798352 | <gh_stars>1-10
# Advent of Code 2018, Day 6
# (c) blu3r4y
import numpy as np
def part1(coordinates):
# create a matrix filled with -1 big enough to hold all coordinates
shape = np.amax(coordinates, axis=0) + (1, 1)
matrix = np.full(shape, -1)
for cell, _ in np.ndenumerate(matrix):
# calculate manhattan distance to every coordinate
dists = np.sum(np.abs(cell - coordinates), axis=1)
# assign the minimum distance to the cell, if it is unique
mins = np.argwhere(dists == np.amin(dists))
if len(mins) == 1:
matrix[cell] = mins[0][0]
# invalidate infinite regions
infinite = np.unique(np.hstack((matrix[(0, -1), :], matrix[:, (0, -1)].T)))
matrix[np.isin(matrix, infinite)] = -1
# measure region size
_, counts = np.unique(matrix.ravel(), return_counts=True)
return np.max(counts[1:])
def part2(coordinates, min_dist):
# create an empty matrix big enough to hold all coordinates
shape = np.amax(coordinates, axis=0) + (1, 1)
matrix = np.zeros(shape, dtype=int)
for cell, _ in np.ndenumerate(matrix):
# sum manhattan distance to every coordinate
dist = np.sum(np.abs(cell - coordinates))
# assign a marker if the distance is small enough
if dist < min_dist:
matrix[cell] = 1
return np.sum(matrix)
if __name__ == "__main__":
print(part1(np.array([(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)])))
print(part1(np.loadtxt(r"../assets/day6.txt", delimiter=',', dtype=int)))
print(part2(np.array([(1, 1), (1, 6), (8, 3), (3, 4), (5, 5), (8, 9)]), 32))
print(part2(np.loadtxt(r"../assets/day6.txt", delimiter=',', dtype=int), 10000))
| 2.8125 | 3 |
mundo 3/des084.py | Pedroluis1/python | 0 | 12798360 | temp = []
princ = []
mai = men = 0
while True:
temp.append(str(input('nome: ')))
temp.append(float(input('peso: ')))
if len(princ) == 0:
mai = men = temp[1]
else:
if temp[1] > mai:
mai = temp[1]
if temp[1] < men:
men = temp[1]
princ.append(temp[:])
temp.clear()
resp = str(input('quer continuar? '))
if resp in 'Nn':
break
print('-=' * 30)
print(f'Ao todo você cadastrou {len(princ)} pessoas.')
print(f'O maior peso foi {mai}Kg de', end='')
for p in princ:
if p[1] == mai:
print(f'{[p[0]]}', end='')
print(f'E o menor peso foi {men} de ', end='')
for p in princ:
if p[1] == men:
print(f'{[p[0]]}', end='')
| 2.40625 | 2 |
twiml/voice/pay/pay-tokenize-connector/pay-tokenize-connector.6.x.py | stuyy/api-snippets | 0 | 12798368 | <filename>twiml/voice/pay/pay-tokenize-connector/pay-tokenize-connector.6.x.py<gh_stars>0
from twilio.twiml.voice_response import Pay, VoiceResponse
response = VoiceResponse()
response.pay(charge_amount='0', payment_connector='My_Pay_Connector', action='https://your-callback-function-url.com/pay')
print(response)
| 1.21875 | 1 |
libkol/request/clan_rumpus_meat.py | danheath/pykol-lib | 6 | 12798376 | <reponame>danheath/pykol-lib
from enum import Enum
import libkol
from ..util import parsing
from .clan_rumpus import Furniture
from .request import Request
class MeatFurniture(Enum):
Orchid = Furniture.MeatOrchid
Tree = Furniture.MeatTree
Bush = Furniture.MeatBush
class clan_rumpus_meat(Request[int]):
"""
Uses a meat dispenser in the clan rumpus room.
"""
def __init__(self, session: "libkol.Session", furniture: MeatFurniture) -> None:
super().__init__(session)
spot, furni = furniture.value
params = {"action": "click", "spot": spot, "furni": furni}
self.request = session.request("clan_rumpus.php", params=params)
@staticmethod
async def parser(content: str, **kwargs) -> int:
return parsing.meat(content)
| 1.601563 | 2 |
python-core/src/Dates.py | NSnietol/python-core-and-advanced | 2 | 12798384 | '''
Created on Nov 14, 2018
@author: nilson.nieto
'''
import time, datetime
# Using Epoch
print(time.ctime(time.time()))
print('Current day')
print(datetime.datetime.today())
| 2.046875 | 2 |
test/test_data.py | sei-nmvanhoudnos/Juneberry | 0 | 12798392 | <filename>test/test_data.py
#! /usr/bin/env python3
# ==========================================================================================================================================================
# Copyright 2021 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS"
# BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER
# INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED
# FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM
# FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD (SEI)-style license, please see license.txt
# or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see
# Copyright notice for non-US Government use and distribution.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Pytorch (https://github.com/pytorch/pytorch/blob/master/LICENSE) Copyright 2016 facebook, inc..
# 2. NumPY (https://github.com/numpy/numpy/blob/master/LICENSE.txt) Copyright 2020 Numpy developers.
# 3. Matplotlib (https://matplotlib.org/3.1.1/users/license.html) Copyright 2013 Matplotlib Development Team.
# 4. pillow (https://github.com/python-pillow/Pillow/blob/master/LICENSE) Copyright 2020 <NAME> and contributors.
# 5. SKlearn (https://github.com/scikit-learn/sklearn-docbuilder/blob/master/LICENSE) Copyright 2013 scikit-learn
# developers.
# 6. torchsummary (https://github.com/TylerYep/torch-summary/blob/master/LICENSE) Copyright 2020 <NAME>.
# 7. adversarial robust toolbox (https://github.com/Trusted-AI/adversarial-robustness-toolbox/blob/main/LICENSE)
# Copyright 2018 the adversarial robustness toolbox authors.
# 8. pytest (https://docs.pytest.org/en/stable/license.html) Copyright 2020 <NAME> and others.
# 9. pylint (https://github.com/PyCQA/pylint/blob/master/COPYING) Copyright 1991 Free Software Foundation, Inc..
# 10. python (https://docs.python.org/3/license.html#psf-license) Copyright 2001 python software foundation.
#
# DM20-1149
#
# ==========================================================================================================================================================
import csv
import random
import os
from pathlib import Path
from unittest import mock
import juneberry
import juneberry.data as jb_data
import juneberry.filesystem as jbfs
from juneberry.config.dataset import DatasetConfig
from juneberry.config.training import TrainingConfig
import juneberry.config.dataset as jb_dataset
import test_training_config
def make_data():
    """Build a small fixture of fake (name, label) samples.

    Returns:
        A pair holding the same data in two layouts: a list of
        [name, label] rows, and a dict mapping each label to the
        names carrying that label.
    """
    names = list("abcdefghijkl")
    labels = [0, 1, 2, 3, 0, 1, 2, 3, 0, 0, 1, 1]
    rows = [[name, label] for name, label in zip(names, labels)]

    by_label = {0: ['a', 'e', 'i', 'j'],
                1: ['b', 'f', 'k', 'l'],
                2: ['c', 'g'],
                3: ['d', 'h']}

    return rows, by_label
def check_allocation(good_dict, result_dict):
    """Assert that every item listed under each key of result_dict also
    appears under the same key in good_dict."""
    for key, items in result_dict.items():
        allowed = good_dict[key]
        for item in items:
            assert item in allowed
def test_listdir_no_hidden():
    """listdir_nohidden should drop dotfiles and keep everything else."""
    with mock.patch('os.listdir') as listdir_mock:
        listdir_mock.return_value = ['thing1', '.myhidden', 'thing2']
        found = jb_data.listdir_nohidden('')
        assert len(found) == 2
        assert '.myhidden' not in found
        for visible in ('thing1', 'thing2'):
            assert visible in found
# _____
# |_ _|
# | | _ __ ___ __ _ __ _ ___
# | || '_ ` _ \ / _` |/ _` |/ _ \
# _| || | | | | | (_| | (_| | __/
# \___/_| |_| |_|\__,_|\__, |\___|
# __/ |
# |___/
# This is a hard-code list dir that we use to test get images
def mock_list_image_dir(path):
    """Stand-in for os.listdir: six fake image names for a 'frodo' or 'sam'
    directory, an empty list for anything else."""
    name = str(path)
    if name.endswith("frodo"):
        return ['fr_%d.png' % i for i in range(6)]
    if name.endswith("sam"):
        return ['sm_%d.png' % i for i in range(6)]
    return []
def make_basic_data_set_image_config():
    """Return a minimal image-classification dataset config with two sources:
    'frodo' (label 0) and 'sam' (label 1), no sampling stanza."""
    return {
        "numModelClasses": 4,
        "timestamp": "never",
        "formatVersion": jb_dataset.FORMAT_VERSION,
        "labelNames": {"0": "frodo", "1": "sam"},
        "dataType": 'image',
        "imageData": {
            "taskType": "classification",
            "sources": [
                {
                    "directory": "frodo",
                    "label": 0,
                    # "samplingCount": "4",
                    # "samplingFraction": ""
                },
                {
                    "directory": "sam",
                    "label": 1,
                    # "samplingCount": "4",
                    # "samplingFraction": ""
                }
            ]
        }
    }
def make_sample_stanza(algorithm, args):
    """Wrap an algorithm name and its argument structure in a dataset 'sampling' stanza.

    algorithm: one of 'none', 'randomFraction', 'randomQuantity', 'roundRobin'.
    args: algorithm-specific argument dict.
    """
    stanza = {"algorithm": algorithm, "arguments": args}
    return {"sampling": stanza}
def assert_correct_list(test_list, frodo_indexes, sam_indexes):
    """Assert that test_list is exactly the frodo entries (label 0) followed by
    the sam entries (label 1), in the order given by the index sequences."""
    expected_names = [str(Path('data_root', 'frodo', f'fr_{i}.png')) for i in frodo_indexes]
    expected_names += [str(Path('data_root', 'sam', f'sm_{i}.png')) for i in sam_indexes]
    expected_labels = [0] * len(frodo_indexes) + [1] * len(sam_indexes)
    for position, entry in enumerate(test_list):
        assert entry[0] == expected_names[position]
        assert entry[1] == expected_labels[position]
def test_generate_image_list():
    """With no sampling and no validation config, every image from both
    directories lands in the training list, in directory order."""
    # Just replace listdir with the canned six-image-per-directory mock.
    os.listdir = mock_list_image_dir
    data_set_struct = make_basic_data_set_image_config()
    data_set_config = DatasetConfig(data_set_struct)
    dm = jbfs.DataManager({})
    train_list, val_list = jb_data.generate_image_list('data_root', data_set_config, None, dm)
    assert len(train_list) == 12
    assert len(val_list) == 0
    assert_correct_list(train_list, range(6), range(6))
def test_generate_image_sample_quantity():
    """randomQuantity sampling keeps exactly `count` images from each source."""
    # We know how the internal randomizer works: it applies random.sample to both
    # source sets in order. This is a secret and fragile to this test.
    # With a seed of 1234 and two pulls of sampling with a count of 3, it pulls [3,0,4] and [0,4,5].
    os.listdir = mock_list_image_dir
    data_set_struct = make_basic_data_set_image_config()
    data_set_struct.update(make_sample_stanza("randomQuantity", {'seed': 1234, 'count': 3}))
    data_set_config = DatasetConfig(data_set_struct)
    dm = jbfs.DataManager({})
    train_list, val_list = jb_data.generate_image_list('data_root', data_set_config, None, dm)
    assert len(train_list) == 6
    assert len(val_list) == 0
    # Make sure they are in this exact (seed-determined) order
    assert_correct_list(train_list, [3, 0, 4], [0, 4, 5])
def test_generate_image_sample_fraction():
    """randomFraction sampling keeps floor(fraction * n) images from each source."""
    # We know how the internal randomizer works: it applies random.sample to both
    # source sets in order. This is a secret and fragile to this test.
    # With a seed of 1234, 1/3 of six images is two pulls per source: [3,0] and [0,5].
    os.listdir = mock_list_image_dir
    data_set_struct = make_basic_data_set_image_config()
    data_set_struct.update(make_sample_stanza("randomFraction", {'seed': 1234, 'fraction': 0.3333333333}))
    data_set_config = DatasetConfig(data_set_struct)
    dm = jbfs.DataManager({})
    train_list, val_list = jb_data.generate_image_list('data_root', data_set_config, None, dm)
    assert len(train_list) == 4
    assert len(val_list) == 0
    # Make sure they are in this exact (seed-determined) order
    assert_correct_list(train_list, [3, 0], [0, 5])
def test_generate_image_validation_split():
    """A randomFraction validation stanza moves one third of each source into
    the validation list and leaves the rest for training."""
    os.listdir = mock_list_image_dir
    data_set_struct = make_basic_data_set_image_config()
    data_set_config = DatasetConfig(data_set_struct)
    train_struct = test_training_config.make_basic_config()
    train_struct['validation'] = {
        "algorithm": "randomFraction",
        "arguments": {
            "seed": 1234,
            "fraction": 0.3333333
        }
    }
    train_config = TrainingConfig('', train_struct)
    dm = jbfs.DataManager({})
    train_list, val_list = jb_data.generate_image_list('data_root', data_set_config, train_config, dm)
    assert len(train_list) == 8
    assert len(val_list) == 4
    # NOTE: Another fragile secret we rely on - the order of the validation split is reversed.
    assert_correct_list(train_list, [1, 2, 4, 5], [1, 2, 3, 4])
    assert_correct_list(val_list, [3, 0], [5, 0])
# _____ _ _
# |_ _| | | | |
# | | __ _| |__ _ _| | __ _ _ __
# | |/ _` | '_ \| | | | |/ _` | '__|
# | | (_| | |_) | |_| | | (_| | |
# \_/\__,_|_.__/ \__,_|_|\__,_|_|
#
def make_basic_data_set_tabular_config():
    """Return a minimal tabular dataset config with one CSV source per root
    kind (dataroot, workspace, relative); the label is in column index 2."""
    return {
        "numModelClasses": 4,
        "timestamp": "never",
        "formatVersion": jb_dataset.FORMAT_VERSION,
        "labelNames": {"0": "frodo", "1": "sam"},
        "dataType": 'tabular',
        "tabularData": {
            "sources": [
                {
                    "root": "dataroot",  # [ dataroot | workspace | relative ]
                    "path": "dr.csv",  # subdirectory
                },
                {
                    "root": "workspace",  # [ dataroot | workspace | relative ]
                    "path": "ws.csv",  # subdirectory
                },
                {
                    "root": "relative",  # [ dataroot | workspace | relative ]
                    "path": "re*.csv",  # glob pattern relative to the config
                }
            ],
            "labelIndex": 2
        }
    }
def fill_tabular_tempdir(root_dir):
    """
    Creates the sample CSV files to be read and returns the data we should find.

    Three directories (myworkspace, mydataroot, myrelative) each get one CSV
    with four rows of x, y = x + 10, label = row % 2, so each of the two labels
    ends up with six (x, y) pairs overall.

    :param root_dir: The root directory in which to create the sample tree
    :return: Good data in a dict of label -> dict of x -> y
    """
    results = {0: {}, 1: {}}

    # Directory, filename, val_range
    dir_struct = [
        ['myworkspace', 'ws.csv', list(range(0, 4))],
        ['mydataroot', 'dr.csv', list(range(4, 8))],
        ['myrelative', 'rel.csv', list(range(8, 12))]
    ]

    for dir_name, file_name, data in dir_struct:
        dir_path = Path(root_dir) / dir_name
        dir_path.mkdir()

        csv_path = dir_path / file_name
        # newline='' is the documented way to open files for the csv module.
        with open(csv_path, "w", newline='') as csv_file:
            csv_writer = csv.writer(csv_file, delimiter=',')
            # BUG FIX: writerow() takes an iterable of *fields*. The original
            # passed the string 'x,y,label', which wrote every character
            # (including the commas) as its own column.
            csv_writer.writerow(['x', 'y', 'label'])
            for idx, val in enumerate(data):
                results[idx % 2][val] = val + 10
                csv_writer.writerow([val, val + 10, idx % 2])

    return results
def test_load_tabular_data(tmp_path):
    """With no training config there is no validation split, so all 12 rows
    from the three sample CSVs land in the training list."""
    correct = fill_tabular_tempdir(tmp_path)

    juneberry.WORKSPACE_ROOT = Path(tmp_path) / 'myworkspace'
    juneberry.DATA_ROOT = Path(tmp_path) / 'mydataroot'

    data_set_struct = make_basic_data_set_tabular_config()
    data_set_config = DatasetConfig(data_set_struct, Path(tmp_path) / 'myrelative')

    train_list, val_list = jb_data.load_tabular_data(None, data_set_config)

    # The sample data is three files, each with 4 rows (2 per class).
    assert len(train_list) == 12
    assert len(val_list) == 0

    # Make sure that every returned value is in the results.
    for data, label in train_list:
        assert correct[int(label)][int(data[0])] == int(data[1])
        del correct[int(label)][int(data[0])]
def test_load_tabular_data_with_sampling(tmp_path):
    """randomQuantity sampling with count=3 keeps 3 rows per class (6 total)."""
    correct = fill_tabular_tempdir(tmp_path)

    juneberry.WORKSPACE_ROOT = Path(tmp_path) / 'myworkspace'
    juneberry.DATA_ROOT = Path(tmp_path) / 'mydataroot'

    # We only need to test one sampling algorithm because the sampling core is tested elsewhere.
    data_set_struct = make_basic_data_set_tabular_config()
    data_set_struct.update(make_sample_stanza("randomQuantity", {'seed': 1234, 'count': 3}))
    data_set_config = DatasetConfig(data_set_struct, Path(tmp_path) / 'myrelative')

    train_list, val_list = jb_data.load_tabular_data(None, data_set_config)

    # The sample data is three files, each with 4 rows (2 per class); sampling
    # keeps 3 per class and there is no validation split.
    assert len(train_list) == 6
    assert len(val_list) == 0

    # Now, make sure they are in each one, removing as we go
    for data, label in train_list:
        assert correct[int(label)][int(data[0])] == int(data[1])
        del correct[int(label)][int(data[0])]

    # At this point we should have three unused entries of each class
    assert len(correct[0]) == 3
    assert len(correct[1]) == 3
def test_load_tabular_data_with_validation(tmp_path):
    """The default validation split in the basic training config moves half of
    each class into the validation list; together the two lists cover all rows."""
    correct = fill_tabular_tempdir(tmp_path)

    juneberry.WORKSPACE_ROOT = Path(tmp_path) / 'myworkspace'
    juneberry.DATA_ROOT = Path(tmp_path) / 'mydataroot'

    data_set_struct = make_basic_data_set_tabular_config()
    data_set_config = DatasetConfig(data_set_struct, Path(tmp_path) / 'myrelative')

    train_struct = test_training_config.make_basic_config()
    train_config = TrainingConfig('', train_struct)

    train_list, val_list = jb_data.load_tabular_data(train_config, data_set_config)

    # The sample data is three files, each with 4 rows (2 per class).
    # The default validation split is 1/2, so 3 * 4 / 2 = 6 rows per list.
    assert len(train_list) == 6
    assert len(val_list) == 6

    # Now, make sure they are in each one, removing as we go
    for data, label in train_list:
        assert correct[int(label)][int(data[0])] == int(data[1])
        del correct[int(label)][int(data[0])]

    assert len(correct[0]) == 3
    assert len(correct[1]) == 3

    for data, label in val_list:
        assert correct[int(label)][int(data[0])] == int(data[1])
        del correct[int(label)][int(data[0])]

    assert len(correct[0]) == 0
    assert len(correct[1]) == 0
# _____ _ _
# / ___| | (_)
# \ `--. __ _ _ __ ___ _ __ | |_ _ __ __ _
# `--. \/ _` | '_ ` _ \| '_ \| | | '_ \ / _` |
# /\__/ / (_| | | | | | | |_) | | | | | | (_| |
# \____/ \__,_|_| |_| |_| .__/|_|_|_| |_|\__, |
# | | __/ |
# |_| |___/
def test_sampling_random_quantity():
    """randomQuantity keeps `count` elements; exact output pinned by seed 1234."""
    randomizer = random.Random()
    randomizer.seed(1234)
    data_list = list(range(6))
    sampled = jb_data.sample_data_list(data_list, "randomQuantity", {"count": 3}, randomizer)
    for correct, test in zip([3, 0, 4], sampled):
        assert correct == test
def test_sampling_random_fraction():
    """randomFraction keeps floor(fraction * n) elements; output pinned by seed 1234."""
    randomizer = random.Random()
    randomizer.seed(1234)
    data_list = list(range(6))
    sampled = jb_data.sample_data_list(data_list, "randomFraction", {"fraction": 0.3333333333}, randomizer)
    for correct, test in zip([3, 0], sampled):
        assert correct == test
def test_sampling_round_robin():
    """roundRobin returns the elements allocated to one of `groups` groups
    (here position 1 of 3); exact output pinned by seed 1234."""
    randomizer = random.Random()
    randomizer.seed(1234)
    data_list = list(range(9))
    sampled = jb_data.sample_data_list(data_list, "roundRobin", {"groups": 3, "position": 1}, randomizer)
    for correct, test in zip([3, 4, 1], sampled):
        assert correct == test
def test_sampling_none():
    """The 'none' algorithm returns the data unchanged and in order."""
    randomizer = random.Random()
    randomizer.seed(1234)
    data_list = list(range(8))
    sampled = jb_data.sample_data_list(data_list, "none", {}, randomizer)
    for correct, test in zip(range(8), sampled):
        assert correct == test
# ___ ____
# | \/ (_)
# | . . |_ ___ ___
# | |\/| | / __|/ __|
# | | | | \__ \ (__
# \_| |_/_|___/\___|
def test_flatten_dict_to_pairs():
    """Flattening a label -> items dict yields one (item, label) pair per item."""
    data_list, data_dict = make_data()
    result_pairs = jb_data.flatten_dict_to_pairs(data_dict)

    # Order doesn't matter; just check that every pair maps back into the original dict.
    assert len(result_pairs) == len(data_list)
    for v, k in result_pairs:
        assert v in data_dict[k]
def test_labeled_pairs_to_labeled_dict():
    """Converting [item, label] pairs back to a dict reproduces the grouping."""
    data_list, data_dict = make_data()
    result_dict = jb_data.labeled_pairs_to_labeled_dict(data_list)
    assert len(result_dict) == len(data_dict)
    for k, v in result_dict.items():
        assert len(v) == len(data_dict[k])
        for i in v:
            assert i in data_dict[k]
def test_make_balanced_list():
    """Balancing with max_count=-1 trims every class to the smallest class size (2)."""
    data_list, data_dict = make_data()
    result = jb_data.make_balanced_labeled_list(data_list, -1, random.Random())
    result_dict = jb_data.labeled_pairs_to_labeled_dict(result)
    assert len(result_dict) == 4
    for k, v in result_dict.items():
        assert len(v) == 2
    check_allocation(data_dict, result_dict)
def test_make_balanced_dict():
    """Balancing a labeled dict with max_count=-1 trims every class to the smallest class size (2)."""
    data_list, data_dict = make_data()
    result_dict = jb_data.make_balanced_labeled_dict(data_dict, -1, random.Random())
    assert len(result_dict) == 4
    for k, v in result_dict.items():
        assert len(v) == 2
    check_allocation(data_dict, result_dict)
| 1.539063 | 2 |
pybilt/mda_tools/mda_msd.py | blakeaw/ORBILT | 11 | 12798400 | <gh_stars>10-100
#we are going to use the MDAnalysis to read in topo and traj
#numpy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
#import my running stats class
from pybilt.common.running_stats import RunningStats
# import the coordinate wrapping function--for unwrapping
from pybilt.mda_tools.mda_unwrap import wrap_coordinates
from six.moves import range
'''
function to compute the mean square displacement (MSD) and diffusion constant
of a list of MDAnalysis atom selections (atom_sel_list). The list of atom selections
are averaged at each timestep.
Returns 2d numpy array with len(atom_sel_list)X6 elements:
[:,0]=dt [:,1]=msd [:,2]=msd_dev [:,3]=diff_con_instantaneous
[:,4]=diff_con_running_average [:,5]=diff_con_running_dev
Long time mean squared displacement:
MSD = lim_(t->inf) <||r_i(t) - r_i(0)||**2>_(nsels) = 2*dim*D*t
'''
def mda_msd (trajectory, atom_sel_list, lateral=False, plane="xy", unwrap=True, verbose=False):
    """Compute the MSD and diffusion-constant estimates for a set of selections.

    The selections' center-of-mass displacements are averaged over selections at
    each timestep (see module docstring for the 6-column output layout). When
    ``lateral`` is True only the two axes of ``plane`` contribute and dim=2 is
    used in the Einstein relation; otherwise all three axes are used.
    """
    # Dimensionality for the Einstein relation and the axes used for displacements.
    dim=3
    plane_index = [0,1,2]
    if lateral:
        dim=2
        ii=0
        jj=1
        if plane=="yz" or plane=="zy":
            ii=1
            jj=2
        if plane=="xz" or plane=="zx":
            ii=0
            jj=2
        plane_index = [ii, jj]
    naxes = len(plane_index)
    #get the number of frames from the trajectory
    nframes = len(trajectory)
    #get the number of atomselections
    nsels = len(atom_sel_list)
    #initialize a numpy array to hold the center of mass vectors
    comlist = np.zeros((nsels, nframes, 3))
    #print "l comlist ",len(comlist)
    times = np.zeros(nframes)
    #index counter for the frame number
    comit = 0
    #combine all the selections into one (for wrapping)
    msel = atom_sel_list[0]
    for s in range(1, nsels):
        msel+=atom_sel_list[s]
    natoms = len(msel)
    oldcoord = np.zeros((natoms,naxes))
    index = msel.indices
    firstframe = True
    # loop over the trajectory
    for ts in trajectory:
        time=ts.time
        if verbose:
            print(" ")
            print("frame ",ts.frame)
        # unwrap coordinates -- currently unwraps all the coordinates.
        # NOTE(review): wrap_coordinates is imported "for unwrapping" per the
        # module header; it rebuilds continuous trajectories from the previous
        # frame's coordinates -- confirm against pybilt.mda_tools.mda_unwrap.
        if unwrap:
            if verbose:
                print("unwrapping frame ",ts.frame)
            currcoord = ts.positions[index]
            if firstframe:
                oldcoord = currcoord
                firstframe = False
            else:
                abc = ts.dimensions[0:3]
                wrapcoord = wrap_coordinates(abc, currcoord, oldcoord)
                ts._pos[index] = wrapcoord[:]
        #loop over the selections
        for i in range(nsels):
            if verbose:
                print("frame ",ts.frame," getting com of selection ",atom_sel_list[i])
            #compute the center of mass of the current selection and current frame
            com = atom_sel_list[i].center_of_mass()
            #print "com ",com
            #add to the numpy array
            comlist[i,comit]=com
        #print comlist
        times[comit]=time
        comit+=1
    #initialize a numpy array to hold the msd for each selection
    msd = np.zeros((nframes, 6))
    #initialize a running stats object to do the averaging over selections
    drs_stat = RunningStats()
    #initialize a running stats object for the diffusion constant (frame/time average)
    diff_stat = RunningStats()
    #loop over the frames starting at index 1
    #print comlist
    #print len(comlist)
    # reference positions: every selection's COM at the first frame
    coml0 = comlist[:,0,plane_index]
    #print coml0
    for i in range(1, nframes):
        # get the current com frame list
        comlcurr = comlist[:,i,plane_index]
        dr = comlcurr - coml0
        drs = dr*dr
        #loop over the selections for this frame, accumulating |dr|^2 per selection
        for j in range(nsels):
            drs_curr = drs[j,:]
            drs_mag = drs_curr.sum()
            drs_stat.push(drs_mag)
        #get the msd (average over selections) for the current frame
        msdcurr = drs_stat.mean()
        devcurr = drs_stat.deviation()
        dt = times[i]-times[0]
        # Einstein relation: D = MSD / (2 * dim * t)
        DiffCon = msdcurr/(2.0*dim*dt)
        diff_stat.push(DiffCon)
        #print "msdcurr ",msdcurr
        #push to the msd array: [dt, msd, msd_dev, D_inst, D_running_avg, D_running_dev]
        msd[i,0]=dt
        msd[i,1]=msdcurr
        msd[i,2]=devcurr
        msd[i,3]=DiffCon
        msd[i,4]=diff_stat.mean()
        msd[i,5]=diff_stat.deviation()
        if verbose:
            print("selection number ",i," has msd ",msdcurr," with deviation ",devcurr)
        #reset the running stats object--prepare for the next frame's selection average
        drs_stat.Reset()
    #return msd array
    return msd
| 2 | 2 |
src/qiskit_trebugger/views/widget/button_with_value.py | kdk/qiskit-timeline-debugger | 7 | 12798408 | import ipywidgets as widgets
class ButtonWithValue(widgets.Button):
    """An ipywidgets Button that carries an arbitrary payload in ``value``.

    The mandatory ``value`` keyword argument is stored on the instance and
    removed from ``kwargs`` before delegating, because ``widgets.Button`` does
    not accept a ``value`` keyword.
    """

    def __init__(self, *args, **kwargs):
        # pop() both reads and removes the key in one step; like the original
        # read-then-pop sequence, a missing 'value' raises KeyError.
        self.value = kwargs.pop('value')
        super(ButtonWithValue, self).__init__(*args, **kwargs)
allennlp/data/dataset.py | pmulcaire/allennlp | 1 | 12798416 | """
A :class:`~Dataset` represents a collection of data suitable for feeding into a model.
For example, when you train a model, you will likely have a *training* dataset and a *validation* dataset.
"""
import logging
from collections import defaultdict
from typing import Dict, List, Union
import numpy
import tqdm
from allennlp.data.instance import Instance
from allennlp.data.vocabulary import Vocabulary
from allennlp.common.checks import ConfigurationError
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Dataset:
    """
    A collection of :class:`~allennlp.data.instance.Instance` objects.
    The ``Instances`` have ``Fields``, and the fields
    could be in an indexed or unindexed state - the ``Dataset`` has methods around indexing the
    data and converting the data into arrays.
    """
    def __init__(self, instances: List[Instance]) -> None:
        """
        A Dataset just takes a list of instances in its constructor. It's important that all
        subclasses have an identical constructor to this (though possibly with different Instance
        types). If you change the constructor, you also have to override all methods in this base
        class that call the constructor, such as `truncate()`.

        Raises ``ConfigurationError`` when the instances do not all share the
        same field names and Field types.
        """
        # Map each instance to {field name: Field class name} so homogeneity can be verified.
        all_instance_fields_and_types: List[Dict[str, str]] = [{k: v.__class__.__name__
                                                                for k, v in x.fields.items()}
                                                               for x in instances]
        # Check all the field names and Field types are the same for every instance.
        if not all([all_instance_fields_and_types[0] == x for x in all_instance_fields_and_types]):
            raise ConfigurationError("You cannot construct a Dataset with non-homogeneous Instances.")

        self.instances = instances

    def truncate(self, max_instances: int):
        """
        If there are more instances than ``max_instances`` in this dataset, we truncate the
        instances to the first ``max_instances``. This `modifies` the current object, and returns
        nothing.
        """
        if len(self.instances) > max_instances:
            self.instances = self.instances[:max_instances]

    def index_instances(self, vocab: Vocabulary):
        """
        Converts all ``UnindexedFields`` in all ``Instances`` in this ``Dataset`` into
        ``IndexedFields``. This modifies the current object, it does not return a new object.
        """
        logger.info("Indexing dataset")
        for instance in tqdm.tqdm(self.instances):
            instance.index_fields(vocab)

    def get_padding_lengths(self) -> Dict[str, Dict[str, int]]:
        """
        Gets the maximum padding lengths from all ``Instances`` in this dataset.  Each ``Instance``
        has multiple ``Fields``, and each ``Field`` could have multiple things that need padding.
        We look at all fields in all instances, and find the max values for each (field_name,
        padding_key) pair, returning them in a dictionary.

        This can then be used to convert this dataset into arrays of consistent length, or to set
        model parameters, etc.
        """
        padding_lengths: Dict[str, Dict[str, int]] = defaultdict(dict)
        all_instance_lengths: List[Dict[str, Dict[str, int]]] = [instance.get_padding_lengths()
                                                                 for instance in self.instances]
        if not all_instance_lengths:
            # {**...} converts the defaultdict into a plain dict for the caller.
            return {**padding_lengths}
        all_field_lengths: Dict[str, List[Dict[str, int]]] = defaultdict(list)
        for instance_lengths in all_instance_lengths:
            for field_name, instance_field_lengths in instance_lengths.items():
                all_field_lengths[field_name].append(instance_field_lengths)
        for field_name, field_lengths in all_field_lengths.items():
            for padding_key in field_lengths[0].keys():
                # Missing keys count as 0 so the max is taken over all instances.
                max_value = max(x[padding_key] if padding_key in x else 0 for x in field_lengths)
                padding_lengths[field_name][padding_key] = max_value
        return {**padding_lengths}

    def as_array_dict(self,
                      padding_lengths: Dict[str, Dict[str, int]] = None,
                      verbose: bool = True) ->Dict[str, Union[numpy.ndarray, Dict[str, numpy.ndarray]]]:
        # This complex return type is actually predefined elsewhere as a DataArray,
        # but we can't use it because mypy doesn't like it.
        """
        This method converts this ``Dataset`` into a set of numpy arrays that can be passed through
        a model.  In order for the numpy arrays to be valid arrays, all ``Instances`` in this
        dataset need to be padded to the same lengths wherever padding is necessary, so we do that
        first, then we combine all of the arrays for each field in each instance into a set of
        batched arrays for each field.

        Parameters
        ----------
        padding_lengths : ``Dict[str, Dict[str, int]]``
            If a key is present in this dictionary with a non-``None`` value, we will pad to that
            length instead of the length calculated from the data.  This lets you, e.g., set a
            maximum value for sentence length if you want to throw out long sequences.

            Entries in this dictionary are keyed first by field name (e.g., "question"), then by
            padding key (e.g., "num_tokens").
        verbose : ``bool``, optional (default=``True``)
            Should we output logging information when we're doing this padding?  If the dataset is
            large, this is nice to have, because padding a large dataset could take a long time.
            But if you're doing this inside of a data generator, having all of this output per
            batch is a bit obnoxious.

        Returns
        -------
        data_arrays : ``Dict[str, DataArray]``
            A dictionary of data arrays, keyed by field name, suitable for passing as input to a
            model.  This is a `batch` of instances, so, e.g., if the instances have a "question"
            field and an "answer" field, the "question" fields for all of the instances will be
            grouped together into a single array, and the "answer" fields for all instances will be
            similarly grouped in a parallel set of arrays, for batched computation. Additionally,
            for TextFields, the value of the dictionary key is no longer a single array, but another
            dictionary mapping TokenIndexer keys to arrays. The number of elements in this
            sub-dictionary therefore corresponds to the number of ``TokenIndexers`` used to index
            the Field.
        """
        if padding_lengths is None:
            padding_lengths = defaultdict(dict)
        # First we need to decide _how much_ to pad.  To do that, we find the max length for all
        # relevant padding decisions from the instances themselves.  Then we check whether we were
        # given a max length for a particular field and padding key.  If we were, we use that
        # instead of the instance-based one.
        if verbose:
            logger.info("Padding dataset of size %d to lengths %s", len(self.instances), str(padding_lengths))
            logger.info("Getting max lengths from instances")
        instance_padding_lengths = self.get_padding_lengths()
        if verbose:
            logger.info("Instance max lengths: %s", str(instance_padding_lengths))
        lengths_to_use: Dict[str, Dict[str, int]] = defaultdict(dict)
        for field_name, instance_field_lengths in instance_padding_lengths.items():
            for padding_key in instance_field_lengths.keys():
                if padding_lengths[field_name].get(padding_key) is not None:
                    lengths_to_use[field_name][padding_key] = padding_lengths[field_name][padding_key]
                else:
                    lengths_to_use[field_name][padding_key] = instance_field_lengths[padding_key]

        # Now we actually pad the instances to numpy arrays.
        field_arrays: Dict[str, list] = defaultdict(list)
        if verbose:
            logger.info("Now actually padding instances to length: %s", str(lengths_to_use))
        for instance in self.instances:
            for field, arrays in instance.as_array_dict(lengths_to_use).items():
                field_arrays[field].append(arrays)

        # Finally, we combine the arrays that we got for each instance into one big array (or set
        # of arrays) per field.  The `Field` classes themselves have the logic for batching the
        # arrays together, so we grab a dictionary of field_name -> field class from the first
        # instance in the dataset.
        field_classes = self.instances[0].fields
        final_fields = {}
        for field_name, field_array_list in field_arrays.items():
            final_fields[field_name] = field_classes[field_name].batch_arrays(field_array_list)
        return final_fields
init_handler.py | LucidumInc/update-manager | 0 | 12798424 | <reponame>LucidumInc/update-manager
import os
import shutil
import sys
from loguru import logger
from config_handler import get_lucidum_dir
from exceptions import AppError
def change_permissions_recursive(path, mode):
    """Apply *mode* (e.g. 0o777) to *path* and to every directory and file beneath it."""
    os.chmod(path, mode)
    for parent, subdirs, filenames in os.walk(path):
        for entry in subdirs + filenames:
            os.chmod(os.path.join(parent, entry), mode)
def create_directory(dir_):
    """Ensure *dir_* exists; return True if it had to be created, False if it already existed."""
    if os.path.exists(dir_):
        logger.info("Directory exists: {}", dir_)
        return False
    os.makedirs(dir_)
    logger.info("Created directory: {}", dir_)
    return True
def copy_file(from_, to, force=False):
    """Copy *from_* to *to*.

    An existing destination is overwritten only when *force* is True; otherwise
    it is left untouched and the skip is logged. (The original duplicated the
    copy-and-log logic across three branches; this collapses them into one.)
    """
    if os.path.exists(to) and not force:
        logger.info("File exists: {}", to)
        return
    shutil.copyfile(from_, to)
    logger.info("Copied {} file to {} file", from_, to)
def create_mongo_directory(base_dir):
    """Create <base_dir>/mongo/db and return the mongo directory path."""
    mongo_dir = os.path.join(base_dir, "mongo")
    created = create_directory(os.path.join(mongo_dir, "db"))
    # Only relax permissions when the tree was just created, leaving an
    # existing installation's permissions untouched.
    if created:
        change_permissions_recursive(mongo_dir, 0o777)
    return mongo_dir
def create_mysql_directory(base_dir):
    """Create <base_dir>/mysql with db/ and config/ (seeding my_custom.cnf); return the mysql dir."""
    mysql_dir = os.path.join(base_dir, "mysql")
    created = create_directory(mysql_dir)
    create_directory(os.path.join(mysql_dir, "db"))
    config_dir = os.path.join(mysql_dir, "config")
    create_directory(config_dir)
    # copy_file does not overwrite an existing config (force defaults to False).
    copy_file(os.path.join("resources", "mysql_my_custom_cnf"), os.path.join(config_dir, "my_custom.cnf"))
    # Only relax permissions on first creation of the mysql tree.
    if created:
        change_permissions_recursive(mysql_dir, 0o777)
    return mysql_dir
def create_web_directory(base_dir):
    """Create <base_dir>/web with logs/, hostdata/ (certs) and conf/ (xml), seed
    the bundled resources, and return the web directory path."""
    web_dir = os.path.join(base_dir, "web")
    created = create_directory(web_dir)
    create_directory(os.path.join(web_dir, "app", "logs"))
    hostdata_dir = os.path.join(web_dir, "app", "hostdata")
    create_directory(hostdata_dir)
    app_dir = os.path.join(web_dir, "app")
    conf_dir = os.path.join(web_dir, "app", "conf")
    create_directory(conf_dir)
    # Seed certificates and server configuration; existing files are kept
    # because copy_file's force flag defaults to False.
    copy_file(os.path.join("resources", "server.pem"), os.path.join(hostdata_dir, "server.pem"))
    copy_file(os.path.join("resources", "server_private.pem"), os.path.join(hostdata_dir, "server_private.pem"))
    copy_file(os.path.join("resources", "server.xml"), os.path.join(conf_dir, "server.xml"))
    copy_file(os.path.join("resources", "web.xml"), os.path.join(conf_dir, "web.xml"))
    copy_file(os.path.join("resources", "index.jsp"), os.path.join(app_dir, "index.jsp"))
    # Only relax permissions on first creation of the web tree.
    if created:
        change_permissions_recursive(web_dir, 0o777)
    return web_dir
@logger.catch(onerror=lambda _: sys.exit(1))
def init():
    """Create the Lucidum directory layout (mongo, mysql, web) under the
    configured root; any uncaught error is logged and exits with status 1."""
    lucidum_dir = get_lucidum_dir()
    logger.info("Lucidum directory: {}", lucidum_dir)
    create_directory(lucidum_dir)
    create_mongo_directory(lucidum_dir)
    create_mysql_directory(lucidum_dir)
    create_web_directory(lucidum_dir)
| 1.4375 | 1 |
spinesTS/pipeline/_pipeline.py | BirchKwok/spinesTS | 2 | 12798432 | import copy
import numpy as np
from sklearn.base import RegressorMixin
from spinesTS.base import EstimatorMixin
class Pipeline(RegressorMixin, EstimatorMixin):
    """Chain a sequence of transformers with a final estimator.

    All steps except the last must be transformers (implement ``transform``
    plus ``fit`` or ``fit_transform``); the last step must implement ``fit``
    and ``predict``.

    Demo:
    '''python
    from spinesTS.pipeline import Pipeline
    from spinesTS.datasets import LoadElectricDataSets
    from sklearn.preprocessing import StandardScaler
    from spinesTS.nn import TCN1D

    X_train, X_test, y_train, y_test = LoadElectricDataSets().split_ds()
    pp = Pipeline([
        ('sc', StandardScaler()),
        ('tcn', TCN1D(30, 30))
    ])
    pp.fit(X_train, y_train)
    y_hat = pp.predict(X_test)
    print(pp.score(X_test, y_test))
    '''
    """

    def __init__(self, steps: [tuple]):
        """Store and validate the pipeline steps.

        Parameters
        ----------
        steps : non-empty list of (name, estimator) tuples.
        """
        # The list must be non-empty and every element must be a tuple.
        assert 0 < len(steps) == np.sum([isinstance(i, tuple) for i in steps])
        self._names, self._estimators = zip(*steps)
        self._model = self._estimators[-1]
        # validate steps
        self._validate_steps()
        self._init_steps = steps
        # Mapping of step name -> estimator class name, used by get_params().
        self._order_steps = dict()
        for n, c in zip(self._names, self._estimators):
            self._order_steps[n] = c.__class__.__name__

    def fit(self, train_x, train_y, eval_set=None, **kwargs):
        """Fit each transformer in order on the training data, then fit the
        final estimator on the transformed data. An optional eval_set (an
        (x, y) tuple or a one-element list of such tuples) is run through the
        same fitted transformers before being handed to the estimator."""
        x = copy.deepcopy(train_x)
        y = copy.deepcopy(train_y)
        for step in range(len(self._estimators) - 1):
            transformer = self._estimators[step]
            # BUG FIX: the original tested hasattr(t, 'fit_transform') on the
            # loop *index* t (an int), so the fit_transform branch never ran.
            if hasattr(transformer, 'fit_transform'):
                x = transformer.fit_transform(x)
            else:
                transformer.fit(x)
                x = transformer.transform(x)

            if eval_set is not None:
                # Transform the evaluation data with the transformer that was
                # just fitted, preserving the caller's tuple-vs-list shape.
                _target = copy.deepcopy(eval_set)
                if isinstance(_target[0], tuple):
                    ex, ey = _target[0]
                    ex = transformer.transform(ex)
                    eval_set = [(ex, ey)]
                else:
                    ex, ey = _target
                    ex = transformer.transform(ex)
                    eval_set = (ex, ey)

        self._fit(x, y, eval_set=eval_set, **kwargs)
        return self

    def predict(self, x_pred, **kwargs):
        """Apply the fitted transformers to x_pred and predict with the final estimator."""
        x = copy.deepcopy(x_pred)
        for transformer in self._estimators[:-1]:
            x = transformer.transform(x)

        return self._model.predict(x, **kwargs)

    def get_params(self):
        """Return a deep copy of the {step name: estimator class name} mapping."""
        return copy.deepcopy(self._order_steps)

    def _validate_steps(self):
        """Raise TypeError when an intermediate step is not a transformer or
        the final step cannot fit/predict."""
        transformers = self._estimators[:-1]
        estimator = self._model

        for t in transformers:
            if t is None:
                continue
            if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
                t, "transform"
            ):
                raise TypeError(
                    "All intermediate steps should be "
                    "transformers and implement fit and transform "
                    "'%s' (type %s) doesn't" % (t, type(t))
                )

        if (
            estimator is not None
            and not hasattr(estimator, "fit") and not hasattr(estimator, "predict")
        ):
            raise TypeError(
                "Last step of Pipeline should implement fit and predict"
                "'%s' (type %s) doesn't" % (estimator, type(estimator))
            )

    def save_model(self, path):
        """Persist the fitted pipeline to *path*. Not implemented yet."""
        pass
| 2.15625 | 2 |
src/fhir_types/FHIR_Patient_Contact.py | anthem-ai/fhir-types | 2 | 12798440 | from typing import Any, List, Literal, TypedDict
from .FHIR_Address import FHIR_Address
from .FHIR_CodeableConcept import FHIR_CodeableConcept
from .FHIR_ContactPoint import FHIR_ContactPoint
from .FHIR_Element import FHIR_Element
from .FHIR_HumanName import FHIR_HumanName
from .FHIR_Period import FHIR_Period
from .FHIR_Reference import FHIR_Reference
from .FHIR_string import FHIR_string
# A contact party (e.g. guardian, partner, friend) for a patient, from the FHIR
# Patient resource (Patient.contact backbone element).
FHIR_Patient_Contact = TypedDict(
    "FHIR_Patient_Contact",
    {
        # Unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces.
        "id": FHIR_string,
        # Additional content defined by implementations (the standard FHIR extension mechanism).
        "extension": List[Any],
        # Extensions that cannot be ignored: they modify the meaning of the element that contains them, so processors are required to check for them.
        "modifierExtension": List[Any],
        # The nature of the relationship between the patient and the contact person.
        "relationship": List[FHIR_CodeableConcept],
        # A name associated with the contact person.
        "name": FHIR_HumanName,
        # A contact detail for the person, e.g. a telephone number or an email address.
        "telecom": List[FHIR_ContactPoint],
        # Address for the contact person.
        "address": FHIR_Address,
        # Administrative Gender - the gender that the contact person is considered to have for administration and record keeping purposes.
        "gender": Literal["male", "female", "other", "unknown"],
        # Extensions for gender
        "_gender": FHIR_Element,
        # Organization on behalf of which the contact is acting or for which the contact is working.
        "organization": FHIR_Reference,
        # The period during which this contact person or organization is valid to be contacted relating to this patient.
        "period": FHIR_Period,
    },
    # total=False: every key is optional, matching FHIR's 0..1 / 0..* cardinalities.
    total=False,
)
| 1.164063 | 1 |
catalog/category.py | cyrildzumts/django-catalog | 0 | 12798448 | <reponame>cyrildzumts/django-catalog<gh_stars>0
# from django.db import models
from catalog.models import Category, Product
class CategoryEntry:
    """Wrapper pairing a Category with its child categories."""

    def __init__(self, category):
        # NOTE(review): `get_categories` is referenced without parentheses, so
        # `children` holds either a property value or a bound method -- confirm
        # against the Category model's definition.
        self.current = category
        self.children = self.current.get_categories

    def is_parent(self):
        # True when the children attribute was populated.
        return self.children is not None

    def products(self):
        # NOTE(review): filter() with no arguments returns *all* products;
        # presumably this should be restricted to self.current -- confirm.
        return Product.objects.filter()
class CategoryTree(object):
    # Placeholder for a future tree of category entries; no behavior yet.
    pass
def has_children(category):
    """Return True when *category* has at least one child category.

    BUG FIX: the original returned ``filter(...) is None``, which is always
    False because a Django QuerySet is never None (and the sense was inverted
    relative to the function's name). ``exists()`` asks the database directly.
    """
    return Category.objects.filter(parent=category).exists()
def get_children_categories(category, cat_list=None):
    """Recursively collect the descendant categories of *category*.

    BUG FIXES:
    - ``cat_list=[]`` was a mutable default argument shared across calls;
      it is now created per call (passing an explicit list still works).
    - ``cat_list.append()`` was called with no argument (TypeError). The
      children are now actually appended and recursed into.

    :param category: the Category whose descendants are wanted
    :param cat_list: optional accumulator list to extend in place
    :return: the accumulator list (also mutated in place)
    """
    if cat_list is None:
        cat_list = []
    if has_children(category):
        # NOTE(review): original intent inferred from the function name —
        # confirm that a full recursive descent is desired.
        for child in Category.objects.filter(parent=category):
            cat_list.append(child)
            get_children_categories(child, cat_list)
    return cat_list
| 1.867188 | 2 |
ofstest/ofs/doctype/store/test_store.py | keithyang77/ofstest | 0 | 12798456 | <reponame>keithyang77/ofstest
# Copyright (c) 2021, mds and Contributors
# See license.txt
# import frappe
import unittest
class TestStore(unittest.TestCase):
    """Placeholder test case for the Store doctype; no behaviour covered yet."""
    pass
| 0.71875 | 1 |
CircadianDesktops/app.py | Luke943/CircadianDesktops | 0 | 12798464 | <reponame>Luke943/CircadianDesktops<gh_stars>0
"""
Main script for Circadian Desktops app.
Settings file and logo images are stored locally.
Contains MainWindow class and script to run app.
"""
import os
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import custom_qt
import functions
from ui_mainwindow import Ui_MainWindow
settingsFile = "settings.txt"
logoFile = "Icons\\logo.png"
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    """
    MainWindow class for the UI.
    Inherits from Ui_MainWindow, which contains the layout of the widgets.
    """
    def __init__(self, parent=None, settings=None):
        """Build the window, wire up widgets/tray/timers and load settings.

        :param parent: optional Qt parent widget
        :param settings: path to the settings file (read and later written back)
        """
        # setup
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.settingsPath = settings
        self.isClosedFromTray = False
        self.settings = functions.get_settings(settings)
        self.activeImage = ''  # path of the wallpaper currently applied
        # connect widgets to methods
        self.btnSelectDayImg.clicked.connect(
            lambda: self.get_image(self.labelDayImg))
        self.btnSelectDDImg.clicked.connect(
            lambda: self.get_image(self.labelDDImg))
        self.btnSelectNightImg.clicked.connect(
            lambda: self.get_image(self.labelNightImg))
        self.comboBox.currentIndexChanged.connect(self.set_background_style)
        self.spinShuffleTime.valueChanged.connect(self.set_shuffle_time)
        self.radioDefaultTimes.clicked.connect(self.default_times)
        self.radioCustomTimes.clicked.connect(self.custom_times)
        self.boxDark.stateChanged.connect(self.set_palette)
        self.boxMinimize.stateChanged.connect(self.minimize_behaviour)
        self.boxStartup.stateChanged.connect(self.startup_behaviour)
        # tray icon
        self.trayIcon = QtWidgets.QSystemTrayIcon()
        self.trayIcon.setIcon(QtGui.QIcon(logoFile))
        self.trayIcon.setToolTip("Circadian Desktops")
        self.trayIcon.activated.connect(self.__icon_activated)
        self.trayIcon.show()
        self.trayMenu = QtWidgets.QMenu()
        self.trayMenu.addAction("Open Circadian Desktops", self.show_window)
        self.trayMenu.addSeparator()
        self.trayMenu.addAction(
            "Exit Circadian Desktops", self.close_from_tray)
        self.trayIcon.setContextMenu(self.trayMenu)
        # timers
        self.mainTimer = QtCore.QTimer()   # re-checks the active wallpaper every minute
        self.mainTimer.timeout.connect(self.set_desktop)
        self.shuffleTimer = QtCore.QTimer()  # rotates slideshow images
        self.shuffleTimer.timeout.connect(self.shuffle_images)
        # populate data
        self.set_image(self.settings['labelDayImg'], self.labelDayImg)
        self.set_image(self.settings['labelDDImg'], self.labelDDImg)
        self.set_image(self.settings['labelNightImg'], self.labelNightImg)
        self.load_times()
        self.load_preferences()
        self.set_desktop()
        self.set_background_style()
    def set_image(self, fileName: str, imageLbl: QtWidgets.QLabel):
        """Show *fileName* in the preview label and remember it in settings.

        In slideshow mode, fileName is treated as a folder and a random
        image from it is picked instead.
        """
        if self.settings['isSlideshow']:
            fileName = functions.random_image(fileName)
        pixmap = QtGui.QPixmap(fileName)
        pixmap = pixmap.scaled(
            imageLbl.width(), imageLbl.height(), QtCore.Qt.KeepAspectRatio)
        imageLbl.setPixmap(pixmap)
        imageLbl.setAlignment(QtCore.Qt.AlignCenter)
        self.settings[imageLbl.objectName()] = fileName
    def get_image(self, imageLbl: QtWidgets.QLabel):
        """Ask the user for an image file and apply it to *imageLbl*."""
        fileName, _ = QtWidgets.QFileDialog.getOpenFileName(
            None, "Select image", "", "Image files (*.png *.jpg *.jpeg *.bmp)")
        if fileName:
            self.set_image(fileName, imageLbl)
            self.set_desktop()
    def shuffle_images(self):
        """Pick fresh random images for all three slots and restart the timer."""
        self.set_image(self.settings['labelDayImg'], self.labelDayImg)
        self.set_image(self.settings['labelDDImg'], self.labelDDImg)
        self.set_image(self.settings['labelNightImg'], self.labelNightImg)
        self.shuffleTimer.start(self.settings['shuffleTime'] * 60000)
        self.set_desktop()
    def set_desktop(self):
        """Apply the wallpaper matching the current time of day.

        Dawn->day and dusk->night windows use the dawn/dusk image; the
        wallpaper is only rewritten when it actually changes.
        """
        now = QtCore.QTime.currentTime()
        if self.timeDawn.time() < now <= self.timeDay.time():
            imageFile = self.settings['labelDDImg']
        elif self.timeDay.time() < now <= self.timeDusk.time():
            imageFile = self.settings['labelDayImg']
        elif self.timeDusk.time() < now <= self.timeNight.time():
            imageFile = self.settings['labelDDImg']
        else:
            imageFile = self.settings['labelNightImg']
        if imageFile != self.activeImage:
            functions.set_desktop(imageFile)
            self.activeImage = imageFile
        self.mainTimer.start(60000)  # re-evaluate every minute
    def set_background_style(self):
        """Toggle between single-image and slideshow modes from the combo box."""
        if self.comboBox.currentText() == 'single image':
            self.shuffleTimer.stop()
            self.settings['isSlideshow'] = 0
            self.spinShuffleTime.setReadOnly(True)
        elif self.comboBox.currentText() == 'slideshow from folders':
            self.shuffleTimer.start(self.settings['shuffleTime'] * 60000)
            self.settings['isSlideshow'] = 1
            self.spinShuffleTime.setReadOnly(False)
    def set_shuffle_time(self):
        """Store the new shuffle interval and shorten the running timer if needed."""
        newTime = self.spinShuffleTime.value() * 60000
        if self.shuffleTimer.remainingTime() > newTime:
            self.shuffleTimer.start(newTime)
        self.settings['shuffleTime'] = self.spinShuffleTime.value()
    def load_times(self):
        """Restore dawn/day/dusk/night times from settings (custom or default)."""
        if int(self.settings['isCustomTimes']):
            self.timeDawn.setTime(QtCore.QTime(
                int(self.settings['dawnhour']), int(self.settings['dawnmin']), 0))
            self.timeDay.setTime(QtCore.QTime(
                int(self.settings['dayhour']), int(self.settings['daymin']), 0))
            self.timeDusk.setTime(QtCore.QTime(
                int(self.settings['duskhour']), int(self.settings['duskmin']), 0))
            self.timeNight.setTime(QtCore.QTime(
                int(self.settings['nighthour']), int(self.settings['nightmin']), 0))
            self.custom_times()
            self.radioCustomTimes.setChecked(True)
        else:
            self.default_times()
    def custom_times(self):
        """Unlock the four time editors for manual entry."""
        self.timeDawn.setReadOnly(False)
        self.timeDay.setReadOnly(False)
        self.timeDusk.setReadOnly(False)
        self.timeNight.setReadOnly(False)
    def default_times(self):
        """Fill the time editors from computed sun times and lock them."""
        d = functions.get_times()
        self.timeDawn.setTime(QtCore.QTime(
            d['dawn'].hour, d['dawn'].minute, 0))
        self.timeDay.setTime(QtCore.QTime(
            d['sunrise'].hour, d['sunrise'].minute, 0))
        self.timeDusk.setTime(QtCore.QTime(
            d['sunset'].hour, d['sunset'].minute, 0))
        self.timeNight.setTime(QtCore.QTime(
            d['dusk'].hour, d['dusk'].minute, 0))
        self.timeDawn.setReadOnly(True)
        self.timeDay.setReadOnly(True)
        self.timeDusk.setReadOnly(True)
        self.timeNight.setReadOnly(True)
    def load_preferences(self):
        """Apply persisted UI preferences (slideshow, dark mode, tray, startup)."""
        if self.settings['isSlideshow']:
            self.comboBox.setCurrentIndex(1)
        else:
            self.spinShuffleTime.setReadOnly(True)
        self.spinShuffleTime.setValue(self.settings['shuffleTime'])
        if self.settings['isDarkMode']:
            self.boxDark.setChecked(True)
            self.set_palette()
        if self.settings['minimizeToTray']:
            self.boxMinimize.setChecked(True)
        else:
            self.isClosedFromTray = True
        if self.settings['runOnStartup']:
            self.boxStartup.setChecked(True)
    def set_palette(self):
        """Switch between the dark and the default Qt palette."""
        if self.boxDark.isChecked():
            self.setPalette(custom_qt.DarkPalette())
            self.settings['isDarkMode'] = 1
        else:
            self.setPalette(QtGui.QPalette())
            self.settings['isDarkMode'] = 0
    def startup_behaviour(self):
        """Enable/disable launching the app when the OS starts."""
        if self.boxStartup.isChecked():
            functions.run_on_startup(True)
            self.settings['runOnStartup'] = 1
        else:
            functions.run_on_startup(False)
            self.settings['runOnStartup'] = 0
    def minimize_behaviour(self):
        """Choose whether closing the window hides to tray or exits."""
        if self.boxMinimize.isChecked():
            self.isClosedFromTray = False
            self.settings['minimizeToTray'] = 1
        else:
            self.isClosedFromTray = True
            self.settings['minimizeToTray'] = 0
    def show_window(self):
        """Restore the window from the tray and bring it to the front."""
        functions.set_background_priority(False)
        getattr(self, "raise")()  # 'raise' is a Python keyword, hence getattr
        self.activateWindow()
        self.setWindowState(QtCore.Qt.WindowNoState)
        self.show()
    def close_from_tray(self):
        """Really exit the app (bypasses the hide-to-tray behaviour)."""
        self.isClosedFromTray = True
        self.close()
    def closeEvent(self, event):
        """Persist settings; either exit or hide to tray depending on preference."""
        if self.radioCustomTimes.isChecked():
            self.settings['isCustomTimes'] = 1
            self.settings['dawnhour'] = self.timeDawn.time().hour()
            self.settings['dawnmin'] = self.timeDawn.time().minute()
            self.settings['dayhour'] = self.timeDay.time().hour()
            self.settings['daymin'] = self.timeDay.time().minute()
            self.settings['duskhour'] = self.timeDusk.time().hour()
            self.settings['duskmin'] = self.timeDusk.time().minute()
            self.settings['nighthour'] = self.timeNight.time().hour()
            self.settings['nightmin'] = self.timeNight.time().minute()
        else:
            self.settings['isCustomTimes'] = 0
        functions.write_settings(self.settingsPath, self.settings)
        if self.isClosedFromTray:
            event.accept()
        else:
            # Hide to tray instead of exiting.
            event.ignore()
            self.hide()
            functions.set_background_priority(True)
    def __icon_activated(self, reason):
        """Reopen the window when the tray icon is clicked or double-clicked."""
        if reason == QtWidgets.QSystemTrayIcon.DoubleClick or reason == QtWidgets.QSystemTrayIcon.Trigger:
            self.show_window()
if __name__ == "__main__":
    os.chdir(os.path.dirname(os.path.abspath(__file__))) # To pick up settings & images
    functions.set_process_explicit() # So Windows uses logo icon
    app = QtWidgets.QApplication([])
    ui = MainWindow(settings=settingsFile)
    app.setStyle('fusion')
    # '/noshow' starts the app minimized to the tray (e.g. from OS startup).
    if '/noshow' in sys.argv:
        functions.set_background_priority(True)
    else:
        ui.show()
    app.setWindowIcon(QtGui.QIcon(logoFile))
    ui.setWindowIcon(QtGui.QIcon(logoFile))
    sys.exit(app.exec_())
| 1.351563 | 1 |
app/log.py | barry-ran/werobot | 1 | 12798472 | import os
import logging
from logging import handlers
from werkzeug.exceptions import InternalServerError
basedir = os.path.abspath(os.path.dirname(__file__))
def handle_error(error):
    """Log an unhandled InternalServerError and hand it back to Flask."""
    app_logger = Log.logger()
    app_logger.error(error)
    return error
class Log:
    """Small helper that attaches a daily-rotating file handler to a Flask app."""
    # Directory and base filename for the rotating log files.
    LOG_PATH = os.path.join(basedir, 'logs')
    LOG_NAME = os.path.join(LOG_PATH, 'log.txt')
    LOG_LEVEL = 'INFO'
    # Set by init_app; used by logger() to reach the app's logger.
    current_app = None
    @staticmethod
    def init_app(app):
        """Configure file logging for *app* and register the 500 handler."""
        Log.current_app = app
        if not os.path.exists(Log.LOG_PATH):
            os.makedirs(Log.LOG_PATH)
        # Rotate (rename) the log file daily, keeping all backups.
        file_handler = logging.handlers.TimedRotatingFileHandler(Log.LOG_NAME, when='D', interval=1, backupCount=0, encoding='utf-8')
        file_handler.suffix = '%Y-%m-%d.log'
        # Set the handler's own level: records below it are ignored by this
        # handler (a logger may have several handlers).
        # file_handler writes records to the log file.
        file_handler.setLevel(Log.LOG_LEVEL)
        fmt = '%(asctime)s-%(levelname)s-%(filename)s-%(funcName)s-%(lineno)s: %(message)s'
        formatter = logging.Formatter(fmt)
        file_handler.setFormatter(formatter)
        # Set the logger's level: only records at or above it reach handlers.
        app.logger.setLevel('DEBUG')
        app.logger.addHandler(file_handler)
        # In DEBUG mode Flask re-raises, so handle_error is not reached.
        app.register_error_handler(InternalServerError, handle_error)
    @staticmethod
    def logger():
        """Return the logger of the app passed to init_app."""
        return Log.current_app.logger
keras_classification_test.py | redkfa/PDF_classification | 0 | 12798480 | <reponame>redkfa/PDF_classification
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.applications.vgg19 import VGG19
from keras.models import Model
from keras.layers.normalization import BatchNormalization
import numpy as np
from keras.models import load_model
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
from keras.models import Sequential
import tensorflow as tf
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
import os
from sklearn.metrics import auc
# Evaluation script: runs a saved Keras classifier over a labelled image
# folder and prints a confusion matrix + classification report.
#validation_data_dir = r'C:\Users\randy\PycharmProjects\PJ1\classifiaction\test'
validation_data_dir = r'C:\Users\randy\PycharmProjects\PJ1\classifiaction\test5'
#C:\Users\randy\Downloads\betterdataset\test 494#
#C:\Users\randy\PycharmProjects\PJ1\classifiaction\test2 #16
# Count every file under the validation directory (assumes only images live there).
test_count =sum([len(files) for r, d, files in os.walk(validation_data_dir)])
nb_validation_samples =test_count
batch_size =8
validation_steps= nb_validation_samples/batch_size
print(test_count)
print(validation_steps)
img_width, img_height = 224,224
my_model = load_model('VO_2_classification_model.h5')
# Only rescaling at evaluation time — no augmentation.
test_datagen = ImageDataGenerator(rescale=1. / 255)
# shuffle=False keeps predictions aligned with validation_generator.classes.
validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    shuffle=False,
    batch_size=batch_size)
Y_pred = my_model.predict_generator(validation_generator,len(validation_generator),verbose=1)
y_pred = np.argmax(Y_pred, axis=1)
y_true = validation_generator.classes
print('Confusion Matrix')
print(confusion_matrix(validation_generator.classes, y_pred))
print('Classification Report')
target_names = ['3view', 'others']
print(classification_report(y_true, y_pred, target_names=target_names))
'''
loss, acc = my_model.evaluate_generator(validation_generator, steps=len(validation_generator), verbose=1)
print('test acc = %.3f'%(acc))
print('test loss = %.3f'%(loss))
'''
'''
y_pred_keras = Y_pred.ravel()
fpr_keras, tpr_keras, thresholds_keras = roc_curve(validation_generator.classes, y_pred_keras)
auc_keras = auc(fpr_keras,tpr_keras)
print(auc_keras)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_keras, tpr_keras, label='ROC (area = {:.3f})'.format(auc_keras))
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
print(auc_keras)
'''
'''
#fpr, tpr, thresholds = metrics.roc_curve(y_true,Y_pred, pos_label=2)
plt.plot(fpr_keras,tpr_keras,marker = 'o')
plt.show()
#AUC = auc(fpr, tpr)
'''
kyokigo/migrations/0002_auto_20180405_0755.py | seoworks0/docker_test2 | 0 | 12798488 | # Generated by Django 2.0.3 on 2018-04-05 07:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations). Do not edit by hand;
    # schema changes belong in a new migration.
    dependencies = [
        ('kyokigo', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='kyokigo_input',
            name='ownurl',
        ),
        migrations.AlterField(
            model_name='kyokigo_input',
            name='text',
            field=models.CharField(max_length=100, verbose_name='テキスト'),
        ),
    ]
| 0.800781 | 1 |
ServerComponent/DataLayer/DataSetEntry.py | CDU55/FakeNews | 0 | 12798496 | <reponame>CDU55/FakeNews
class FacebookDataSetEntry:
    """A labelled Facebook post sample (features + classification label)."""

    def __init__(self, followers_number, likes_number, comments_number, share_number, grammar_index, subject_relevance,
                 label):
        # Bind all features (and the label) onto the instance in one go.
        (self.followers_number, self.likes_number, self.comments_number,
         self.share_number, self.grammar_index, self.subject_relevance,
         self.label) = (followers_number, likes_number, comments_number,
                        share_number, grammar_index, subject_relevance, label)
class FacebookDataSetEntryUnlabeled:
    """An unlabelled Facebook post sample (features only)."""

    def __init__(self, followers_number, likes_number, comments_number, share_number, grammar_index, subject_relevance):
        # Bind all features onto the instance in one go.
        (self.followers_number, self.likes_number, self.comments_number,
         self.share_number, self.grammar_index,
         self.subject_relevance) = (followers_number, likes_number,
                                    comments_number, share_number,
                                    grammar_index, subject_relevance)
class TwitterDataSetEntry:
    """A labelled Twitter post sample (features + classification label)."""

    def __init__(self, followers_number, verified, tweets_number, retweets, quote_tweets, likes_number, grammar_index,
                 subject_relevance, label):
        # Bind all features (and the label) onto the instance in one go.
        (self.followers_number, self.verified, self.tweets_number,
         self.retweets, self.quote_tweets, self.likes_number,
         self.grammar_index, self.subject_relevance,
         self.label) = (followers_number, verified, tweets_number, retweets,
                        quote_tweets, likes_number, grammar_index,
                        subject_relevance, label)
class TwitterDataSetEntryUnlabeled:
    """An unlabelled Twitter post sample (features only)."""

    def __init__(self, followers_number, verified, tweets_number, retweets, quote_tweets, likes_number, grammar_index,
                 subject_relevance):
        # Bind all features onto the instance in one go.
        (self.followers_number, self.verified, self.tweets_number,
         self.retweets, self.quote_tweets, self.likes_number,
         self.grammar_index,
         self.subject_relevance) = (followers_number, verified, tweets_number,
                                    retweets, quote_tweets, likes_number,
                                    grammar_index, subject_relevance)
| 1.664063 | 2 |
103. invert_binary_tree.py | chandravenky/puzzles | 0 | 12798504 | <gh_stars>0
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution(object):
    def flip(self, node):
        """Recursively swap the left/right children of every node under *node*."""
        if node is None:
            return None
        # Swap the children with tuple assignment, then descend both sides.
        node.left, node.right = node.right, node.left
        self.flip(node.left)
        self.flip(node.right)

    def invertTree(self, root):
        """
        :type root: TreeNode
        :rtype: TreeNode
        """
        self.flip(root)
        return root
| 2.96875 | 3 |
Fullbit.py | Mizogg/Fillbit-Bitcoin-Address | 4 | 12798512 | <reponame>Mizogg/Fillbit-Bitcoin-Address
#Fullbit.py =====Made by <EMAIL> Donations 3P7PZLbwSt2bqUMsHF9xDsaNKhafiGuWDB =====
from bitcoinaddress import Wallet
import random
# Brute-force scanner: draws random private keys in a user-chosen bit range
# and checks the five derived address formats against a target address list.
filename ='puzzle.txt'
with open(filename) as f:
    line_count = 0
    for line in f:
        line != "\n"  # NOTE(review): no-op expression — blank lines are still counted
        line_count += 1
with open(filename) as file:
    add = file.read().split()
    add = set(add)  # set membership makes the per-key lookups O(1)
print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
x=int(input("'Start range in BITs 0 or Higher(Puzzle StartNumber) -> "))
a = 2**x
y=int(input("Stop range Max in BITs 256 Max (Puzzle StopNumber) -> "))
b = 2**y
print("Starting search... Please Wait min range: " + str(a))
print("Max range: " + str(b))
print("==========================================================")
print('Total Bitcoin Addresses Loaded and Checking : ',str (line_count))
count=0
total=0
while True:
    count+=1
    total+=5  # five address formats checked per key
    ran=random.randrange(a,b)
    HEX = "%064x" % ran  # zero-padded 64-hex-digit private key
    wallet = Wallet(HEX)
    uaddr = wallet.address.__dict__['mainnet'].__dict__['pubaddr1'] #Legacy uncompressed address
    caddr = wallet.address.__dict__['mainnet'].__dict__['pubaddr1c'] #Legacy compressed address
    saddr = wallet.address.__dict__['mainnet'].__dict__['pubaddr3'] #segwit_address
    bcaddr = wallet.address.__dict__['mainnet'].__dict__['pubaddrbc1_P2WPKH']
    bc1addr = wallet.address.__dict__['mainnet'].__dict__['pubaddrbc1_P2WSH']
    wif = wallet.key.__dict__['mainnet'].__dict__['wif']
    wifc = wallet.key.__dict__['mainnet'].__dict__['wifc']
    #print('\nPrivatekey (dec): ', ran,'\nPrivatekey (hex): ', HEX, '\nPrivatekey Uncompressed: ', wif, '\nPrivatekey compressed: ', wifc, '\nPublic Address 1 Uncompressed: ', uaddr, '\nPublic Address 1 Compressed: ', caddr, '\nPublic Address 3 Segwit: ', saddr, '\nPublic Address bc1 P2WPKH: ', bcaddr, '\nPublic Address bc1 P2WSH: ', bc1addr)
    print('Scan : ', count , ' : Total : ', total, ' : HEX : ', HEX, end='\r')
    if caddr in add or uaddr in add or saddr in add or bcaddr in add or bc1addr in add:
        # Hit: persist every representation of the matching key.
        print('\nMatch Found')
        f=open("winner.txt","a")
        f.write('\nPrivatekey (dec): ' + str(ran))
        f.write('\nPrivatekey (hex): ' + HEX)
        f.write('\nPrivatekey Uncompressed: ' + wif)
        f.write('\nPrivatekey compressed: ' + wifc)
        f.write('\nPublic Address 1 Compressed: ' + caddr)
        f.write('\nPublic Address 1 Uncompressed: ' + uaddr)
        f.write('\nPublic Address 3 Segwit: ' + saddr)
        f.write('\nPublic Address bc1 P2WPKH: ' + bcaddr)
        f.write('\nPublic Address bc1 P2WSH: ' + bc1addr)
        f.write('\n =====Made by mizogg.co.uk Donations 3P7PZLbwSt2bqUMsHF9xDsaNKhafiGuWDB =====' )
        f.close()
christmas_tree..py | SmashedFrenzy16/christmas-tree | 0 | 12798520 | import turtle
# Draws a Christmas tree (stump, three stacked triangles, star) with turtle.
s = turtle.Screen()
t = turtle.Turtle()
s.title("Christmas Tree")
s.setup(width=800, height=600)
# Title on the window
pen = turtle.Turtle()
pen.speed(0)
pen.color("black")
pen.penup()
pen.hideturtle()
pen.goto(0, 260)
pen.write("Christmas Tree", align="center",font=("Arial", 24, "normal"))
# Starting position
t.up()
t.rt(90)
t.fd(100)
t.lt(90)
t.down()
# Stump
t.color("brown")
t.begin_fill()
t.fd(40)
t.lt(90)
t.fd(60)
t.lt(90)
t.fd(40)
t.lt(90)
t.fd(60)
t.end_fill()
t.up()
# First triangle
t.lt(180)
t.fd(60)
t.lt(90)
t.fd(20)
t.down()
t.color("green")
t.begin_fill()
t.rt(180)
t.fd(80)
t.lt(120)
t.fd(80)
t.lt(120)
t.fd(80)
t.end_fill()
t.up()
# Second Triangle
t.lt(180)
t.fd(80)
t.lt(120)
t.lt(90)
t.fd(20)
t.rt(90)
t.down()
t.begin_fill()
t.fd(35)
t.rt(120)
t.fd(70)
t.rt(120)
t.fd(70)
t.rt(120)
t.fd(35)
t.end_fill()
t.up()
# Thrid Triangle
t.fd(35)
t.rt(120)
t.fd(70)
t.lt(120)
t.lt(90)
t.fd(20)
t.rt(90)
t.down()
t.begin_fill()
t.fd(30)
t.rt(120)
t.fd(60)
t.rt(120)
t.fd(60)
t.rt(120)
t.fd(30)
t.end_fill()
t.up()
# Star
t.fd(30)
t.rt(120)
t.fd(60)
t.lt(120)
t.rt(180)
t.lt(90)
t.fd(15)
t.rt(90)
t.back(20)
t.color("yellow")
t.down()
t.begin_fill()
# Classic 5-point star: five segments turning 144 degrees each.
for i in range(5):
    t.forward(40)
    t.right(144)
t.end_fill()
t.hideturtle()
# Keep the window alive and responsive (no mainloop() used).
while True:
    s.update()
| 2.453125 | 2 |
server/attendance/admin.py | CS305-software-Engineering/vehicle-attendance-system | 1 | 12798528 | from django.contrib import admin
from .models import *
# Register your models here.
# Expose the attendance models in the Django admin with default ModelAdmin.
admin.site.register(Vehicle)
admin.site.register(VehicleLogging)
admin.site.register(RegisteredUserLogging)
admin.site.register(VisitorUserLogging)
Day41-55/code/oa/hrs/migrations/0002_auto_20180523_0923.py | xie186/Python-100-Days-EN | 6 | 12798536 | # Generated by Django 2.0.5 on 2018-05-23 01:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hrs', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='dept',
name='excellent',
field=models.BooleanField(default=0, verbose_name='是否优秀'),
),
migrations.AlterField(
model_name='dept',
name='location',
field=models.CharField(max_length=10, verbose_name='部门所在地'),
),
migrations.AlterField(
model_name='dept',
name='name',
field=models.CharField(max_length=20, verbose_name='部门名称'),
),
migrations.AlterField(
model_name='dept',
name='no',
field=models.IntegerField(primary_key=True, serialize=False, verbose_name='部门编号'),
),
migrations.AlterField(
model_name='emp',
name='comm',
field=models.DecimalField(blank=True, decimal_places=2, max_digits=7, null=True),
),
migrations.AlterField(
model_name='emp',
name='mgr',
field=models.IntegerField(blank=True, null=True),
),
]
| 0.890625 | 1 |
Web/chat/chatserver.py | kasztp/python-lessons | 35 | 12798544 | import logging
from time import time
from flask import Flask, request
# Response header for plain-text replies.
PLAIN_HEADER = {'Content-Type': 'text/plain; charset=utf-8'}
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(threadName)s %(message)s')
log = logging.getLogger('chatserver')
app = Flask(__name__)
# In-memory message store: (timestamp, client_ip, author, text) tuples.
# NOTE(review): not thread-safe and lost on restart — acceptable for a demo.
messages = []
@app.route('/post/<who>/<message>')
def post_message(who, message):
    """Append a message (with timestamp and client IP) and echo the store back."""
    messages.append((time(), request.remote_addr, who, message))
    print(messages)
    return "Message saved.\n" + str(messages), 200, PLAIN_HEADER
app.run(host='localhost', debug=True, threaded=True)
| 1.421875 | 1 |
dataflows_aws/__init__.py | frictionlessdata/dataflows-aws | 2 | 12798552 | <reponame>frictionlessdata/dataflows-aws
from .processors.change_acl_on_s3 import change_acl_on_s3
from .processors.dump_to_s3 import S3Dumper as dump_to_s3
| 0.5625 | 1 |
Arrays/OddOneOut.py | d3xt3r0/Data-Structures-And-Algorithms | 4 | 12798560 | # Odd one out from hackerearth solution
def oddOneOut(arr : list) :
    """Return the single missing value of an evenly spaced (step 2) sequence.

    The input holds all but one term of an arithmetic progression with
    common difference 2, e.g. [2, 4, 8, 10] is missing 6.

    :param arr: list of integers; sorted in place, order of input irrelevant
    :return: the missing integer

    FIX: removed a leftover debug ``print(actual_sum)`` that polluted stdout.
    """
    n = len(arr)
    arr.sort()
    summation = sum(arr)
    # Sum of the complete (n+1)-term progression starting at arr[0] with
    # step 2, via the arithmetic-series formula (n+1)/2 * (first + last).
    actual_sum = int((n+1)/2 * (2*arr[0] + (n*2)))
    return actual_sum - summation
if __name__ == '__main__' :
    # Read space-separated integers from stdin and print the missing term.
    arr = list(map(int, input("Enter the elements into the array : ").split()))
    print(oddOneOut(arr))
git_functions.py | psyonara/pylint-diff | 0 | 12798568 | import subprocess
def is_branch_merged(branch):
    """
    Checks if given branch is merged into current branch.

    :param branch: Name of branch
    :return: True/False

    BUG FIX: ``git branch`` prefixes each listed name with two spaces (or
    "* " for the current branch), so the previous raw membership test never
    matched. The markers are stripped before comparing.
    """
    proc = subprocess.Popen(["git", "branch", "--merged"], stdout=subprocess.PIPE)
    result = proc.stdout.read().decode()
    merged = [line.strip().lstrip("* ") for line in result.strip().split("\n")]
    return branch in merged
def get_file_contents_from_branch(filename, branch_name):
"""
Gets the contents of a file from a specific branch.
:param filename: Name of the file
:param branch_name: Name of the branch
:return: Contents of the file
"""
proc = subprocess.Popen(
["git", "show", "%s:%s" % (branch_name, filename)], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
def get_current_branch_name():
"""
Gets the name of the current git branch in the working directory.
:return: Name of the branch
"""
proc = subprocess.Popen(["git", "rev-parse", "--abbrev-ref", "HEAD"], stdout=subprocess.PIPE)
return proc.stdout.read().decode()
def get_changed_files(branch1, branch2):
"""
Gets a list of changed files between two branches.
:param branch1: name of first branch
:param branch2: name of second branch
:return: A list of changed files
"""
proc = subprocess.Popen(
["git", "diff", "--name-only", branch1, branch2], stdout=subprocess.PIPE
)
return proc.stdout.read().decode()
| 2.703125 | 3 |
UtilsPlot.py | felipegb94/ToFSim | 12 | 12798576 | <filename>UtilsPlot.py
"""UtilsPlot
Attributes:
colors (TYPE): Colors for plotting
plotParams (TYPE): Default plotting parameters
"""
#### Python imports
#### Library imports
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# from IPython.core import debugger
# breakpoint = debugger.set_trace
#### Local imports
import Utils
#### Default matplotlib preferences
plt.style.use('ggplot')
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
plotParams = {
'font.size': 16,
'figure.dpi': 80,
'figure.autolayout': True,
'figure.titleweight': 'bold',
'savefig.dpi': 200,
'axes.titlesize': 18, # main title
'axes.labelsize': 16, # x and y titles
'axes.titleweight': 'bold', # x and y titles
'axes.labelweight': 'bold', # x and y titles
'grid.linestyle': '--',
'grid.linewidth': 2,
'text.usetex': False,
'xtick.labelsize': 14,
'xtick.minor.visible': True,
'ytick.labelsize': 14,
'ytick.minor.visible': True,
'lines.linewidth': 2,
'lines.markersize': 8.0,
'legend.fontsize': 14,
'legend.shadow': True,
}
mpl.use('Qt4Agg', warn=False) ## Needed to allow drawing with matplotlib during debug mode
plt._INSTALL_FIG_OBSERVER = True
mpl.rcParams.update(plotParams)
plt.ion()
def PlotCodingScheme(ModFs, DemodFs):
	"""PlotCodingScheme: Create a Kx3 figure with modulation, demodulation, and the correlation.
	
	Args:
	    ModFs (numpy.ndarray): Modulation functions. N x K matrix.
	    DemodFs (numpy.ndarray): Demodulation functions. N x K matrix
	Returns:
	    plt.figure: Figure handle
	    plt.axis: Axis handle
	"""
	#### Assume the following constants
	totalEnergy = 1.
	tau = 1.
	averagePower = totalEnergy / tau
	#### Reshape to ensure needed dimensions
	## Assume that the number of elements is larger than the number of coding pairs, i.e. rows>cols
	if(ModFs.shape[0] < ModFs.shape[1]): ModFs = ModFs.transpose()
	if(DemodFs.shape[0] < DemodFs.shape[1]): DemodFs = DemodFs.transpose()
	#### Verify Inputs
	assert(ModFs.shape == DemodFs.shape), "Input Error - PlotCodingScheme: ModFs and \
	DemodFs should be the same dimensions."
	#### Set some parameters
	(N,K) = ModFs.shape
	avgPower = np.sum(ModFs[:,0])/N
	#### Set default values
	t = np.linspace(0, tau, N)
	phase = np.linspace(0, 2*np.pi,N)
	#### Reshape to ensure same dimensions
	t = t.reshape((N,))
	#### Get Correlation functions
	CorrFs = Utils.GetCorrelationFunctions(ModFs=ModFs,DemodFs=DemodFs)
	#### Plot Decomposition
	## Clear current plot
	plt.clf()
	## Get current figure
	fig = plt.gcf()
	## Add subplots and get axis array
	# One row of three panels (mod / demod / corr) per coding pair.
	for i in range(K):
		# breakpoint()
		fig.add_subplot(K,3,3*i + 1)
		fig.add_subplot(K,3,3*i + 2)
		fig.add_subplot(K,3,3*i + 3)
	axarr = fig.get_axes()
	## Make all plots
	## Calculate Avg power.
	avgPower = np.sum(ModFs[:,0]) / N
	avgPower = [avgPower for i in range(0, N)]
	## Plot ObjCorrF first so that stars don't cover the corrFs.
	for i in range(0, K):
		labelInfo = str(i)
		axarr[3*i + 0].plot(t, ModFs[:,i], label='Md-'+labelInfo,linewidth=2, color=colors[i])
		axarr[3*i + 1].plot(t, DemodFs[:,i], label='Dmd-'+labelInfo,linewidth=2, color=colors[i])
		axarr[3*i + 2].plot(phase, CorrFs[:,i], label='Crr-'+labelInfo,linewidth=2, color=colors[i])
		axarr[3*i + 0].plot(t, avgPower, '--', label='AvgPower', linewidth=3, color=colors[i])
		## Set axis labels
		axarr[3*i + 0].set_xlabel('Time')
		axarr[3*i + 1].set_xlabel('Time')
		axarr[3*i + 2].set_xlabel('Phase')
		axarr[3*i + 0].set_ylabel('Instant Power')
		axarr[3*i + 1].set_ylabel('Exposure')
		axarr[3*i + 2].set_ylabel('Magnitude')
	## Set Titles
	axarr[0].set_title('Modulation')
	axarr[1].set_title('Demodulation')
	axarr[2].set_title('Correlation')
	# ## Set ylimit so that we can see the legend
	# axarr[0].set_ylim([0,1.2*np.max(ModFs)])
	# axarr[1].set_ylim([0,1.2*np.max(DemodFs)])
	# axarr[2].set_ylim([0,1.2*np.max(CorrFs)])

	return (fig, axarr)
| 1.820313 | 2 |
homework4/problem1.py | jojonium/CS-539-Machine-Learning | 0 | 12798584 | import numpy as np
# Note: please don't import any new package. You should solve this problem using only the package(s) above.
#-------------------------------------------------------------------------
'''
Problem 1: Multi-Armed Bandit Problem (15 points)
In this problem, you will implement the epsilon-greedy method for Multi-armed bandit problem.
A list of all variables being used in this problem is provided at the end of this file.
'''
#--------------------------
def Terms_and_Conditions():
    '''
        By submitting this homework or changing this function, you agree with the following terms:
       (1) Not sharing your code/solution with any student before and after the homework due. For example, sending your code segment to another student, putting your solution online or lending your laptop (if your laptop contains your solution or your Dropbox automatically copied your solution from your desktop computer and your laptop) to another student to work on this homework will violate this term.
       (2) Not using anyone's code in this homework and building your own solution. For example, using some code segments from another student or online resources due to any reason (like too busy recently) will violate this term. Changing other's code as your solution (such as changing the variable names) will also violate this term.
       (3) When discussing with any other students about this homework, only discuss high-level ideas or use pseudo-code. Don't discuss about the solution at the code level. For example, two students discuss about the solution of a function (which needs 5 lines of code to solve) and they then work on the solution "independently", however the code of the two solutions are exactly the same, or only with minor differences (variable names are different). In this case, the two students violate this term.
      All violations of (1),(2) or (3) will be handled in accordance with the WPI Academic Honesty Policy.  For more details, please visit: https://www.wpi.edu/about/policies/academic-integrity/dishonesty
      Note: we may use the Stanford Moss system to check your code for code similarity. https://theory.stanford.edu/~aiken/moss/
      Historical Data: in one year, we ended up finding 25% of the students in that class violating this term in their homework submissions and we handled ALL of these violations according to the WPI Academic Honesty Policy. 
    '''
    #*******************************************
    # CHANGE HERE: if you have read and agree with the term above, change "False" to "True".
    # Returning True records acceptance of the honesty policy stated above.
    Read_and_Agree = True
    #*******************************************
    return Read_and_Agree
#----------------------------------------------------
'''
Given the player's memory about the previous results in the game and the action chosen and reward received at the current time step, update the player's memory.
---- Inputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
* Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before".
---- Hints: --------
* This problem can be solved using 2 line(s) of code.
'''
#---------------------
def update_memory(a, r, Rt, Ct):
    #########################################
    ## INSERT YOUR CODE HERE (3 points)
    # Fold the new reward into the running total and bump the trial count
    # for the chosen action (both arrays are updated in place).
    Rt[a] += r
    Ct[a] += 1
    #########################################
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_update_memory
--- OR ----
python3 -m nose -v test1.py:test_update_memory
--- OR ----
python -m nose -v test1.py:test_update_memory
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Explore-only) Given a multi-armed bandit game, choose an action at the current time step using explore-only strategy. Randomly pick an action with uniform distribution: equal probability for all actions.
---- Inputs: --------
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_explore(c):
    """Explore-only strategy: pick one of the ``c`` arms uniformly at random.

    :param c: number of possible actions, an integer scalar
    :return: action index drawn uniformly from {0, ..., c-1}
    """
    # Single-argument randint(c) is equivalent to randint(0, c).
    return np.random.randint(c)
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_explore
--- OR ----
python3 -m nose -v test1.py:test_choose_action_explore
--- OR ----
python -m nose -v test1.py:test_choose_action_explore
---------------------------------------------------
'''
#----------------------------------------------------
'''
(Exploit-only) Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current time step using exploit-only strategy: choose the action with the highest average reward.
---- Inputs: --------
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* If the count in Ct[i] for the i-th action is 0, we can assume the average reward for the i-th action is 0. For example, if the count Ct for 3 actions are [0,1,1], we can assume the average reward for the first action is 0.
    * You could use the argmax() function in numpy to return the index of the largest value in a vector.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action_exploit(Rt, Ct):
    """Exploit-only strategy: pick the arm with the best empirical mean reward.

    Arms that have never been pulled (count 0) are treated as having mean
    reward 0.  Ties resolve to the lowest index, per ``np.argmax``.

    :param Rt: per-action cumulative reward, numpy float vector of length c
    :param Ct: per-action pull count, numpy integer vector of length c
    :return: index of the action with the highest average reward
    """
    # Clamp the divisor to 1 so untried arms don't divide by zero; their
    # mean is forced to 0 by the where() mask anyway.
    means = np.where(Ct == 0, 0.0, Rt / np.maximum(Ct, 1))
    return np.argmax(means)
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action_exploit
--- OR ----
python3 -m nose -v test1.py:test_choose_action_exploit
--- OR ----
python -m nose -v test1.py:test_choose_action_exploit
---------------------------------------------------
'''
#----------------------------------------------------
'''
Given a multi-armed bandit game and the player's memory about the previous results, choose an action at the current step of the game using epsilon-greedy method: with a small probability (epsilon) to follow explore-only method (randomly choose an action) and with a large probability (1-epsilon) to follow exploit-only method (choose the action with the highest average reward).
---- Inputs: --------
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
---- Outputs: --------
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
---- Hints: --------
* You could use the random.rand() function in numpy to sample a number randomly using uniform distribution between 0 and 1.
* This problem can be solved using 1 line(s) of code.
'''
#---------------------
def choose_action(Rt, Ct, e=0.05):
    """Epsilon-greedy action selection.

    With probability ``e`` follow the explore-only strategy (uniform random
    arm); otherwise follow the exploit-only strategy (arm with the best
    empirical mean reward so far).

    :param Rt: per-action cumulative reward, numpy float vector of length c
    :param Ct: per-action pull count, numpy integer vector of length c
    :param e: exploration probability, a float in [0, 1]
    :return: index of the chosen action
    """
    if np.random.random() < e:
        return choose_action_explore(Ct.size)
    return choose_action_exploit(Rt, Ct)
#-----------------
'''
TEST: Now you can test the correctness of your code above by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py:test_choose_action
--- OR ----
python3 -m nose -v test1.py:test_choose_action
--- OR ----
python -m nose -v test1.py:test_choose_action
---------------------------------------------------
'''
#--------------------------------------------
'''
TEST problem 1:
Now you can test the correctness of all the above functions by typing the following in the terminal:
---------------------------------------------------
nosetests -v test1.py
--- OR ----
python3 -m nose -v test1.py
--- OR ----
python -m nose -v test1.py
---------------------------------------------------
If your code passed all the tests, you will see the following message in the terminal:
----------- Problem 1 (15 points in total)--------------------- ... ok
* (3 points) update_memory ... ok
* (3 points) choose_action_explore ... ok
* (3 points) choose_action_exploit ... ok
* (6 points) choose_action ... ok
----------------------------------------------------------------------
Ran 4 tests in 0.586s
OK
'''
#--------------------------------------------
#--------------------------------------------
'''
List of All Variables
* c: the number of possible actions in a multi-armed bandit problem, an integer scalar.
* e: (epsilon) the probability of the player to follow the exploration-only strategy. e is a float scalar between 0 and 1. The player has 1-e probability in each time step to follow the exploitation-only strategy.
* Rt: (player's memory) the total rewards (i.e., sum of rewards) collected for each action, a numpy float vector of length c. Rt_1[i] represents the sum of total rewards collected on the i-th action.
    * Ct: (player's memory) the counts on how many times each action has been tried, a numpy integer vector of length c. Ct_1[i] represents the total number of samples collected on the i-th action, i.e., how many times the i-th action has been tried before.
* a: the index of the action being chosen by the player, an integer scalar between 0 and c-1.
* r: the reward received at the current time step, a float scalar.
'''
#-------------------------------------------- | 2.5 | 2 |
3test.py | zerovm/zpython2 | 4 | 12798592 | #!/usr/bin/python
import os
import sys
import subprocess
import socket
import tempfile
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file', help='file containing tests list')
args = parser.parse_args()
# will use it as return code for script
test_result = 0
devnull = open(os.devnull, "w")
PATH = os.path.abspath(os.path.dirname(__file__))
TEST_DIR = os.path.join(PATH, 'Lib', 'test')
# nvram handed to the zerovm guest: mounts python.tar read-only at / and
# runs the staged test script.
NVRAM_TMPLT = """[args]
args = python /dev/1.test.py
[fstab]
channel=/dev/1.python.tar,mountpoint=/,access=ro,removable=no
"""
# Per-test zerovm manifest; the %(socket)s / %(path)s / %(test_path)s /
# %(test)s placeholders are filled in by Daemon.send().
MANIFEST_TMPLT = """Job = %(socket)s
Node = 1
Version = 20130611
Timeout = 50
Memory = 4294967296,0
Program = %(path)s/python
Channel = /dev/stdin,/dev/stdin,0,0,4294967296,4294967296,0,0
Channel = /dev/null,/dev/stdout,0,0,0,0,4294967296,4294967296
Channel = /dev/null,/dev/stderr,0,0,0,0,4294967296,4294967296
Channel = %(path)s/python.tar,/dev/1.python.tar,3,0,4294967296,4294967296,4294967296,4294967296
Channel = %(path)s/nvram.1,/dev/nvram,3,0,4294967296,4294967296,4294967296,4294967296
Channel = %(test_path)s/%(test)s.py,/dev/1.test.py,3,0,4294967296,4294967296,4294967296,4294967296
"""
# predefined tests
tests = [
    'test_grammar',
    'test_opcodes',
    'test_dict',
    'test_builtin',
    'test_exceptions',
    'test_types',
    'test_unittest',
    'test_doctest',
    'test_doctest2',
]
# A test-list file (one test name per line) overrides the defaults.
if args.file:
    tests = [l for l in open(args.file, 'r').readlines()]
def client(server_address, input):
    # Send one length-prefixed request over a UNIX socket and return the
    # length-prefixed reply.  Wire format in both directions: an 8-character
    # hex size header ("0x%06x") followed by that many payload bytes.
    # NOTE: this script targets Python 2 ("except IOError, e" syntax).
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    data = ''
    try:
        sock.connect(server_address)
        sdata = input
        size = '0x%06x' % (len(sdata))
        sock.sendall(size + sdata)
        resp = sock.makefile()
        # The reply starts with the same 8-char hex length header.
        sdata = resp.read(8)
        size = int(sdata, 0)
        data = resp.read(size)
    except IOError, e:
        print str(e)
        raise
    finally:
        # Always release the socket, even when the request failed.
        sock.close()
    return data
class Daemon(object):
    """Context manager owning a temporary UNIX job socket and a zerovm
    daemon bound to it; ``send`` submits one test manifest per call."""
    def __enter__(self):
        # self.socket = os.path.join(PATH, 'tmp1234')
        # self.fd, self.socket = 0, '/tmp/tmp.Mkba0cwcdk'
        self.fd, self.socket = tempfile.mkstemp()
        self._start_daemon()
        return self
    def __exit__(self, type, value, traceback):
        # Tear down the daemon and remove the socket file; returning False
        # never suppresses exceptions raised inside the with-block.
        self._stop_daemon()
        os.remove(self.socket)
        return False
    def send(self, test):
        """Render the manifest for *test* and submit it over the job socket."""
        params = {'socket': self.socket, 'path': PATH, 'test_path': TEST_DIR,
                  'test': test}
        self.manifest = MANIFEST_TMPLT % params
        return client(self.socket, self.manifest)
    def _start_daemon(self):
        """Write manifest.1/nvram.1 to disk and launch the zerovm daemon."""
        with open(os.path.join(PATH, 'manifest.1'), 'w') as mfile:
            params = {'socket': self.socket, 'path': PATH,
                      'test_path': TEST_DIR, 'test': ''}
            self.manifest = MANIFEST_TMPLT % params
            mfile.write(self.manifest)
        with open(os.path.join(PATH, 'nvram.1'), 'w') as nfile:
            params = {'test': ''}
            nfile.write(NVRAM_TMPLT % params)
        # Daemon chatter is discarded via the module-level devnull handle.
        subprocess.call(['zerovm', os.path.join(PATH, 'manifest.1')],
                        stdout=devnull, stderr=devnull)
    def _stop_daemon(self):
        # Kills by process-name pattern; this stops every zvm instance,
        # not only the one this object started.
        subprocess.call(['pkill', 'zvm'])
with Daemon() as daemon:
    for test in tests:
        # Print the test name without its "test_" prefix; the trailing
        # comma (Python 2) keeps the ok/fail verdict on the same line.
        print("%s.." % test.strip()[5:]),
        sys.stdout.flush()
        try:
            ret = daemon.send(test.strip())
            # The third line of the daemon's reply is parsed as the
            # guest's return code.
            retcode = int(ret.splitlines()[2])
            if retcode:
                test_result = 1
                print('\033[1;31mfail\033[1;m')  # red
            else:
                print('\033[1;32mok\033[1;m')  # green
        except KeyboardInterrupt:
            break
devnull.close()
# Exit non-zero if any test failed, so CI can detect the failure.
sys.exit(test_result)
| 1.359375 | 1 |
proliantutils/ilo/constants.py | anta-nok/proliantutils | 0 | 12798600 | <reponame>anta-nok/proliantutils
# Copyright 2017 Hewlett Packard Enterprise Development LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SUPPORTED_BOOT_MODE constants
SUPPORTED_BOOT_MODE_LEGACY_BIOS_ONLY = 'legacy bios only'
SUPPORTED_BOOT_MODE_UEFI_ONLY = 'uefi only'
SUPPORTED_BOOT_MODE_LEGACY_BIOS_AND_UEFI = 'legacy bios and uefi'
# BIOS property names recognized by this library (presumably the subset
# exposed through iLO -- confirm against the callers of these lists).
SUPPORTED_BIOS_PROPERTIES = [
    "AdvancedMemProtection",
    "AutoPowerOn",
    "BootMode",
    "BootOrderPolicy",
    "CollabPowerControl",
    "DynamicPowerCapping",
    "DynamicPowerResponse",
    "IntelligentProvisioning",
    "IntelPerfMonitoring",
    "IntelProcVtd",
    "IntelQpiFreq",
    "IntelTxt",
    "PowerProfile",
    "PowerRegulator",
    "ProcAes",
    "ProcCoreDisable",
    "ProcHyperthreading",
    "ProcNoExecute",
    "ProcTurbo",
    "ProcVirtualization",
    "SecureBootStatus",
    "Sriov",
    "ThermalConfig",
    "ThermalShutdown",
    "TpmState",
    "TpmType",
    "UefiOptimizedBoot"
]
# Redfish-managed systems additionally support WorkloadProfile.
SUPPORTED_REDFISH_BIOS_PROPERTIES = SUPPORTED_BIOS_PROPERTIES + [
    "WorkloadProfile"
]
| 0.773438 | 1 |
tools/bpy_smooth_tiles.py | Avnerus/3d-tiles-validator | 1 | 12798608 | import sys
import bpy
if __name__ == "__main__":
args = sys.argv[sys.argv.index('--'):]
print(args)
bpy.ops.import_scene.gltf(filepath=args[1])
obj = bpy.context.active_object
mod = obj.modifiers.new("CorrectiveSmooth", 'CORRECTIVE_SMOOTH')
mod.factor = 0.1
mod.scale = 1.5
bpy.ops.object.modifier_apply(modifier="CorrectiveSmooth")
bpy.ops.export_scene.gltf(
filepath=args[2],
export_normals=False,
export_colors=False,
use_selection=True
)
| 1.273438 | 1 |
process_data/all/code/split_voxel_then_img.py | hailieqh/3D-Object-Primitive-Graph | 2 | 12798616 | import scipy.io
import numpy as np
import os
import random
import json
import pdb
def check_image_voxel_match(cls):
    """Sanity-check that the image split files agree with the voxel split files.

    For every image listed in {train,val,test}.txt, look up its voxel model
    via img_voxel_idxs.mat and verify that the resulting per-phase voxel
    sets exactly match voxel_{train,val,test}.txt.  Prints 'error' (and the
    offending names) on any mismatch; returns nothing.
    """
    root = os.path.abspath('.')
    out_dir = os.path.join(root, '../output', cls)
    # out_dir = '/Users/heqian/Research/projects/primitive-based_3d/data/all_classes/chair'
    voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
    voxel_dirs = {x: os.path.join(voxel_txt_dir, 'voxel_{}.txt'.format(x))
                  for x in ['train', 'val', 'test']}
    img_dirs = {x: os.path.join(voxel_txt_dir, '{}.txt'.format(x))
                for x in ['train', 'val', 'test']}
    with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
        voxel_all = f.readlines()
    voxel_names = {}
    img_names = {}
    for phase in ['train', 'val', 'test']:
        with open(os.path.join(voxel_dirs[phase]), 'r') as f:
            voxel_names[phase] = f.readlines()
        with open(os.path.join(img_dirs[phase]), 'r') as f:
            img_names[phase] = f.readlines()
    # pix3d_dir = os.path.join(root, '../input/pix3d.json')
    # pix3d = json.load(open(pix3d_dir, 'r'))
    # img_idxs / voxel_idxs are parallel 1-based id arrays saved by
    # split_voxel_then_image().
    match_id = scipy.io.loadmat(os.path.join(out_dir, 'img_voxel_idxs.mat'))
    img_match_vox = {x: [] for x in ['train', 'val', 'test']}
    for phase in ['train', 'val', 'test']:
        for img in img_names[phase]:
            id_img_ori = int(img.split('.')[0])  # 1-3839
            img_id_real = list(match_id['img_idxs'][0]).index(id_img_ori)  # 0-3493
            voxel_id_ori = match_id['voxel_idxs'][0, img_id_real]  # 1-216
            vox = voxel_all[voxel_id_ori - 1]
            img_match_vox[phase].append(vox)
            # img_match_vox[phase].append('model/'+vox)
    # Deduplicate: many images share one voxel model.
    img_match_vox = {x: sorted(set(img_match_vox[x])) for x in ['train', 'val', 'test']}
    # pdb.set_trace()
    for phase in ['train', 'val', 'test']:
        # Symmetric-difference check, then print the offending entries.
        if len(set(voxel_names[phase]).difference(set(img_match_vox[phase]))) > 0:
            print('error')
        if len(set(img_match_vox[phase]).difference(set(voxel_names[phase]))) > 0:
            print('error')
        for name in voxel_names[phase]:
            if name not in img_match_vox[phase]:
                print(name)
        for name in img_match_vox[phase]:
            if name not in voxel_names[phase]:
                print(name)
def split_voxel_then_image(cls):
    """Randomly split the voxel models of class *cls* into train/val/test
    (~65/15/20) and then assign every image to the split of its voxel model.

    Writes voxel_{train,val,test}.txt and {train,val,test}.txt under
    <out_dir>/voxeltxt, plus img_voxel_idxs.mat mapping 1-based image ids
    to 1-based voxel ids.  The split is unseeded, so each run produces a
    different partition.
    """
    # data_dir = '/Users/heqian/Research/projects/3dprnn/data/pix3d'
    split_by_model = True # True-split by 216 models, False-split by 34 images
    ## split voxels into train, val, test
    root = os.path.abspath('.')
    out_dir = os.path.join(root, '../output', cls)
    voxel_txt_dir = os.path.join(out_dir, 'voxeltxt')
    if not os.path.exists(voxel_txt_dir):
        os.makedirs(voxel_txt_dir)
    voxel_train_txtpath = os.path.join(voxel_txt_dir, 'voxel_train.txt')
    voxel_val_txtpath = os.path.join(voxel_txt_dir, 'voxel_val.txt')
    voxel_test_txtpath = os.path.join(voxel_txt_dir, 'voxel_test.txt')
    voxel_ftrain = open(voxel_train_txtpath, 'w')
    voxel_fval = open(voxel_val_txtpath, 'w')
    voxel_ftest = open(voxel_test_txtpath, 'w')
    voxel_ltrain = []
    voxel_lval = []
    voxel_ltest = []
    voxel_ctrain = 0
    voxel_cval = 0
    voxel_ctest = 0
    with open(os.path.join(out_dir, 'voxels_dir_{}.txt'.format(cls)), 'r') as f:
        voxel_dirs = f.readlines()
    for i in range(len(voxel_dirs)):
        voxel_dirs[i] = voxel_dirs[i].strip()
        # ~65/15/20 random split of the voxel models.
        tmp = random.random()
        if tmp < 0.65:
            voxel_ftrain.write(voxel_dirs[i]+'\n')
            voxel_ltrain.append(voxel_dirs[i])
            voxel_ctrain += 1
        elif tmp >= 0.65 and tmp < 0.8:
            voxel_fval.write(voxel_dirs[i]+'\n')
            voxel_lval.append(voxel_dirs[i])
            voxel_cval += 1
        else:
            voxel_ftest.write(voxel_dirs[i]+'\n')
            voxel_ltest.append(voxel_dirs[i])
            voxel_ctest += 1
    voxel_ftrain.close()
    voxel_fval.close()
    voxel_ftest.close()
    ## split images into train, val, test, according to voxels
    # img_voxel_idxs = []
    img_idxs = []
    voxel_idxs = []
    train_txtpath = os.path.join(voxel_txt_dir, 'train.txt')
    val_txtpath = os.path.join(voxel_txt_dir, 'val.txt')
    test_txtpath = os.path.join(voxel_txt_dir, 'test.txt')
    ftrain = open(train_txtpath, 'w')
    fval = open(val_txtpath, 'w')
    ftest = open(test_txtpath, 'w')
    ctrain = 0
    cval = 0
    ctest = 0
    pix3d_dir = os.path.join(root, '../input/pix3d.json')
    # Use a context manager so the annotation file handle is closed promptly.
    with open(pix3d_dir, 'r') as f:
        pix3d = json.load(f)
    for i in range(len(pix3d)):
        # if json_file[i]['img'][4:9] == 'chair' and json_file[i]['voxel'] not in voxel_dirs:
        #     print(json_file[i]['img'], json_file[i]['voxel'])
        voxel_dir = pix3d[i]['voxel'][6:]  # drop the 6-char 'model/' prefix
        if voxel_dir in voxel_dirs:
            # pdb.set_trace()
            img_file = pix3d[i]['img'].split('/')[-1] #[10:]
            img_id = int(img_file.split('.')[0]) #int(pix3d[i]['img'][10:14])
            img_idxs.append(img_id)
            voxel_idxs.append(voxel_dirs.index(voxel_dir) + 1)
            # img_voxel_idxs.append(voxel_dirs.index(voxel_dir))
            # if img_id != len(img_voxel_idxs):
            #     print('Error!!!=======', img_id)
            if split_by_model:
                # Image inherits the split of its voxel model.
                if voxel_dir in voxel_ltrain:
                    ftrain.write(img_file+'\n')
                    ctrain += 1
                elif voxel_dir in voxel_lval:
                    fval.write(img_file+'\n')
                    cval += 1
                elif voxel_dir in voxel_ltest:
                    ftest.write(img_file+'\n')
                    ctest += 1
            else:
                # Independent per-image random split.
                tmp = random.random()
                if tmp < 0.65:
                    ftrain.write(img_file+'\n')
                    ctrain += 1
                elif tmp >= 0.65 and tmp < 0.8:
                    fval.write(img_file+'\n')
                    cval += 1
                else:
                    ftest.write(img_file+'\n')
                    ctest += 1
    ftrain.close()
    fval.close()
    ftest.close()
    # scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
    #                  {'img_voxel_idxs': np.array(img_voxel_idxs)})
    scipy.io.savemat(os.path.join(out_dir, 'img_voxel_idxs.mat'),
                     {'img_idxs': np.array(img_idxs), 'voxel_idxs': np.array(voxel_idxs)})
    print(voxel_ctrain+voxel_cval+voxel_ctest, voxel_ctrain, voxel_cval, voxel_ctest)
    print(ctrain+cval+ctest, ctrain, cval, ctest)
    print(len(img_idxs))
print(len(img_idxs))
if __name__ == '__main__':
    cls_all = ['chair', 'bed', 'bookcase', 'desk', 'misc', 'sofa', 'table', 'tool', 'wardrobe']
    cls = 'table'
    # for cls in cls_all:
    # Only one class is processed per run; uncomment the loop above (and
    # indent the two calls) to process every class in cls_all.
    split_voxel_then_image(cls)
    check_image_voxel_match(cls)
| 1.476563 | 1 |
src/petronia/core/platform/api/locale/__init__.py | groboclown/petronia | 19 | 12798624 | <filename>src/petronia/core/platform/api/locale/__init__.py
"""
Information regarding the current user locale.
TODO should this be its own extension? It seems like it should, but that
would mean asking for a translation would need to go through the event bus,
and that doesn't seem right.
"""
| 0.453125 | 0 |
tests/create_table_test.py | aescwork/sqlitemgr | 1 | 12798632 | <reponame>aescwork/sqlitemgr
import unittest
import os
import sys
sys.path.append("../sqlitemgr/")
import sqlitemgr as sqm
class CreateTableTest(unittest.TestCase):
    """
	The way the create_table function is being tested is to have the SQLiteMgr object compose and execute an SQL statement to create a table in fruit.db,
	then get the cursor from the object, execute a SELECT statement against the table, then get the name of the columns in the table (in a list),
	and compare with what should be the same list assigned to self.comp_names. If they match, the object successfully created the nut table in fruit.db.
    """
    def setUp(self):
        # Build a 'nuts' table in the fixture db via the fluent interface,
        # then read its column names back through a SELECT.
        self.sm = sqm.SQLiteMgr("../fixtures/fruit.db")
        self.sm.new_table("nuts").add_table_column("Nmbr", "INT", "PRIMARY KEY").add_table_column("Called", "TEXT", "UNIQUE").add_table_column("Description", "TEXT").create_table()
        self.cursor = self.sm.get_cursor()
        self.cursor.execute("SELECT * FROM nuts")
        self.col_names = [description[0] for description in self.cursor.description] # gets the column names from nuts table because self.sm.get_cursor().execute() is selecting from nuts table
        self.comp_names = ['Nmbr', 'Called', 'Description']
    def test_create_table(self):
        # Columns read back from SQLite must match what create_table() built.
        self.assertEqual(self.col_names, self.comp_names)
    def test_result(self):
        # The manager records "OK" after a successful create_table().
        self.assertEqual(self.sm.result, "OK")
    def tearDown(self):
        # NOTE(review): calling __del__ directly is unusual; presumably it
        # releases the db connection -- confirm SQLiteMgr offers a close().
        self.sm.__del__()
if __name__ == '__main__':
    unittest.main()
| 2.109375 | 2 |
main.py | szabolcsdombi/heightmap-multitexture-terrain | 1 | 12798640 | <filename>main.py
import math
import struct
import GLWindow
import ModernGL
from PIL import Image
from pyrr import Matrix44
wnd = GLWindow.create_window()
ctx = ModernGL.create_context()
# GLSL program: the vertex shader displaces a flat unit grid by the
# heightmap's red channel; the fragment shader blends two ground textures
# by height (smoothstep border) and modulates with cracks/darken overlays.
prog = ctx.program([
    ctx.vertex_shader('''
        #version 330
        uniform mat4 Mvp;
        uniform sampler2D Heightmap;
        in vec2 vert;
        out vec2 v_text;
        void main() {
            vec4 vertex = vec4(vert - 0.5, texture(Heightmap, vert).r * 0.2, 1.0);
            gl_Position = Mvp * vertex;
            v_text = vert;
        }
    '''),
    ctx.fragment_shader('''
        #version 330
        uniform sampler2D Heightmap;
        uniform sampler2D Color1;
        uniform sampler2D Color2;
        uniform sampler2D Cracks;
        uniform sampler2D Darken;
        in vec2 v_text;
        out vec4 f_color;
        void main() {
            float height = texture(Heightmap, v_text).r;
            float border = smoothstep(0.5, 0.7, height);
            vec3 color1 = texture(Color1, v_text * 7.0).rgb;
            vec3 color2 = texture(Color2, v_text * 6.0).rgb;
            vec3 color = color1 * (1.0 - border) + color2 * border;
            color *= 0.8 + 0.2 * texture(Darken, v_text * 3.0).r;
            color *= 0.5 + 0.5 * texture(Cracks, v_text * 5.0).r;
            color *= 0.5 + 0.5 * height;
            f_color = vec4(color, 1.0);
        }
    '''),
])
# Load the textures, flipped vertically to match GL's bottom-left origin.
img0 = Image.open('data/heightmap.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
img1 = Image.open('data/grass.jpg').convert('RGB').transpose(Image.FLIP_TOP_BOTTOM)
img2 = Image.open('data/rock.jpg').convert('RGB').transpose(Image.FLIP_TOP_BOTTOM)
img3 = Image.open('data/cracks.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
img4 = Image.open('data/checked.jpg').convert('L').transpose(Image.FLIP_TOP_BOTTOM)
tex0 = ctx.texture(img0.size, 1, img0.tobytes())
tex1 = ctx.texture(img1.size, 3, img1.tobytes())
tex2 = ctx.texture(img2.size, 3, img2.tobytes())
tex3 = ctx.texture(img3.size, 1, img3.tobytes())
tex4 = ctx.texture(img4.size, 1, img4.tobytes())
tex0.build_mipmaps()
tex1.build_mipmaps()
tex2.build_mipmaps()
tex3.build_mipmaps()
tex4.build_mipmaps()
# Bind each texture to its own unit and point the sampler uniforms at them.
tex0.use(0)
tex1.use(1)
tex2.use(2)
tex3.use(3)
tex4.use(4)
prog.uniforms['Heightmap'].value = 0
prog.uniforms['Color1'].value = 1
prog.uniforms['Color2'].value = 2
prog.uniforms['Cracks'].value = 3
prog.uniforms['Darken'].value = 4
index = 0
vertices = bytearray()
indices = bytearray()
# Build a 64x64 grid as one triangle strip per row; the -1 appended after
# each row acts as the primitive-restart marker (confirm ModernGL default).
for i in range(64 - 1):
    for j in range(64):
        vertices += struct.pack('2f', i / 64, j / 64)
        indices += struct.pack('i', index)
        index += 1
        vertices += struct.pack('2f', (i + 1) / 64, j / 64)
        indices += struct.pack('i', index)
        index += 1
    indices += struct.pack('i', -1)
vbo = ctx.buffer(vertices)
ibo = ctx.buffer(indices)
vao = ctx.vertex_array(prog, [(vbo, '2f', ['vert'])], ibo)
# Render loop: orbit the camera around the terrain centre over time.
while wnd.update():
    angle = wnd.time * 0.5
    width, height = wnd.size
    proj = Matrix44.perspective_projection(45.0, width / height, 0.01, 10.0)
    look = Matrix44.look_at((math.cos(angle), math.sin(angle), 0.8), (0.0, 0.0, 0.1), (0.0, 0.0, 1.0))
    prog.uniforms['Mvp'].write((proj * look).astype('float32').tobytes())
    ctx.enable(ModernGL.DEPTH_TEST)
    ctx.viewport = wnd.viewport
    ctx.clear(1.0, 1.0, 1.0)
    vao.render(ModernGL.TRIANGLE_STRIP)
| 1.796875 | 2 |
src/OTLMOW/OTLModel/Datatypes/KlLEMarkeringSoort.py | davidvlaminck/OTLClassPython | 2 | 12798648 | <gh_stars>1-10
# coding=utf-8
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlLEMarkeringSoort(KeuzelijstField):
    """Mogelijke markeringsoorten op een lijvormig element."""
    # Codelist metadata; this class is generated, so extend rather than edit.
    naam = 'KlLEMarkeringSoort'
    label = 'Soort markering van lijnvormig element'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlLEMarkeringSoort'
    definition = 'Mogelijke markeringsoorten op een lijvormig element.'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlLEMarkeringSoort'
    # Maps each option key (invulwaarde) to its full KeuzelijstWaarde record.
    options = {
        'biggenrug': KeuzelijstWaarde(invulwaarde='biggenrug',
                                      label='biggenrug',
                                      definitie='Een betonnen obstakel dat meestal een infrastructurele en beschermende functie heeft',
                                      objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/biggenrug'),
        'boordsteen': KeuzelijstWaarde(invulwaarde='boordsteen',
                                       label='boordsteen',
                                       definitie='Een lijnvormig element dat de scheiding verzorgt tussen een rijbaan en het meestal hoger gelegen trottoir',
                                       objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/boordsteen'),
        'boordsteen-parkeerverbod': KeuzelijstWaarde(invulwaarde='boordsteen-parkeerverbod',
                                                     label='boordsteen parkeerverbod',
                                                     definitie='Een lijnvormig element dat de scheiding verzorgt tussen een rijbaan en het meestal hoger gelegen trottoir met als functie het aanduiden van parkeerverbod',
                                                     objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/boordsteen-parkeerverbod'),
        'new-Jersey': KeuzelijstWaarde(invulwaarde='new-Jersey',
                                       label='new Jersey',
                                       definitie='Een afschermende constructie uit kunststof, beton of metaal dat naast wegen wordt geplaatst om te voorkomen dat voertuigen de weg in zijdelingse richting verlaten, kantelen of de middenberm doorkruisen.',
                                       objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/new-Jersey'),
        'vangrail': KeuzelijstWaarde(invulwaarde='vangrail',
                                     label='vangrail',
                                     definitie='Een afschermende constructie die naast wegen wordt geplaatst om te voorkomen dat voertuigen de weg in zijdelingse richting verlaten, kantelen of de middenberm doorkruisen.',
                                     objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlLEMarkeringSoort/vangrail')
    }
| 0.988281 | 1 |
streamlit/utils/ui.py | T-Sumida/ObjectDetection-Streamlit | 1 | 12798656 | # -*- coding:utf-8 -*-
from typing import Optional, Tuple, List
import cv2
import numpy as np
import streamlit as st
from PIL import Image
from utils.model import MODEL_TYPE, draw_bboxes
def description(header: str, description: str):
    """Render a section header and its markdown body on the Streamlit page.

    Args:
        header (str): header message, shown via st.subheader
        description (str): description text, rendered as markdown
    """
    st.subheader(header)
    st.markdown(description)
def object_detector_ui() -> Tuple[int, str, float]:
    """Render the model-configuration widgets in the Streamlit sidebar.

    Returns:
        Tuple[int, str, float]: (number of threads 1-4, model type string
        chosen from MODEL_TYPE, confidence threshold in [0, 1])
    """
    st.sidebar.markdown("# Model Config")
    num_thread = st.sidebar.slider("Number of Thread", 1, 4, 1, 1)
    confidence_threshold = st.sidebar.slider(
        "Confidence threshold", 0.0, 1.0, 0.5, 0.01)
    model_type = st.sidebar.radio("Model Type", MODEL_TYPE)
    return num_thread, model_type, confidence_threshold
def upload_image() -> Optional[np.ndarray]:
    """Render a JPEG file-uploader and decode the chosen image.

    Returns:
        Optional[np.ndarray]: the uploaded image decoded by OpenCV
        (flag 1 -> 3-channel BGR), or None when no file has been chosen.
    """
    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "JPG"])
    if uploaded_file is not None:
        # Wrap the raw upload bytes in a uint8 array for cv2.imdecode.
        file_bytes = np.asarray(
            bytearray(uploaded_file.read()), dtype=np.uint8)
        image = cv2.imdecode(file_bytes, 1)
        return image
    else:
        return None
def show_image(image: np.ndarray, bboxes: List, scores: List, classes: List, detect_num: int, elapsed_time: int):
    """Draw the detection results on the image and display it in Streamlit.

    Renders the bounding boxes via draw_bboxes, converts the result to a
    PIL image, shows it full-width, and prints the measured inference time.

    Args:
        image (np.ndarray): original image
        bboxes (List): detected bounding boxes
        scores (List): detection scores
        classes (List): detected class names
        detect_num (int): number of detections
        elapsed_time (int): processing time in milliseconds
    """
    image = draw_bboxes(image, bboxes, scores, classes, detect_num)
    image = cv2pil(image)
    st.image(image, caption='Uploaded Image.', use_column_width=True)
    st.markdown("**elapsed time : " + str(elapsed_time) + "[msec]**")
def cv2pil(image: np.ndarray) -> Image:
    """cv2 image to PIL image

    Args:
        image (np.ndarray): cv2 image (grayscale, BGR, or BGRA)

    Returns:
        Image: PIL image
    """
    # Work on a copy so the caller's array is never mutated.
    new_image = image.copy()
    if new_image.ndim == 2: # grayscale: no channel axis, nothing to convert
        pass
    elif new_image.shape[2] == 3: # color: OpenCV stores BGR, PIL expects RGB
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)
    elif new_image.shape[2] == 4: # with alpha channel: BGRA -> RGBA
        new_image = cv2.cvtColor(new_image, cv2.COLOR_BGRA2RGBA)
    new_image = Image.fromarray(new_image)
    return new_image
| 2.25 | 2 |
Strip_Method.py | BeenashPervaiz/Command_Line_Task | 0 | 12798664 | <filename>Strip_Method.py
name = " Pervaiz "
dots = " ........."
print(name.lstrip() + dots) #lstrip Method
print(name.rstrip() + dots) #rstrip Method
print(name.strip() + dots) #strip Method
print(name.replace(" ", "") + dots) #Replace Method | 1.234375 | 1 |
02.Button/09.SwitchFun.py | sarincr/Python-App-Development-using-Kivy | 1 | 12798672 | <gh_stars>1-10
from kivy.app import App
from kivy.uix.switch import Switch
class SwitchApp(App):
    """Minimal Kivy app demonstrating the Switch widget."""
    def build(self):
        """Create the root Switch and subscribe to its active property."""
        switch = Switch()
        switch.bind(active=self.switch_state)
        return switch
    def switch_state(self, instance, value):
        """Callback fired on toggle; value is the new boolean state."""
        print('Switch is', value)
SwitchApp().run()
| 1.84375 | 2 |
svg_ultralight/strings/svg_strings.py | ShayHill/svg_ultralight | 1 | 12798680 | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
"""Explicit string formatting calls for arguments that aren't floats or strings.
:author: <NAME>
:created: 10/30/2020
The `string_conversion` module will format floats or strings. Some other formatters can
make things easier.
"""
from typing import Iterable, Tuple
from ..string_conversion import format_number
def svg_color_tuple(rgb_floats):
    """
    Turn an rgb tuple (0-255, 0-255, 0-255) into an svg color definition.

    Each channel is rounded to the nearest integer before formatting.

    :param rgb_floats: (0-255, 0-255, 0-255)
    :return: "rgb(128,128,128)"
    """
    red, green, blue = rgb_floats
    return "rgb({},{},{})".format(round(red), round(green), round(blue))
def svg_ints(floats: Iterable[float]) -> str:
    """
    Space-delimited ints

    :param floats: any number of floats
    :return: each float rounded to an int, space delimited
    """
    rounded = [str(round(value)) for value in floats]
    return " ".join(rounded)
def svg_float_tuples(tuples: Iterable[Tuple[float, float]]) -> str:
    """
    Space-delimited tuples

    :param tuples: [(a, b), (c, d)]
    :return: "a,b c,d"
    """
    formatted = (",".join(format_number(value) for value in pair) for pair in tuples)
    return " ".join(formatted)
| 2.8125 | 3 |
distances/migrations/0011_auto_20170602_1044.py | tkettu/rokego | 0 | 12798688 | <filename>distances/migrations/0011_auto_20170602_1044.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-02 07:44
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; the datetime below is the
    # default captured when the migration was created, not evaluated at
    # migrate time.
    dependencies = [
        ('distances', '0010_auto_20170519_1604'),
    ]
    operations = [
        migrations.AlterField(
            model_name='dates',
            name='startDate',
            field=models.DateField(default=datetime.datetime(2017, 5, 26, 10, 44, 7, 194576)),
        ),
    ]
| 0.601563 | 1 |
app.py | heminsatya/free_notes | 0 | 12798696 | <filename>app.py
# Dependencies
from aurora import Aurora
# Instantiate the root app
root = Aurora()
# Run the root app only when executed directly, not when imported
if __name__ == '__main__':
    root.run()
| 0.867188 | 1 |
2_Advanced_Images_3_TransferLearningAndFIneTuning2.py | BrunoDatoMeneses/TensorFlowTutorials | 0 | 12798704 | <reponame>BrunoDatoMeneses/TensorFlowTutorials
import matplotlib.pyplot as plt
import numpy as np
import os
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Download and unpack the cats-vs-dogs dataset into the Keras cache dir.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
BATCH_SIZE = 32
IMG_SIZE = (160, 160)
train_dataset = image_dataset_from_directory(train_dir,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             image_size=IMG_SIZE)
validation_dataset = image_dataset_from_directory(validation_dir,
                                                  shuffle=True,
                                                  batch_size=BATCH_SIZE,
                                                  image_size=IMG_SIZE)
class_names = train_dataset.class_names
# Preview the first nine training images with their labels.
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(images[i].numpy().astype("uint8"))
    plt.title(class_names[labels[i]])
    plt.axis("off")
# Carve one fifth of the validation batches off as a held-out test set.
val_batches = tf.data.experimental.cardinality(validation_dataset)
test_dataset = validation_dataset.take(val_batches // 5)
validation_dataset = validation_dataset.skip(val_batches // 5)
print('Number of validation batches: %d' % tf.data.experimental.cardinality(validation_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
# Prefetch so data loading overlaps with training.
AUTOTUNE = tf.data.AUTOTUNE
train_dataset = train_dataset.prefetch(buffer_size=AUTOTUNE)
validation_dataset = validation_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset = test_dataset.prefetch(buffer_size=AUTOTUNE)
# Random flip/rotation augmentation applied on the fly.
data_augmentation = tf.keras.Sequential([
  tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
  tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
# Visualize nine augmented variants of a single training image.
for image, _ in train_dataset.take(1):
  plt.figure(figsize=(10, 10))
  first_image = image[0]
  for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
    plt.imshow(augmented_image[0] / 255)
    plt.axis('off')
preprocess_input = tf.keras.applications.mobilenet_v2.preprocess_input
#rescale = tf.keras.layers.experimental.preprocessing.Rescaling(1./127.5, offset= -1)
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = IMG_SIZE + (3,)
# Reload the model saved by the companion transfer-learning script.
base_model = tf.keras.models.load_model('SavedModels/2_Advanced_Images_3_TransferLearningAndFIneTuningAfterTF')
image_batch, label_batch = next(iter(train_dataset))
feature_batch = base_model(image_batch)
print(feature_batch.shape)
# Fine-tuning: unfreeze the whole model, then re-freeze the bottom layers
# so only layers from `fine_tune_at` upward get trained.
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
  layer.trainable = False
| 2.140625 | 2 |
PythonExercicio/jogodavelha.py | fotavio16/PycharmProjects | 0 | 12798712 | import random
# Ask the player to choose a letter, O or X
def escolhaLetraJogador():
    """Prompt until the player types O or X; return [player_letter, computer_letter]."""
    l = ""
    while l != "O" and l != "X":
        l = str(input('Escolha a letra que prefere jogar (O ou X): ')).upper()
    if l == "O":
        letras = ['O', "X"]
    else:
        letras = ['X', "O"]
    return letras
# Randomly decide who goes first
def iniciaJogador():
    """Flip a fair coin for who starts; True means the player begins."""
    # randint(1, 2) is uniform over {1, 2}, so each side starts half the time.
    return random.randint(1, 2) == 1
def criaTabuleiro():
    """Return a fresh board: index 0 is an unused placeholder, cells 1-9 are blank."""
    return [''] + [' '] * 9
# Display the board
def mostraTabuleiro(posi):
    """Print the 3x3 board; posi[1..9] map to cells numpad-style (7-8-9 on top)."""
    print("   |   |   ")
    print(' {} | {} | {} '.format(posi[7],posi[8],posi[9]))
    print("   |   |   ")
    print("-----------")
    print("   |   |   ")
    print(' {} | {} | {} '.format(posi[4], posi[5], posi[6]))
    print("   |   |   ")
    print("-----------")
    print("   |   |   ")
    print(' {} | {} | {} '.format(posi[1], posi[2], posi[3]))
    print("   |   |   ")
# --- Game setup (script entry point) ---
letras = escolhaLetraJogador()
vezJogador = iniciaJogador()
#tabuleiro = [' ','X',' ','O',' ','X','O',' ','O','X']
tabuleiro = criaTabuleiro()
mostraTabuleiro(tabuleiro)
# The game loop below is still TODO:
# Player's turn
    # Show the board
    # Read the player's move
# Computer's turn
    # Choose the computer's move
    # 1) Play a winning move if available
    # 2) Block the player from winning on the next move
    # 3) Play a corner
    # 4) Play the center
    # 5) Play a side
# Check for a winner
# Check for a draw
# Ask whether the player wants to play again
| 2.75 | 3 |
tools/cal_effect_field_tool.py | yuanliangxie/YOLOv3_simple_baseline | 1 | 12798720 | import torch.nn as nn
import torch
import numpy as np
import cv2 as cv
def calculate_EPR(model): #TODO: try computing the effective receptive field from pre-trained weights
    """Estimate the effective receptive field (ERF) of each feature map of `model`.

    All conv weights are set to a small constant and BatchNorm layers are put in
    eval mode with identity statistics, so gradients reflect connectivity only.
    For each output feature map, a single unit at the spatial center receives a
    gradient of 1 and the gradient w.r.t. the input image is visualized via show().
    """
    for module in model.modules():
        try:
            # Constant weights / zero bias; BN mean 0 and var 1 (identity).
            # Modules lacking any of these attributes simply raise and are skipped.
            nn.init.constant_(module.weight, 0.05)
            nn.init.zeros_(module.bias)
            nn.init.zeros_(module.running_mean)
            nn.init.ones_(module.running_var)
        except Exception as e:
            pass
        if type(module) is nn.BatchNorm2d:
            module.eval()
    # All-ones input with grad so input.grad captures the receptive field.
    input = torch.ones(1, 3, 640, 640, requires_grad= True)
    model.zero_grad()
    features = model(input)
    for i in range(len(features)):
        # if i != len(features)-1:
        # 	continue
        x = features[i]
        #g_x = torch.zeros(size=[1, 1, x.shape[2], x.shape[3]])
        g_x = torch.zeros_like(x)
        # Inject a unit gradient at the spatial center of this feature map.
        h, w = g_x.shape[2]//2, g_x.shape[3]//2
        g_x[:, :, h, w] = 1
        x.backward(g_x, retain_graph = True)
        # x = torch.mean(x, 1, keepdim=True)
        # fake_fp = x * g_x[0, 0, ...]
        # fake_loss = torch.mean(fake_fp)
        # fake_loss.backward(retain_graph=True)
        show(input, i)
        # Reset gradients so the next feature map starts clean.
        model.zero_grad()
        input.grad.data.zero_()
        cv.waitKey(2000)
    cv.waitKey(0)
def cal_rf_wh(grad_input):
    """Return (width, height) of the bounding extent of non-zero entries.

    Width counts rows containing at least one positive value; height counts
    such columns (matching the original convention).
    """
    active = grad_input[:, :] > 0.0
    rows_with_signal = active.sum(-1) >= 1
    cols_with_signal = active.sum(0) >= 1
    return (rows_with_signal.sum(), cols_with_signal.sum())
def show(input, i):
    """Normalize and display the input-gradient map for feature level `i`.

    Prints the raw non-zero extent and the thresholded (>0.01) effective
    receptive field extent, then renders the map with OpenCV.
    """
    grad_input = np.abs(input.grad.data.numpy())
    grad_input = grad_input / np.max(grad_input)
    # Collapse batch and channel dimensions to a single 2-D map.
    grad_input = grad_input.mean(0).mean(0)
    # effective receptive field 0.75 - 0.85
    #grad_input = np.where(grad_input > 0.85,1,0)
    #grad_input_ = np.where(grad_input > 0.75, 1, grad_input)
    # effient_values = grad_input > 0.0
    # samll_effient_values = grad_input <= 0.2
    # grad_input[np.logical_and(effient_values, samll_effient_values)] = 0.1
    #grad_input = grad_input * 100
    width, height = cal_rf_wh(grad_input)
    print("width:", width, "height:", height)
    # Effective receptive field: entries above a 1% relative threshold.
    grad_input_ERF = np.where(grad_input>0.01, 1, 0)
    width, height = cal_rf_wh(grad_input_ERF)
    print("ERF_width:", width, "ERF_height:", height)
    # NOTE(review): the result of this expand_dims/repeat is discarded — it
    # looks like a leftover from a 3-channel visualization attempt; confirm.
    np.expand_dims(grad_input, axis=2).repeat(3, axis=2)
    grad_input = (grad_input * 255).astype(np.uint8)
    cv.imshow("receip_field"+str(i), grad_input)
    #cv.imwrite("./receip_field"+str(i)+".png", grad_input)
#cv.imwrite("./receip_field"+str(i)+".png", grad_input)
| 1.945313 | 2 |
leetcode/majority-element.py | zhangao0086/Python-Algorithm | 3 | 12798728 | <reponame>zhangao0086/Python-Algorithm
#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
from typing import List
class Solution:
    def majorityElement(self, nums: List[int]) -> int:
        """Boyer-Moore voting: the majority element survives pairwise cancellation.

        :type nums: List[int]
        :rtype: int
        """
        vote = 0
        leader = 0
        for value in nums:
            if vote == 0:
                # Previous candidates cancelled out; adopt the current value.
                leader = value
            vote += 1 if value == leader else -1
        return leader
if __name__ == '__main__':
    # Smoke tests from the problem statement.
    assert Solution().majorityElement([3,2,3]) == 3
    assert Solution().majorityElement([2,2,1,1,1,2,2]) == 2
terminal/test zips/outlab_3_boilerplate_files/q4.py | Phantom-Troupe-CS251/RedPlag | 0 | 12798736 | <gh_stars>0
class Node(object):
    """
    A binary-tree node with a left and a right child: both are Nodes for an
    internal node, or both None for a leaf.
    """
    def __init__(self, left=None, right=None):
        super(Node, self).__init__()
        self.left = left
        self.right = right

    def __str__(self):
        """
        Default inorder print
        """
        if self.left is None and self.right is None:
            return "( )"
        return "( " + str(self.left) + " " + str(self.right) + " )"

    def __eq__(self, other):
        """Structural equality: two trees are equal iff their shapes match."""
        self_is_leaf = self.left is None and self.right is None
        other_is_leaf = other.left is None and other.right is None
        if self_is_leaf or other_is_leaf:
            # Equal only when both are leaves.
            return self_is_leaf and other_is_leaf
        return self.left == other.left and self.right == other.right
def mirrorTree(node):
    """
    Returns the mirror image of the tree rooted at node
    (left and right subtrees swapped at every level). The input tree is
    not modified; a new tree is built.
    """
    # Was an unimplemented stub (`pass`, returning None).
    if node is None:
        return None
    return Node(mirrorTree(node.right), mirrorTree(node.left))
def allTrees(n):
    """
    Returns a list of all unique trees with n internal nodes.

    Catalan-style enumeration: the root uses one internal node and the
    remaining n-1 are split between the left and right subtrees in every
    possible way. n == 0 yields the single leaf.
    """
    # Was an unimplemented stub (`pass`, returning None).
    if n == 0:
        return [Node()]
    trees = []
    for left_size in range(n):
        for left in allTrees(left_size):
            for right in allTrees(n - 1 - left_size):
                trees.append(Node(left, right))
    return trees
def allSymTrees(n):
    """
    Returns a list of all unique symmetrical trees with n internal nodes.

    A tree is symmetrical when its left subtree is the mirror image of its
    right subtree, so n internal nodes are the root plus two mirrored halves
    of (n - 1) / 2 nodes each; even n > 0 therefore admits no symmetric tree.
    """
    # Was an unimplemented stub (`pass`, returning None). Local helpers keep
    # this function independent of the other (possibly unimplemented) stubs.
    def _trees(m):
        # All trees with m internal nodes.
        if m == 0:
            return [Node()]
        out = []
        for k in range(m):
            for left in _trees(k):
                for right in _trees(m - 1 - k):
                    out.append(Node(left, right))
        return out

    def _mirror(node):
        # Mirror image of a tree (children swapped at every level).
        if node is None:
            return None
        return Node(_mirror(node.right), _mirror(node.left))

    if n == 0:
        return [Node()]
    if n % 2 == 0:
        return []
    return [Node(t, _mirror(t)) for t in _trees((n - 1) // 2)]
if __name__ == '__main__':
    # Print all symmetric trees for a node count read from stdin, then a
    # small structural sanity check of Node printing.
    for x in allSymTrees(int(input())):
        print(x)
    node = Node(Node(Node(), Node()), Node())
    print(node)
ztfin2p3/calibration/flat.py | MickaelRigault/ztfin2p3 | 0 | 12798744 | <filename>ztfin2p3/calibration/flat.py
""" library to build the ztfin2p3 pipeline screen flats """
import os
import numpy as np
import dask
import dask.array as da
import warnings
from astropy.io import fits
from ztfimg.base import _Image_, FocalPlane
# Mapping from ZTF filter name to the LED ids used to illuminate it.
LED_FILTER = {"zg":[2,3,4,5],
              "zr":[7,8,9,10],
              "zi":[11,12,13],
              }
def ledid_to_filtername(ledid):
    """Return the ZTF filter name whose LED list contains the given LED id."""
    target = int(ledid)
    matches = [name for name, ledids in LED_FILTER.items() if target in ledids]
    if matches:
        return matches[0]
    raise ValueError(f"Unknown led with ID {ledid}")
def get_build_datapath(date, ccdid=None, ledid=None, groupby="day"):
    """Build the per-(period, ccd, led) table of input raw flats and output paths.

    Queries IRSA raw-flat metadata for `date`, groups the file paths by
    day or month plus ccdid and ledid, and attaches the destination
    `fileout` path for each group.
    """
    # IRSA metadata
    from ..metadata import get_rawmeta
    from ..io import get_filepath
    meta = get_rawmeta("flat", date, ccdid=ccdid, ledid=ledid, getwhat="filepath", in_meta=True)
    # Parsing out what to do:
    if groupby == "day":
        # filefracday encodes YYYYMMDDfrac; the first 8 digits are the day.
        meta[groupby] = meta.filefracday.astype("str").str[:8]
    elif groupby == "month":
        meta[groupby] = meta.filefracday.astype("str").str[:6]
    else:
        raise ValueError(f"Only groupby day or month implemented: {groupby} given")
    # One row per (period, ccd, led) with the list of contributing raw files.
    datapath = meta.groupby([groupby,"ccdid","ledid"])["filepath"].apply(list).reset_index()
    datapath["filtername"] = datapath["ledid"].apply(ledid_to_filtername)
    datapath["fileout"] = [get_filepath("flat", str(s_[groupby]),
                                        ccdid=int(s_.ccdid), ledid=int(s_.ledid), filtername=s_.filtername)
                          for id_, s_ in datapath.iterrows()]
    return datapath
def build_from_datapath(build_dataframe, assume_exist=False, inclheader=False, overwrite=True, **kwargs):
    """Build and write one flat FITS file per row of `build_dataframe`.

    Each row (as produced by get_build_datapath) lists the input raw files
    and the output path. Returns a list of dask.delayed fits-write tasks;
    nothing is executed until they are computed.
    """
    if not assume_exist:
        from ztfquery import io

    outs = []
    for i_, s_ in build_dataframe.iterrows():
        # 
        fileout = s_.fileout
        os.makedirs(os.path.dirname(fileout), exist_ok=True) # build if needed
        files = s_["filepath"]
        if not assume_exist:
            # Download any raw file not already in the local ztfquery cache.
            files = io.bulk_get_file(files)
        # 
        bflat = FlatBuilder.from_rawfiles(files, persist=False)
        data, header = bflat.build(set_it=False, inclheader=inclheader, **kwargs)
        output = dask.delayed(fits.writeto)(fileout, data, header=header, overwrite=overwrite)
        outs.append(output)

    return outs
class Flat( _Image_ ):
    """A full-CCD ZTF flat image with optional dask-backed lazy loading."""

    # Full-CCD and per-quadrant pixel shapes (rows, columns).
    SHAPE = 6160, 6144
    QUADRANT_SHAPE = 3080, 3072
    def __init__(self, data, header=None, use_dask=True):
        """Store the image data (and header if given)."""
        _ = super().__init__(use_dask=use_dask)
        self.set_data(data)
        if header is not None:
            self.set_header(header)

    # ============== #
    #  I/O           #
    # ============== #
    @classmethod
    def from_filename(cls, filename, use_dask=True, assume_exist=True):
        """ loads the object given the input file.

        Parameters
        ----------

        assume_exist: [bool]
            Shall this run ztfquery.io.get_file() ?
            NOTE(review): this flag is currently unused — get_file() is run
            whenever the basename does not start with 'ztfin2p3'; confirm
            the intended behavior.
        """
        from ztfquery import io
        basename = os.path.basename(filename)
        if not basename.startswith("ztfin2p3"):
            filename = io.get_file(filename)

        if ".fits" in basename:
            return cls.read_fits(filename, use_dask=use_dask)
        else:
            raise NotImplementedError(f"Only fits file loader implemented (read_fits) ; (unknown) given")

    @classmethod
    def from_date(cls, date, ledid, ccdid, use_dask=True, **kwargs):
        """Load the flat for a given date, LED id and CCD id from disk."""
        from ..io import get_filepath
        filename = get_filepath("flat", date, ccdid=ccdid, ledid=ledid)
        return cls.from_filename(filename, use_dask=use_dask, **kwargs)

    @classmethod
    def read_fits(cls, fitsfile, use_dask=True):
        """Read a flat FITS file; with use_dask, data and header load lazily."""
        if use_dask:
            data = da.from_delayed( dask.delayed(fits.getdata)(fitsfile),
                                    shape=cls.SHAPE, dtype="float")
            header= dask.delayed(fits.getheader)(fitsfile)
        else:
            data = fits.getdata(fitsfile)
            header= fits.getheader(fitsfile)

        this = cls(data=data, header=header, use_dask=use_dask)
        this._filename = fitsfile
        return this

    @classmethod
    def build_from_rawfiles(cls, rawfiles, **kwargs):
        """Build a flat in memory from raw files (no header is attached)."""
        bflat = FlatBuilder.from_rawfiles(rawfiles, persist=False)
        data, header = bflat.build(set_it=False, **kwargs)
        return cls(data, header=None, use_dask=True)

    # ============== #
    #  Method        #
    # ============== #
    def get_quadrant_data(self, qid, **kwargs):
        """ **kwargs goes to get_data() this then split the data.

        Parameters
        ----------
        qid: [int or None/'*']
            which quadrant you want ?
            - int: 1,2,3 or 4
            - None or '*'/'all': all quadrant return as list [1,2,3,4]

        **kwargs goes to get_data()

        Returns
        -------
        ndarray (numpy or dask)
        """
        if qid in ["*","all"]:
            qid = None
        if qid is not None:
            qid = int(qid)

        dataccd = self.get_data(**kwargs)
        # this accounts for all rotation and rebin did before
        qshape = np.asarray(np.asarray(dataccd.shape)/2, dtype="int")

        # Quadrants are numbered 1-4; each is one half-row x half-column
        # corner of the CCD array.
        if qid == 1:
            data_ = dataccd[qshape[0]:, qshape[1]:]
        elif qid == 2:
            data_ = dataccd[qshape[0]:, :qshape[1]]
        elif qid == 3:
            data_ = dataccd[:qshape[0], :qshape[1]]
        elif qid == 4:
            data_ = dataccd[:qshape[0], qshape[1]:]
        elif qid is None or qid in ["*","all"]:
            data_ = [dataccd[qshape[0]:, qshape[1]:],
                     dataccd[qshape[0]:, :qshape[1]],
                     dataccd[:qshape[0], :qshape[1]],
                     dataccd[:qshape[0], qshape[1]:]
                     ]
        else:
            raise ValueError(f"qid must be 1->4 {qid} given")

        return data_
class FlatFocalPlane( FocalPlane ):
    """The 16-CCD focal plane of flats; quadrant access is data-only."""

    @classmethod
    def from_filenames(cls, flatfilenames, use_dask=True, **kwargs):
        """Build the focal plane from a list of per-CCD flat files.

        The ccdid is parsed from each filename (``..._c{ccdid}_...``).
        """
        this = cls(use_dask=use_dask)
        for file_ in flatfilenames:
            ccd_ = Flat.from_filename(file_, use_dask=use_dask, **kwargs)
            ccdid = int(file_.split("_")[-3].replace("c",""))
            this.set_ccd(ccd_, ccdid=ccdid)

        this._filenames = flatfilenames
        return this

    @classmethod
    def from_date(cls, date, ledid, use_dask=True, **kwargs):
        """Load the flats of all 16 CCDs for the given date and LED id."""
        from ..io import get_filepath
        ccdids = np.arange(1,17)
        filenames = [get_filepath("flat", date, ccdid=ccdid_, ledid=ledid)
                     for ccdid_ in ccdids]
        return cls.from_filenames(filenames, use_dask=use_dask, **kwargs)

    # ============= #
    #   Methods     #
    # ============= #
    def get_quadrant_data(self, rcid, **kwargs):
        """Return the quadrant data for a read-channel id (rcid)."""
        ccdid, qid = self.rcid_to_ccdid_qid(rcid)
        return self.get_ccd(ccdid).get_quadrant_data(qid, **kwargs)

    def get_quadrant(self, *args, **kwargs):
        """Flats are CCD-based; use get_quadrant_data() instead."""
        # BUGFIX: was `raise NotImplemented(...)` — NotImplemented is a
        # singleton, not an exception class, so calling it raised TypeError.
        raise NotImplementedError("get_quadrant() is not usable as flat are CCD-base. See get_quadrant_data().")
# ==================== #
# #
# Flat Builder #
# #
# ==================== #
from .builder import CalibrationBuilder
class FlatBuilder( CalibrationBuilder ):
    """Builds flat calibration frames (stacked, corrected raws) and headers."""

    # -------- #
    #  BUILDER #
    # -------- #
    def build(self, corr_nl=True, corr_overscan=True, clipping=True,
                  set_it=False, inclheader=True, **kwargs):
        """Build the flat, applying non-linearity/overscan corrections and clipping.

        Simply forwards to CalibrationBuilder.build with flat-appropriate defaults.
        """
        return super().build(corr_nl=corr_nl,
                             corr_overscan=corr_overscan,
                             clipping=clipping,
                             set_it=set_it, inclheader=inclheader,
                             **kwargs)

    def build_header(self, keys=None, refid=0, inclinput=False):
        """Build the output FITS header from the header of input image `refid`.

        Only the instrument/observation `keys` are copied; NINPUTS records
        the number of stacked images, and inclinput adds one INPUTxx card
        per input file basename.
        """
        from astropy.io import fits

        if keys is None:
            keys = ["ORIGIN","OBSERVER","INSTRUME","IMGTYPE","EXPTIME",
                    "CCDSUM","CCD_ID","CCDNAME","PIXSCALE","PIXSCALX","PIXSCALY",
                    "FRAMENUM","ILUM_LED", "ILUMWAVE", "PROGRMID","FILTERID",
                    "FILTER","FILTPOS","RA","DEC", "OBSERVAT"]

        header = self.imgcollection.get_singleheader(refid, as_serie=True)
        if type(header) == dask.dataframe.core.Series:
            # Lazy (dask) header: materialize it before indexing.
            header = header.compute()

        header = header.loc[keys]
        newheader = fits.Header(header.loc[keys].to_dict())
        newheader.set(f"NINPUTS",self.imgcollection.nimages, "num. input images")

        if inclinput:
            basenames = self.imgcollection.filenames
            for i, basename_ in enumerate(basenames):
                newheader.set(f"INPUT{i:02d}",basename_, "input image")

        return newheader
| 1.78125 | 2 |
src/pbn_api/migrations/0026_auto_20210816_0815.py | iplweb/django-bpp | 1 | 12798752 | <reponame>iplweb/django-bpp<gh_stars>1-10
# Generated by Django 3.0.14 on 2021-08-16 06:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the unique_together constraints on
    OswiadczenieInstytucji and PublikacjaInstytucji (empty set == none)."""

    dependencies = [
        ("pbn_api", "0025_auto_20210809_0149"),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name="oswiadczenieinstytucji",
            unique_together=set(),
        ),
        migrations.AlterUniqueTogether(
            name="publikacjainstytucji",
            unique_together=set(),
        ),
    ]
| 0.515625 | 1 |
rpicam/utils/telegram_poster.py | LokiLuciferase/rpicam | 0 | 12798760 | #!/usr/bin/env python3
import os
from typing import Union
from pathlib import Path
import requests
from rpicam.utils.logging_utils import get_logger
class TelegramPoster:
    """
    Bare-bones class to post videos to a Telegram chat.
    Uses per default credentials stored in environment.
    """

    API_URL = 'https://api.telegram.org'
    API_TOKEN_ENV_VAR = 'RPICAM_TG_API_TOKEN'
    CHAT_ID_ENV_VAR = 'RPICAM_TG_CHAT_ID'

    def __init__(self, api_token: str = None, chat_id: str = None):
        """Store credentials, falling back to the environment.

        Both api_token and chat_id must be given to be used; otherwise both
        are read from the environment. Raises RuntimeError when either is
        still missing afterwards.
        """
        if api_token is not None and chat_id is not None:
            self.api_token = api_token
            self.chat_id = chat_id
        else:
            self.api_token = os.getenv(self.API_TOKEN_ENV_VAR, None)
            self.chat_id = os.getenv(self.CHAT_ID_ENV_VAR, None)
        self._logger = get_logger(self.__class__.__name__, verb=True)
        if self.api_token is None or self.chat_id is None:
            raise RuntimeError('Could not find Telegram credentials in environment.')

    def send_video(self, p: Union[Path, str]):
        """Post the given video to Telegram using stored credentials.

        Raises RuntimeError when the file does not exist; upload failures
        are logged, not raised.
        """
        p = Path(str(p)).resolve()
        if not p.is_file():
            raise RuntimeError(f'file not found: {p}')
        url = f'{self.API_URL}/bot{self.api_token}/sendVideo'
        # BUGFIX: the file handle was previously opened without ever being
        # closed; the context manager closes it deterministically.
        with open(p, 'rb') as video_fh:
            files = {
                'chat_id': (None, self.chat_id),
                'video': (str(p), video_fh)
            }
            r = requests.post(url, files=files)
        if r.status_code != 200:
            self._logger.error(f'Could not upload file. Exit code was {r.status_code}')
| 2.0625 | 2 |
sensehat/__init__.py | myDevicesIoT/cayennee-plugin-sensehat | 3 | 12798768 | """
This module provides a class for interfacing with the Sense HAT add-on board for Raspberry Pi.
"""
import os
from multiprocessing.managers import RemoteError
from myDevices.utils.logger import error, exception, info
from sensehat.manager import connect_client
class SenseHAT():
    """Class for interacting with a Sense HAT device"""

    def __init__(self, use_emulator=False):
        """Initializes Sense HAT device.

        Arguments:
            use_emulator: True if the Sense HAT Emulator should be used. This requires the Emulator to be installed and running on the desktop.
        """
        self.use_emulator = use_emulator
        self.sense_hat = None           # shared SenseHat proxy; lazily connected
        self.digital_value = 0          # last value written by set_digital
        self.analog_value = 0.0         # last value written by set_analog
        self.image_file = os.path.join('/etc/myDevices/plugins/cayenne-plugin-sensehat/data/image.png')
        self.call_sense_hat_function('clear')

    def init_sense_hat(self):
        """Initializes connection to Sense HAT service and gets a SenseHat shared object."""
        if not self.sense_hat:
            try:
                self.manager = connect_client()
                self.manager.use_emulator(self.use_emulator)
                self.sense_hat = self.manager.SenseHat()
            except ConnectionRefusedError as e:
                info('Sense HAT service connection refused')
                error(e)
            except RemoteError as e:
                error('Failed to connect to Sense HAT device')

    def call_sense_hat_function(self, function_name, *args):
        """Calls a function of the SenseHat shared object.

        Arguments:
            function_name: Name of the function to call.
            args: Arguments to pass to the function.
        """
        self.init_sense_hat()
        try:
            if self.sense_hat is not None:
                func = getattr(self.sense_hat, function_name)
                value = func(*args)
                return value
        except EOFError as e:
            error(e)
            # BUGFIX: previously assigned a local `sense_hat = None`, which
            # never reset the connection; clear the attribute so the next
            # call reconnects.
            self.sense_hat = None
        except AttributeError as e:
            error(e)
            self.sense_hat = None

    def get_temperature(self):
        """Gets the temperature as a tuple with type and unit."""
        return (self.call_sense_hat_function('get_temperature'), 'temp', 'c')

    def get_humidity(self):
        """Gets the humidity as a tuple with type and unit."""
        return (self.call_sense_hat_function('get_humidity'), 'rel_hum', 'p')

    def get_pressure(self):
        """Gets the pressure as a tuple with type and unit."""
        value = self.call_sense_hat_function('get_pressure')
        if value is not None:
            # Sensor reports millibars; convert to pascals.
            return (value * 100, 'bp', 'pa')

    def get_acclerometer(self):
        """Gets the g-force as a tuple with type and unit."""
        values = self.call_sense_hat_function('get_accelerometer_raw')
        if values is not None:
            g_force = [values['x'], values['y'], values['z']]
            return (g_force, 'accel', 'g')

    def get_gyroscope(self):
        """Gets radians per second from the gyroscope."""
        #Not currently supported in Cayenne
        values = self.call_sense_hat_function('get_gyroscope_raw')
        if values is not None:
            rps = [values['x'], values['y'], values['z']]
            return rps

    def get_magnetometer(self):
        """Gets microteslas from the magnetometer."""
        #Not currently supported in Cayenne
        values = self.call_sense_hat_function('get_compass_raw')
        if values is not None:
            microteslas = [values['x'], values['y'], values['z']]
            return microteslas

    def get_digital(self):
        """Gets the digital value as a tuple specifying this is a digital actuator."""
        return (self.digital_value, 'digital_actuator')

    def set_digital(self, value):
        """Displays an image on the Sense HAT LED matrix if the digital value is equal to True."""
        self.digital_value = value
        if self.digital_value:
            self.call_sense_hat_function('load_image', self.image_file)
        else:
            self.call_sense_hat_function('clear')

    def get_analog(self):
        """Gets the analog value as a tuple specifying this is an analog actuator."""
        return (self.analog_value, 'analog_actuator')

    def set_analog(self, value):
        """Displays the analog value on the Sense HAT LED matrix."""
        self.analog_value = value
        self.call_sense_hat_function('show_message', str(self.analog_value))
| 1.757813 | 2 |
preprocess/TripleClassificationData.py | lualiu/GanforKGE | 0 | 12798776 | <gh_stars>0
import os
import numpy as np
import torch
from utils.readdata import read_dicts_from_file,read_triples_from_file,turn_triples_to_label_dict
class TripleClassificationData(object):
    """Loads train/valid/test knowledge-graph triples for triple classification.

    Entities and relations are mapped to integer ids; valid/test files carry a
    0/1 label per triple (negative triples are kept only for classification).
    """

    def __init__(self,data_path,train_data_name,valid_data_name,test_data_name,with_reverse=False):
        # Build id dictionaries over all three splits so ids are consistent.
        self.entity_dict,self.relation_dict = read_dicts_from_file(
            [os.path.join(data_path,train_data_name),
             os.path.join(data_path,valid_data_name),
             os.path.join(data_path,test_data_name)],
            with_reverse=with_reverse
        )
        self.entity_numbers = len(self.entity_dict.keys())
        self.relation_numbers = len(self.relation_dict.keys())

        # Train triples come unlabeled; valid/test are labeled and parsed here.
        self.train_triples_with_reverse = read_triples_from_file(os.path.join(data_path, train_data_name),
                                                                 self.entity_dict, self.relation_dict,
                                                                 with_reverse=with_reverse)
        self.valid_triples_with_reverse,self.valid_triples_for_classification = self.read_triple_from_file(os.path.join(data_path, valid_data_name),
                                                                 self.entity_dict, self.relation_dict,
                                                                 with_reverse=with_reverse)
        self.test_triples_with_reverse,self.test_triples_for_classification = self.read_triple_from_file(os.path.join(data_path, test_data_name),
                                                               self.entity_dict, self.relation_dict,
                                                               with_reverse=with_reverse)
        self.train_numbers = len(self.train_triples_with_reverse)

        # Label dicts map (head, relation, tail) tuples so membership tests
        # during negative sampling are O(1).
        self.train_triples_dict = turn_triples_to_label_dict(self.train_triples_with_reverse)
        self.valid_triples_dict = turn_triples_to_label_dict(self.valid_triples_with_reverse)
        self.test_triples_dict = turn_triples_to_label_dict(self.test_triples_with_reverse)

        # All known-true triples across splits (used to filter sampled negatives).
        self.gold_triples_dict = dict(list(self.train_triples_dict.items()) +
                                      list(self.valid_triples_dict.items()) +
                                      list(self.test_triples_dict.items()))

        #del self.train_triples_with_reverse
        # Free the split dicts; only train_triples_dict and gold_triples_dict
        # are needed afterwards.
        del self.valid_triples_dict
        del self.test_triples_dict

        self.train_triples_numpy_array = np.array(self.train_triples_with_reverse).astype(np.int32)
        self.valid_triples_for_classification = np.array(self.valid_triples_for_classification).astype(np.int32)
        self.test_triples_for_classification = np.array(self.test_triples_for_classification).astype(np.int32)

    def read_triple_from_file(self,filename,entity_dict,relation_dict,with_reverse):
        """Parse a labeled triple file.

        Returns (positive_triples, all_triples_with_label): only label==1
        triples (optionally with reversed duplicates) go into the first list;
        every line, with its label, goes into the second for classification.
        """
        triples_list = []
        classification_triples_label = []
        with open(filename) as file:
            for line in file:
                head, relation, tail, label = line.strip().split('\t')
                if int(label) == 1:
                    triples_list.append([
                        entity_dict[head],
                        relation_dict[relation],
                        entity_dict[tail]
                    ])
                    if with_reverse:
                        relation_reverse = relation + '_reverse'
                        triples_list.append([
                            entity_dict[tail],
                            relation_dict[relation_reverse],
                            entity_dict[head]
                        ])
                classification_triples_label.append([
                    entity_dict[head],
                    relation_dict[relation],
                    entity_dict[tail],
                    label
                ])
        return triples_list,classification_triples_label

    def get_batch(self,batch_size):
        """Yield (true_batch, corrupted_batch) tensor pairs over a shuffled epoch.

        Each corrupted triple replaces the tail with a random entity, re-drawn
        until the corrupted triple is not a known training triple.
        """
        random_index = np.random.permutation(self.train_numbers)
        random_train_triple = self.train_triples_numpy_array[random_index]
        pointer = 0
        while pointer < self.train_numbers:
            start_index = pointer
            end_index = start_index + batch_size
            if end_index >= self.train_numbers:
                end_index = self.train_numbers
            pointer = end_index
            current_batch_size = end_index - start_index

            new_batch_train_triple_true = random_train_triple[start_index:end_index,:].copy()
            new_batch_train_triple_fake = random_train_triple[start_index:end_index,:].copy()

            random_words = np.random.randint(0,self.entity_numbers,current_batch_size)
            for index in range(current_batch_size):
                # Re-sample until the corrupted tail does not form a true triple.
                while (new_batch_train_triple_fake[index,0],
                       new_batch_train_triple_fake[index,1],
                       random_words[index]) in self.train_triples_dict:
                    random_words[index] = np.random.randint(0,self.entity_numbers)
                new_batch_train_triple_fake[index,2] = random_words[index]

            yield torch.tensor(new_batch_train_triple_true).long().cuda(),torch.tensor(new_batch_train_triple_fake).long().cuda()
| 1.859375 | 2 |
LeetCode/852 Peak Index in a Mountain Array.py | gesuwen/Algorithms | 0 | 12798784 | # Binary Search
# Let's call an array A a mountain if the following properties hold:
#
# A.length >= 3
# There exists some 0 < i < A.length - 1 such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]
# Given an array that is definitely a mountain, return any i such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].
#
# Example 1:
#
# Input: [0,1,0]
# Output: 1
# Example 2:
#
# Input: [0,2,1,0]
# Output: 1
# Note:
#
# 3 <= A.length <= 10000
# 0 <= A[i] <= 10^6
# A is a mountain, as defined above.
class Solution:
    def peakIndexInMountainArray(self, A):
        """
        :type A: List[int]
        :rtype: int

        Binary search on the "still ascending?" predicate: A[m] < A[m+1]
        holds exactly for indices left of the peak, so the search converges
        on the first index where it fails — the peak.

        Fixes the original loop, which shrank the bounds without the usual
        +1/-1 (no guaranteed progress) and probed A[m-1], relying on
        Python's negative-index wraparound when m could reach 0.
        """
        lo, hi = 0, len(A) - 1
        while lo < hi:
            mid = (lo + hi) // 2
            if A[mid] < A[mid + 1]:
                lo = mid + 1   # still ascending: peak is strictly right of mid
            else:
                hi = mid       # descending (or at peak): peak is mid or left
        return lo
| 3.09375 | 3 |
publisher/conf.py | hongsups/scipy_proceedings | 1 | 12798792 | <filename>publisher/conf.py
import glob
import os
import io
# Paper directories to skip when scanning ../papers.
excludes = ['vanderwalt', 'bibderwalt']
# status_file_root possible values: draft, conference, ready
status_file_base = 'draft'
status_file_name = ''.join([status_file_base, '.sty'])

# Directory layout, all relative to this file's location.
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir, '../papers')
output_dir = os.path.join(work_dir, '../output')
template_dir = os.path.join(work_dir, '_templates')
static_dir = os.path.join(work_dir, '_static')
css_file = os.path.join(static_dir, 'scipy-proc.css')
toc_list = os.path.join(static_dir, 'toc.txt')
build_dir = os.path.join(work_dir, '_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir, '../scipy_proc.json')
xref_conf  = os.path.join(build_dir, 'doi_batch.xml')
status_file = os.path.join(static_dir, status_file_name)

# Paper ordering: an explicit toc.txt wins; otherwise scan the papers
# directory alphabetically, skipping the excluded entries.
if os.path.isfile(toc_list):
    with io.open(toc_list, 'r', encoding='utf-8') as f:
        dirs = f.read().splitlines()
else:
    dirs = sorted([os.path.basename(d)
                   for d in glob.glob('%s/*' % papers_dir)
                   if os.path.isdir(d) and not any(e in d for e in excludes)])
| 1.179688 | 1 |
Portfolio_Strategies/vectorized_backtesting.py | vhn0912/Finance | 441 | 12798800 | import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import datetime
from yahoo_fin import stock_info as si
# --- Moving-average crossover backtest over the Dow 30 ---
# Downloads a year of prices, then compares a 5/22-day MA crossover strategy
# against buy-and-hold, for one user-chosen stock and for the whole portfolio.
plt.rcParams['figure.figsize'] = (15, 10)
tickers = si.tickers_dow()
individual_stock = input(f"Which of the following stocks would you like to backtest \n{tickers}\n:")
num_of_years = 1
start = datetime.date.today() - datetime.timedelta(days = int(365.25*num_of_years))
yf_prices = yf.download(tickers, start=start)

# Individual Stock Strategy
prices = yf_prices['Adj Close'][individual_stock]
# rs holds daily log returns.
rs = prices.apply(np.log).diff(1).fillna(0)

# Short (w1) and long (w2) moving-average windows, in trading days.
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
# Position is the sign of the crossover: +1 long, -1 short, 0 flat.
pos = ma_x.apply(np.sign)

fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title=f'{individual_stock} Moving Average Crossovers and Positions')
pos.plot(ax=ax[1])
plt.show()

# Yesterday's position earns today's return (shift avoids look-ahead bias).
my_rs = pos.shift(1)*rs
plt.subplots()
my_rs.cumsum().apply(np.exp).plot(title=f'{individual_stock} MA Strategy Performance')
rs.cumsum().apply(np.exp).plot()
plt.legend([f'{individual_stock} MA Performace', f'{individual_stock} Buy and Hold Performnace'])
plt.show()

print (f'Performance Statistics for {individual_stock} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')

# Full Portfolio Strategy
prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1).fillna(0)

w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
# Normalize so the absolute portfolio weights sum to one.
pos /= pos.abs().sum(1).values.reshape(-1,1)

fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title='Individual Moving Average Crossovers and Positions')
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
pos.plot(ax=ax[1])
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
plt.show()

my_rs = (pos.shift(1)*rs)
my_rs.cumsum().apply(np.exp).plot(title='Individual Stocks Strategy Performance')
plt.show()

print ('-' * 60)
print (f'Performance Statistics for {num_of_years} years:')
for i in range(len(tickers)):
    print (f'Moving Average Return for {tickers[i]}: ' + str(100 * round(my_rs.cumsum().apply(np.exp)[tickers[i]].tolist()[-1], 4)) + '%')
    i = i + 1

plt.subplots()
my_rs = (pos.shift(1)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot(title='Full Portfolio Strategy Performance')
rs.mean(1).cumsum().apply(np.exp).plot()
plt.legend(['Portfolio MA Performace', 'Buy and Hold Performnace'])
plt.show()

print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.mean(1).cumsum().apply(np.exp).tolist()[-1], 4)) + '%')

# Portfolio Tests
# Look-Ahead Bias: compare using today's position (biased) vs yesterday's.
my_rs1 = (pos*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1)

plt.subplots()
my_rs1.cumsum().apply(np.exp).plot(title='Full Portfolio Performance')
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])
plt.show()

print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('With Look-Ahead Bias: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Without Look-Ahead Bias: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')

# Signal Lags: how sensitive the strategy is to acting k days late.
lags = range(1, 11)
lagged_rs = pd.Series(dtype=float, index=lags)
print ('-' * 60)
print (f'Lag Performance Statistics for {tickers} ({num_of_years} years):')
for lag in lags:
    my_rs = (pos.shift(lag)*rs).sum(1)
    my_rs.cumsum().apply(np.exp).plot()
    lagged_rs[lag] = my_rs.sum()
    print (f'Lag {lag} Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')

plt.title('Full Portfolio Strategy Performance with Lags')
plt.legend(lags, bbox_to_anchor=(1.1, 0.95))
plt.show()

# Transaction Costs: charge tc_pct on every change of position.
tc_pct = 0.01
delta_pos = pos.diff(1).abs().sum(1)
my_tcs = tc_pct*delta_pos

my_rs1 = (pos.shift(1)*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1) - my_tcs

plt.subplots()
my_rs1.cumsum().apply(np.exp).plot()
my_rs2.cumsum().apply(np.exp).plot()
plt.title('Full Portfolio Performance')
plt.legend(['Without Transaction Costs', 'With Transaction Costs'])
plt.show()

print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Without Transaction Costs: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('With Transaction Costs: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')