max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
pypy/jit/codegen/detect_cpu.py | camillobruni/pygirl | 12 | 12799608 | <filename>pypy/jit/codegen/detect_cpu.py
"""
Processor auto-detection
"""
import sys, os
class ProcessorAutodetectError(Exception):
pass
def autodetect():
mach = None
try:
import platform
mach = platform.machine()
except ImportError:
pass
if not mach:
platform = sys.platform.lower()
if platform.startswith('win'): # assume an Intel Windows
return 'i386'
# assume we have 'uname'
mach = os.popen('uname -m', 'r').read().strip()
if not mach:
raise ProcessorAutodetectError, "cannot run 'uname -m'"
    if mach == 'x86_64' and sys.maxsize == 2147483647:
        mach = 'x86' # it's a 64-bit processor but running in 32-bit mode, maybe
try:
return {'i386': 'i386',
'i486': 'i386',
'i586': 'i386',
'i686': 'i386',
'i86pc': 'i386', # Solaris/Intel
'x86': 'i386', # Apple
'Power Macintosh': 'ppc',
}[mach]
except KeyError:
raise ProcessorAutodetectError, "unsupported processor '%s'" % mach
| 1.632813 | 2 |
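A minimal usage sketch for the module above; the import path mirrors the repository path in the metadata row and is an assumption about how the package is laid out.

from pypy.jit.codegen.detect_cpu import autodetect, ProcessorAutodetectError

try:
    print("detected CPU family: %s" % autodetect())  # e.g. 'i386' or 'ppc'
except ProcessorAutodetectError as e:
    print("autodetection failed: %s" % e)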
best_practice_examples/multiple_state_variables_bp.py | kallelzied/PythonTutoriel | 0 | 12799616 | <reponame>kallelzied/PythonTutoriel<filename>best_practice_examples/multiple_state_variables_bp.py
"""multiple_state_variables_bp.py: Giving an example of best practicing with multiple state variables."""
__author__ = "<NAME>"
__copyright__ = """
Copyright 2018 multiple_state_variables_bp.py
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
def fibonacci_wrong_way(n):
x = 0
y = 1
l = []
for i in range(n):
l.append(x)
t = y
y = x + y
x = t
print(l)
def fibonacci_correct_way(n):
x, y, l = 0, 1, []
for i in range(n):
l.append(x)
x, y = y, x + y
print(l)
# Entry point
if __name__ == '__main__':
fibonacci_wrong_way(100)
fibonacci_correct_way(100)
| 2.4375 | 2 |
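Why the "correct" version needs no temporary: the right-hand side of a tuple assignment is evaluated in full before any name is rebound. A minimal demonstration:

x, y = 0, 1
x, y = y, x + y   # the tuple (1, 0 + 1) is built first, then unpacked
assert (x, y) == (1, 1)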
feedparser/datetimes/greek.py | verhovsky/feedparser | 0 | 12799624 | <reponame>verhovsky/feedparser<filename>feedparser/datetimes/greek.py
from __future__ import absolute_import, unicode_literals
import re
from .rfc822 import _parse_date_rfc822
# Unicode strings for Greek date strings
_greek_months = \
{ \
'\u0399\u03b1\u03bd': 'Jan', # c9e1ed in iso-8859-7
'\u03a6\u03b5\u03b2': 'Feb', # d6e5e2 in iso-8859-7
'\u039c\u03ac\u03ce': 'Mar', # ccdcfe in iso-8859-7
'\u039c\u03b1\u03ce': 'Mar', # cce1fe in iso-8859-7
'\u0391\u03c0\u03c1': 'Apr', # c1f0f1 in iso-8859-7
'\u039c\u03ac\u03b9': 'May', # ccdce9 in iso-8859-7
'\u039c\u03b1\u03ca': 'May', # cce1fa in iso-8859-7
'\u039c\u03b1\u03b9': 'May', # cce1e9 in iso-8859-7
'\u0399\u03bf\u03cd\u03bd': 'Jun', # c9effded in iso-8859-7
'\u0399\u03bf\u03bd': 'Jun', # c9efed in iso-8859-7
'\u0399\u03bf\u03cd\u03bb': 'Jul', # c9effdeb in iso-8859-7
'\u0399\u03bf\u03bb': 'Jul', # c9f9eb in iso-8859-7
'\u0391\u03cd\u03b3': 'Aug', # c1fde3 in iso-8859-7
'\u0391\u03c5\u03b3': 'Aug', # c1f5e3 in iso-8859-7
'\u03a3\u03b5\u03c0': 'Sep', # d3e5f0 in iso-8859-7
'\u039f\u03ba\u03c4': 'Oct', # cfeaf4 in iso-8859-7
'\u039d\u03bf\u03ad': 'Nov', # cdefdd in iso-8859-7
'\u039d\u03bf\u03b5': 'Nov', # cdefe5 in iso-8859-7
'\u0394\u03b5\u03ba': 'Dec', # c4e5ea in iso-8859-7
}
_greek_wdays = \
{ \
'\u039a\u03c5\u03c1': 'Sun', # caf5f1 in iso-8859-7
'\u0394\u03b5\u03c5': 'Mon', # c4e5f5 in iso-8859-7
'\u03a4\u03c1\u03b9': 'Tue', # d4f1e9 in iso-8859-7
'\u03a4\u03b5\u03c4': 'Wed', # d4e5f4 in iso-8859-7
'\u03a0\u03b5\u03bc': 'Thu', # d0e5ec in iso-8859-7
'\u03a0\u03b1\u03c1': 'Fri', # d0e1f1 in iso-8859-7
'\u03a3\u03b1\u03b2': 'Sat', # d3e1e2 in iso-8859-7
}
_greek_date_format_re = \
re.compile(r'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')
def _parse_date_greek(dateString):
'''Parse a string according to a Greek 8-bit date format.'''
m = _greek_date_format_re.match(dateString)
if not m:
return
wday = _greek_wdays[m.group(1)]
month = _greek_months[m.group(3)]
rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
{'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
'zonediff': m.group(8)}
return _parse_date_rfc822(rfc822date)
| 1.28125 | 1 |
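A hedged usage sketch for the parser above, composing an input from the tables it defines ('Κυρ' maps to 'Sun', 'Ιαν' to 'Jan'); the exact return value depends on _parse_date_rfc822.

date_string = '\u039a\u03c5\u03c1, 11 \u0399\u03b1\u03bd 2004 12:00:00 +0200'
print(_parse_date_greek(date_string))  # parsed via the rebuilt RFC 822 string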
src/tests/test_bitfinex_algo.py | medvi/python-bitfinex_algo | 0 | 12799632 | <gh_stars>0
import logging
import unittest
from bitfinex_algo.cli import load_config, validate_config
from bitfinex_algo import cli as c
logger = logging.getLogger('bitfinex')
class ConfigTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
logging.disable(logging.CRITICAL)
@classmethod
def tearDownClass(cls):
logging.disable(logging.NOTSET)
def test_load_config(self):
self.assertIsNone(load_config('tests/config/invalid_config_1.yaml'))
self.assertDictEqual(
validate_config(load_config('tests/config/valid_config_5.yaml')),
{
c.LEVELS: [{
c.BUY_PRICE: 95,
c.SELL_PRICE: 100,
c.ORDER_SIZE: 100,
c.ORDER_COUNT: 2
}, {
c.BUY_PRICE: 100,
c.SELL_PRICE: 105,
c.ORDER_SIZE: 100,
c.ORDER_COUNT: 1
}],
c.UPDATE_FREQUENCY: 3,
}
)
def test_validate_config(self):
for i in range(2, 5):
with self.subTest(i=i):
config = load_config(f'tests/config/invalid_config_{i}.yaml')
self.assertIsNone(validate_config(config))
| 1.734375 | 2 |
ohsomeTools/OhsomeToolsPlugin.py | GIScience/ohsome-qgis-plugin | 3 | 12799640 | <reponame>GIScience/ohsome-qgis-plugin
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ohsomeTools
A QGIS plugin
QGIS client to query the ohsome API
-------------------
begin : 2021-05-01
git sha : $Format:%H$
copyright : (C) 2021 by <NAME>
email : <EMAIL>
***************************************************************************/
This plugin provides access to the ohsome API (https://api.ohsome.org),
developed and maintained by the Heidelberg Institute for Geoinformation
Technology, HeiGIT gGmbH, Heidelberg, Germany.
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from qgis.core import QgsApplication
from .gui import OhsomeToolsDialog
from .proc import provider
class OhsomeTools:
"""QGIS Plugin Implementation."""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
self.dialog = OhsomeToolsDialog.OhsomeToolsDialogMain(iface)
self.provider = provider.OhsomeToolsProvider()
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
QgsApplication.processingRegistry().addProvider(self.provider)
self.dialog.initGui()
def unload(self):
"""remove menu entry and toolbar icons"""
QgsApplication.processingRegistry().removeProvider(self.provider)
self.dialog.unload()
| 1.023438 | 1 |
generalgui/properties/funcs.py | Mandera/generalgui | 1 | 12799648 | from generallibrary import getBaseClassNames, SigInfo, dict_insert, wrapper_transfer
def set_parent_hook(self, parent, _draw=True):
""" :param generalgui.MethodGrouper self:
:param generalgui.MethodGrouper parent: """
if _draw:
for part in self.get_children(depth=-1, include_self=True, gen=True):
part.draw_create()
assert "Contain" in getBaseClassNames(parent) or parent is None
class PartBaseClass:
def draw_create_hook(self, kwargs):
""" Used to decouple properties, called by draw_create which is called by init and set_parent. """
def draw_create_post_hook(self):
""" Called after widget is packed. """
def _deco_draw_queue(func):
""" Append one order to dict for this func call.
Creates a key with id of Part and func's name.
If key exists as an old order then it's removed.
Returns key unless draw_now is True. """
def _wrapper(*args, **kwargs):
sigInfo = SigInfo(func, *args, **kwargs)
methodGrouper = sigInfo["self"]
orders = methodGrouper.orders
key = methodGrouper.get_order_key(func)
if sigInfo["draw_now"]:
orders.pop(key, None) # This allows us to manually call draw_create(draw_now=True) after instantiating a Page instead of passing draw_now to Page.
sigInfo.call()
else:
orders[key] = sigInfo
return key
# Could possibly do something like this to skip queue instead of drawing instantly
# if sigInfo["draw_now"]:
# dict_insert(orders, **{key: sigInfo})
# else:
# orders[key] = sigInfo
return wrapper_transfer(func, _wrapper)
| 1.609375 | 2 |
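A standalone sketch of the queueing idea documented in _deco_draw_queue, stripped of the generalgui/generallibrary machinery; all names here are illustrative.

orders = {}

def queue_or_call(part_id, func_name, call, draw_now=False):
    key = (part_id, func_name)  # one pending order per part+method
    if draw_now:
        orders.pop(key, None)   # drop any stale queued order...
        call()                  # ...and execute immediately
    else:
        orders[key] = call      # the newest call wins for this key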
utils/BaseFlags.py | cvsubmittemp/BraVL | 0 | 12799656 | <reponame>cvsubmittemp/BraVL
import os
import argparse
import torch
import scipy.io as sio
parser = argparse.ArgumentParser()
# TRAINING
parser.add_argument('--batch_size', type=int, default=512, help="batch size for training")
parser.add_argument('--initial_learning_rate', type=float, default=0.0001, help="starting learning rate")
parser.add_argument('--beta_1', type=float, default=0.9, help="default beta_1 val for adam")
parser.add_argument('--beta_2', type=float, default=0.999, help="default beta_2 val for adam")
parser.add_argument('--start_epoch', type=int, default=0, help="flag to set the starting epoch for training")
parser.add_argument('--end_epoch', type=int, default=100, help="flag to indicate the final epoch of training")
# DATA DEPENDENT
parser.add_argument('--class_dim', type=int, default=32, help="dimension of common factor latent space")
# SAVE and LOAD
parser.add_argument('--mm_vae_save', type=str, default='mm_vae', help="model save for vae_bimodal")
parser.add_argument('--load_saved', type=bool, default=False, help="flag to indicate if a saved model will be loaded")
# DIRECTORIES
# experiments
parser.add_argument('--dir_experiment', type=str, default='./logs', help="directory to save logs in")
parser.add_argument('--dataname', type=str, default='DIR-Wiki', help="dataset")
parser.add_argument('--sbj', type=str, default='sub-03', help="fmri subject")
parser.add_argument('--roi', type=str, default='LVC_HVC_IT', help="ROI")
parser.add_argument('--text_model', type=str, default='GPTNeo', help="text embedding model")
parser.add_argument('--image_model', type=str, default='pytorch/repvgg_b3g4', help="image embedding model")
parser.add_argument('--test_type', type=str, default='zsl', help='normal or zsl')
parser.add_argument('--aug_type', type=str, default='image_text', help='no_aug, image_text, image_only, text_only')
#multimodal
parser.add_argument('--method', type=str, default='joint_elbo', help='choose method for training the model')
parser.add_argument('--modality_jsd', type=bool, default=False, help="modality_jsd")
parser.add_argument('--modality_poe', type=bool, default=False, help="modality_poe")
parser.add_argument('--modality_moe', type=bool, default=False, help="modality_moe")
parser.add_argument('--joint_elbo', type=bool, default=False, help="modality_moe")
parser.add_argument('--poe_unimodal_elbos', type=bool, default=True, help="unimodal_klds")
parser.add_argument('--factorized_representation', action='store_true', default=False, help="factorized_representation")
# LOSS TERM WEIGHTS
parser.add_argument('--beta', type=float, default=0.0, help="default initial weight of sum of weighted divergence terms")
parser.add_argument('--beta_style', type=float, default=1.0, help="default weight of sum of weighted style divergence terms")
parser.add_argument('--beta_content', type=float, default=1.0, help="default weight of sum of weighted content divergence terms")
parser.add_argument('--lambda1', type=float, default=0.001, help="default weight of intra_mi terms")
parser.add_argument('--lambda2', type=float, default=0.001, help="default weight of inter_mi terms")
FLAGS = parser.parse_args()
data_dir_root = os.path.join('./data', FLAGS.dataname)
brain_dir = os.path.join(data_dir_root, 'brain_feature', FLAGS.roi, FLAGS.sbj)
image_dir_train = os.path.join(data_dir_root, 'visual_feature/ImageNetTraining', FLAGS.image_model+'-PCA', FLAGS.sbj)
text_dir_train = os.path.join(data_dir_root, 'textual_feature/ImageNetTraining/text', FLAGS.text_model, FLAGS.sbj)
train_brain = sio.loadmat(os.path.join(brain_dir, 'fmri_train_data.mat'))['data'].astype('double')
train_image = sio.loadmat(os.path.join(image_dir_train, 'feat_pca_train.mat'))['data'].astype('double')#[:,0:3000]
train_text = sio.loadmat(os.path.join(text_dir_train, 'text_feat_train.mat'))['data'].astype('double')
train_brain = torch.from_numpy(train_brain)
train_image = torch.from_numpy(train_image)
train_text = torch.from_numpy(train_text)
dim_brain = train_brain.shape[1]
dim_image = train_image.shape[1]
dim_text = train_text.shape[1]
parser.add_argument('--m1_dim', type=int, default=dim_brain, help="dimension of modality brain")
parser.add_argument('--m2_dim', type=int, default=dim_image, help="dimension of modality image")
parser.add_argument('--m3_dim', type=int, default=dim_text, help="dimension of modality text")
parser.add_argument('--data_dir_root', type=str, default=data_dir_root, help="data dir")
FLAGS = parser.parse_args()
print(FLAGS)
| 1.710938 | 2 |
debug.py | Epsilon-Lee/OpenNMT-V1 | 7 | 12799664 | <filename>debug.py
import torch
import torch.nn as nn
import onmt
from onmt.BleuCal import fetch_data
import sys
if torch.cuda.is_available():
torch.cuda.set_device(3)
checkpoint = torch.load('../Models/V1_IWSLT_Models/de2en_30k_bz64_bc5_bleu_26.06_e24.pt')
opt = checkpoint['opt']
# del(checkpoint)
opt.cuda = True
srcData, references = fetch_data('IWSLT/test.de.small.tok', 'IWSLT/test.en.small.tok')
encoder = onmt.Models.Encoder(opt, checkpoint['dicts']['src'])
decoder = onmt.Models.Decoder(opt, checkpoint['dicts']['tgt'])
model = onmt.Models.NMTModel(encoder, decoder)
model.load_state_dict(checkpoint['model'])
generator = nn.Sequential(
nn.Linear(opt.rnn_size, checkpoint['dicts']['tgt'].size()),
nn.LogSoftmax())
model.generator = generator
model.cuda()
opt.model = '../Models/V1_IWSLT_Models/de2en_30k_bz64_bc5_bleu_26.06_e24.pt'
translator = onmt.Translator(opt, model, checkpoint['dicts']['src'], checkpoint['dicts']['tgt'])
srcBatch, tgtBatch, candidate = [], [], []
lenSrcData = len(srcData)
for i, line in enumerate(srcData):
sys.stdout.write('\r')
sys.stdout.write("%s" % (str(i) + ' of ' + str(lenSrcData)))
sys.stdout.flush()
srcTokens = line.split()
srcBatch += [srcTokens]
if (i + 1) % opt.trans_batch_size == 0:
predBatch, _, _ = translator.translate(srcBatch, tgtBatch)
        print('predBatch: %d' % len(predBatch))
for b in range(len(predBatch)):
candidate += [" ".join(predBatch[b][0]) + '\n']
srcBatch = []
elif (i + 1) == lenSrcData:
predBatch, _, _ = translator.translate(srcBatch, tgtBatch)
        print('predBatch: %d' % len(predBatch))
for b in range(len(predBatch)):
candidate += [" ".join(predBatch[b][0]) + '\n']
srcBatch = []
else:
continue
print('candidate length: %d' % len(candidate))
print('reference length: %d' % len(references[0])) | 1.515625 | 2 |
genestack_client/genestack_exceptions.py | genestack/python-client | 2 | 12799672 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from urllib.error import URLError
MASTER_BRANCH = 'https://github.com/genestack/python-client/archive/master.zip'
PYPI_PACKAGE = 'genestack-client'
class GenestackBaseException(Exception):
"""
Base class for Genestack exceptions.
Use it to catch all exceptions raised explicitly by Genestack Python Client.
"""
pass
class GenestackException(GenestackBaseException):
"""
Client-side exception class.
Raise its instances (instead of :py:class:`~exceptions.Exception`)
if anything is wrong on client side.
"""
pass
class GenestackServerException(GenestackException):
"""
Server-side exception class.
Raised when Genestack server returns an error response
(error message generated by Genestack Java code, not an HTTP error).
"""
def __init__(self, message, path, post_data, debug=False, stack_trace=None):
"""
:param message: exception message
:type message: str
:param path: path after server URL of connection.
:type path: str
:param post_data: POST data (file or dict)
:type debug: bool
:param debug: flag if stack trace should be printed
:param stack_trace: server stack trace
:type stack_trace: str
"""
message = (message.decode('utf-8', 'ignore')
if isinstance(message, bytes) else message)
GenestackException.__init__(self, message, path, post_data, debug, stack_trace)
self.message = message
self.debug = debug
self.stack_trace = stack_trace
self.path = path
self.post_data = post_data
def __str__(self):
if isinstance(self.post_data, dict):
message = 'Got error "%s" at call of method "%s" of "%s"' % (
self.message,
self.post_data.get('method', '<unknown>'),
self.path
)
else:
# upload file
message = 'Got error "%s" at call of "%s"' % (
self.message,
self.path
)
if self.stack_trace:
if self.debug:
message += '\nStacktrace from server is:\n%s' % self.stack_trace
else:
message += '\nEnable debug option to retrieve traceback'
return message
class GenestackResponseError(GenestackBaseException, URLError):
"""
Wrapper for HTTP response errors.
Extends :py:class:`urllib2.URLError` for backward compatibility.
"""
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class GenestackConnectionFailure(GenestackBaseException, URLError):
"""
Wrapper for server connection failures.
Extends :py:class:`urllib2.URLError` for backward compatibility.
"""
def __init__(self, message):
self.message = "<connection failed %s>" % message
def __str__(self):
return self.message
class GenestackAuthenticationException(GenestackException):
"""
Exception thrown on an authentication error response from server.
"""
pass
class GenestackVersionException(GenestackException):
"""
Exception thrown if server requires a newer version on Python Client.
"""
def __init__(self, current_version, required_version=None):
"""
:param current_version: current version
:type current_version: distutils.version.StrictVersion
:param required_version: minimum required version
:type required_version: distutils.version.StrictVersion
"""
if required_version:
package = MASTER_BRANCH if required_version.prerelease else PYPI_PACKAGE
message = (
'Your Genestack Client version "{current_version}" is too old, '
'at least "{required_version}" is required.\n'
).format(current_version=current_version, required_version=required_version)
else:
package = PYPI_PACKAGE
message = 'Cannot get required version from server.\n'
message += (
'You can update client with the following command:\n'
' pip install {package} --upgrade'
).format(package=package)
super(GenestackVersionException, self).__init__(message)
| 1.515625 | 2 |
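A short sketch of the intended catch hierarchy: specific subclasses first, then GenestackBaseException as the client-wide catch-all, as its docstring suggests.

def safe_call(fn, *args, **kwargs):
    try:
        return fn(*args, **kwargs)
    except GenestackAuthenticationException:
        print('authentication failed - check your credentials')
        raise
    except GenestackBaseException as e:
        print('Genestack client error: %s' % e)
        raise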
ServiceRelationExtraction/relationExtractService.py | black938/RelationExtractionProject | 0 | 12799680 | from concurrent import futures
import grpc
import relationExtractService_pb2
import relationExtractService_pb2_grpc
import tools
class relationExtractService(relationExtractService_pb2_grpc.relationExtractServiceServicer):
def ExtractTriple(self,request,context):
sentence = request.sentence
triples = tools.extract_items(sentence)
response = relationExtractService_pb2.relationExtractResponse()
for triple in triples:
data = response.triples.add()
data.sub=triple[0]
data.pred=triple[1]
data.obj=triple[2]
return response
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))
relationExtractService_pb2_grpc.add_relationExtractServiceServicer_to_server(relationExtractService(),server)
server.add_insecure_port("[::]:4232")
server.start()
server.wait_for_termination()
if __name__ == '__main__':
serve()
| 1.171875 | 1 |
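A hedged client sketch for the service above. The stub class name follows the usual grpcio code-generation convention, and relationExtractRequest is an assumed message name (only relationExtractResponse appears in the server code).

import grpc
import relationExtractService_pb2
import relationExtractService_pb2_grpc

def extract_triples(sentence):
    with grpc.insecure_channel("localhost:4232") as channel:
        stub = relationExtractService_pb2_grpc.relationExtractServiceStub(channel)
        request = relationExtractService_pb2.relationExtractRequest(sentence=sentence)
        response = stub.ExtractTriple(request)
        return [(t.sub, t.pred, t.obj) for t in response.triples]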
tools/objects/object_gen.py | andyc655/gunyah-hypervisor | 61 | 12799688 | <gh_stars>10-100
#!/usr/bin/env python3
#
# © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from Cheetah.Template import Template
import argparse
import subprocess
import sys
class Object:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def type_enum(self):
return "OBJECT_TYPE_{:s}".format(self.name.upper())
def rcu_destroy_enum(self):
return "RCU_UPDATE_CLASS_{:s}_DESTROY".format(self.name.upper())
def main():
args = argparse.ArgumentParser()
mode_args = args.add_mutually_exclusive_group(required=True)
mode_args.add_argument('-t', '--template',
type=argparse.FileType('r', encoding="utf-8"),
help="Template file used to generate output")
args.add_argument('-o', '--output',
type=argparse.FileType('w', encoding="utf-8"),
default=sys.stdout, help="Write output to file")
args.add_argument("-f", "--formatter",
help="specify clang-format to format the code")
args.add_argument('input', metavar='INPUT', nargs='+', action='append',
help="List of objects to process")
options = args.parse_args()
object_list = [Object(o) for group in options.input for o in group]
output = "// Automatically generated. Do not modify.\n"
output += "\n"
ns = {'object_list': object_list}
output += str(Template(file=options.template, searchList=ns))
if options.formatter:
        ret = subprocess.run([options.formatter], input=output.encode("utf-8"),
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        output = ret.stdout.decode("utf-8")
        if ret.returncode != 0:
            raise Exception("failed to format output:\n" + ret.stderr.decode("utf-8"))
options.output.write(output)
if __name__ == '__main__':
main()
| 1.828125 | 2 |
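The template passed via --template is rendered against the object_list namespace, so it can call the helper methods on Object. A hedged sketch with an inline, purely illustrative template body:

from Cheetah.Template import Template

template_body = """\
typedef enum {
#for $o in $object_list
    $o.type_enum(),
#end for
} object_type_t;
"""
ns = {'object_list': [Object('thread'), Object('vm')]}
print(Template(template_body, searchList=ns))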
helper.py | Ellectronx/wsb-oceny | 5 | 12799696 |
import smtplib
from email.message import EmailMessage
from credent import secret
tb_headers=["id","przedmiot","wykladowca","forma_zaliczenia","rodz_zajec","ocena1","data1","ocena2","data2"]
def sendEmail(subject,eml_from,eml_to,message):
msg = EmailMessage()
msg.set_content(message)
msg['Subject'] = subject
msg['From'] = eml_from
msg['To'] = eml_to
# Send the message via SMTP server.
print("SENDING INFO EMAIL...")
try:
server = smtplib.SMTP(secret["smtp_host"], secret["smtp_port"])
server.ehlo()
server.login(secret["smtp_login"], secret["smtp_password"])
server.send_message(msg)
server.quit()
print("SENDING OK!")
except:
#raise
print("...sending email: somethin went wrong:(")
def preetyGrade(grade):
if grade=="-":
return "brak"
else:
return str(grade)
def compareT(T1, T2): # T1, T2: tuples holding a database row (table row with the old/new grades)
lenT1 = len(T1)
lenT2 = len(T2)
if lenT1!=9 and lenT2!=9:
return {"private":"Błąd E1. Nieodpowiednia ilość kolumn. Być może zmeniła się struktura strony źródłowej ?!","public":""}
if lenT2 > lenT1 and lenT1==0:
return {"private":"Dopisano nowy przedmiot: "+T2[1],"public":""}
if lenT1 == lenT2 and lenT1 == 9:
zm=""
L = len(T1)
for i in range(0,L):
if(T1[i]!=T2[i]):
zm = zm +"\r\nZmiana "+tb_headers[i]+" z "+preetyGrade(T1[i])+" na "+preetyGrade(T2[i])+", "
if len(zm)>1:
zm = zm[:-2]
return {"private":"Przedmiot: "+T1[1]+" ("+T1[3]+", "+T1[2]+")"+zm, "public":"Możliwa nowe oceny z przedmiotu: "+T1[1]+" ("+T1[3]+", "+T1[2]+") [powiadomienie automatyczne, grupa WZ_INiN3_PG2]"}
return {"private":"Nieokreślony błąd. Być moze załadowane zostały przedmioty z nowego semestru lub zmeniła się struktura strony źródłowej ?!","public":""}
| 1.992188 | 2 |
monet/visualize/force.py | flo-compbio/monet | 39 | 12799704 | <gh_stars>10-100
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2021 <NAME>
#
# This file is part of Monet.
from typing import Tuple
import pandas as pd
import scanpy.tl as tl
import scanpy.pp as pp
import plotly.graph_objs as go
from ..core import ExpMatrix
from ..latent import PCAModel
from .cells import plot_cells
def force_plot(
matrix: ExpMatrix,
num_components: int = 50,
transform_name: str = 'freeman-tukey',
pca_model: PCAModel = None,
**kwargs) -> Tuple[go.Figure, pd.DataFrame]:
if pca_model is None:
pca_model = PCAModel(num_components=num_components, transform_name=transform_name)
pc_scores = pca_model.fit_transform(matrix)
else:
pc_scores = pca_model.transform(matrix)
adata = ExpMatrix(pc_scores.T).to_anndata()
adata.obsm['pc_scores'] = pc_scores.values
# determine nearest-neighbors
pp.neighbors(adata, use_rep='pc_scores')
tl.draw_graph(adata)
Y = adata.obsm['X_draw_graph_fa']
scores = pd.DataFrame(
index=adata.obs_names, columns=['Dim. 1', 'Dim. 2'], data=Y)
fig = plot_cells(scores, **kwargs)
return fig, scores
| 1.914063 | 2 |
pyppeteer_stealth/__init__.py | ramiezer2/pyppeteer_stealth | 1 | 12799712 | <reponame>ramiezer2/pyppeteer_stealth
from pyppeteer.page import Page
from .chrome_app import chrome_app
from .chrome_runtime import chrome_runtime
from .iframe_content_window import iframe_content_window
from .media_codecs import media_codecs
from .sourceurl import sourceurl
from .navigator_hardware_concurrency import navigator_hardware_concurrency
from .navigator_languages import navigator_languages
from .navigator_permissions import navigator_permissions
from .navigator_plugins import navigator_plugins
from .navigator_vendor import navigator_vendor
from .navigator_webdriver import navigator_webdriver
from .user_agent_override import user_agent_override
from .utils import with_utils
from .webgl_vendor import webgl_vendor
from .window_outerdimensions import window_outerdimensions
async def stealth(page: Page, **kwargs) -> None:
if not isinstance(page, Page):
raise ValueError("page must be pyppeteer.page.Page")
await with_utils(page, **kwargs)
await chrome_app(page, **kwargs)
await chrome_runtime(page, **kwargs)
await iframe_content_window(page, **kwargs)
await media_codecs(page, **kwargs)
await sourceurl(page, **kwargs)
await navigator_hardware_concurrency(page, **kwargs)
await navigator_languages(page, **kwargs)
await navigator_permissions(page, **kwargs)
await navigator_plugins(page, **kwargs)
await navigator_vendor(page, **kwargs)
await navigator_webdriver(page, **kwargs)
await user_agent_override(page, **kwargs)
await webgl_vendor(page, **kwargs)
await window_outerdimensions(page, **kwargs)
| 0.800781 | 1 |
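A usage sketch: stealth() is applied to a freshly created page before navigation. The pyppeteer calls are standard; the headless flag and URL are arbitrary choices.

import asyncio
from pyppeteer import launch
from pyppeteer_stealth import stealth

async def main():
    browser = await launch(headless=True)
    page = await browser.newPage()
    await stealth(page)  # apply all the evasions wired up above
    await page.goto('https://example.org')
    await browser.close()

asyncio.get_event_loop().run_until_complete(main())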
clickatell/__init__.py | Kapooral/clickatell-python | 9 | 12799720 | import httplib2
import urllib
import json
import re
import sys
class Transport:
"""
Abstract representation of a transport class. Defines
the supported API methods
"""
endpoint = "platform.clickatell.com"
def __init__(self):
"""
Construct a new transportation instance.
:param boolean secure: Should we try and use a secure connection
"""
pass
def merge(self, *args):
"""
Merge multiple dictionary objects into one.
:param variadic args: Multiple dictionary items
:return dict
"""
values = []
for entry in args:
values = values + list(entry.items())
return dict(values)
def parseResponse(self, response):
"""
Parse the response from json.
Remapping error code and messages to be a level higher
"""
response['body'] = json.loads(response['body'])
response['messages'] = response['body']['messages']
response['error'] = response['body']['error']
del response['body']
return response
def request(self, action, data={}, headers={}, method='GET'):
"""
Run the HTTP request against the Clickatell API
:param str action: The API action
:param dict data: The request parameters
:param dict headers: The request headers (if any)
:param str method: The HTTP method
:return: The request response
"""
http = httplib2.Http()
        body = urlencode(data)
url = 'https://' + self.endpoint + '/' + action
url = (url + '?' + body) if (method == 'GET') else url
resp, content = http.request(url, method, headers=headers, body=json.dumps(data))
return self.merge(resp, {'body': content})
def sendMessage(self, to, message, extra={}):
"""
Send a message.
:param list to: The number you want to send to (list of strings, or one string)
:param string message: The message you want to send
:param dict extra: Any extra parameters (see Clickatell documentation)
:return dict
:raises NotImplementedError
"""
raise NotImplementedError() | 2.296875 | 2 |
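A minimal sketch of a concrete transport built on the abstract class above; the 'messages' action path and header names are placeholders, not the documented Clickatell REST API.

class TokenTransport(Transport):
    def __init__(self, token):
        Transport.__init__(self)
        self.token = token

    def sendMessage(self, to, message, extra={}):
        data = self.merge({'to': to, 'content': message}, extra)
        headers = {'Authorization': self.token, 'Content-Type': 'application/json'}
        return self.parseResponse(self.request('messages', data, headers, 'POST'))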
examples/nv-hpc-sdk_test.py | owainkenwayucl/gepy | 0 | 12799728 | <gh_stars>0
#!/usr/bin/env python3
# This script does a test of a particular nvidia compiler on Myriad
import sys
import os
import copy
import time
import gepy
import gepy.executor
compiler_module = 'compilers/nvhpc/21.11'
repo = 'https://github.com/UCL-RITS/pi_examples.git'
if (len(sys.argv) > 1):
compiler_module = sys.argv[1]
print('Generating job scripts for compiler module: ' + compiler_module)
template_job = gepy.job(name='nvtest')
template_job.modules.append('personal-modules')
template_job.modules.append('testing-modules')
template_job.modules.append(compiler_module)
template_job.add_resource('gpu','1')
template_job.set_node_classes('EFL')
tmp_dir = 'nvtest_'+str(time.time())
os.mkdir(tmp_dir)
template_job.location=os.getcwd() + '/' + tmp_dir
status = gepy.executor.run(['git', 'clone', repo, tmp_dir + '/pi_examples'])
if (status.returncode != 0):
sys.exit('Error cloning repo: ' + status.stderr)
template_job.workload.append(gepy.serial_command('cd ', ['pi_examples']))
# Right, that's the repo cloned and a template job created.
doconc_job = copy.deepcopy(template_job)
cudaf_job = copy.deepcopy(template_job)
openmp_job = copy.deepcopy(template_job)
openacc_job = copy.deepcopy(template_job)
# do concurrent test
doconc_job.workload.append(gepy.serial_command('cd ', ['fortran_do_concurrent_pi_dir']))
doconc_job.workload.append(gepy.serial_command('make ', ['clean']))
doconc_job.workload.append(gepy.serial_command('make ', ['nvhpc']))
doconc_job.workload.append(gepy.serial_command('./pi', []))
doconc_job.name = template_job.name + 'doconc'
# cuda fortran test
cudaf_job.workload.append(gepy.serial_command('cd ', ['cudafortran_pi_dir']))
cudaf_job.workload.append(gepy.serial_command('make ', ['clean']))
cudaf_job.workload.append(gepy.serial_command('make ', []))
cudaf_job.workload.append(gepy.serial_command('./pi', []))
cudaf_job.name = template_job.name + 'cudaf'
# openmp fortran test
openmp_job.workload.append(gepy.serial_command('cd ', ['fortran_omp_pi_dir']))
openmp_job.workload.append(gepy.serial_command('make ', ['clean']))
openmp_job.workload.append(gepy.serial_command('make ', ['nvhpc_offload']))
openmp_job.workload.append(gepy.serial_command('./pi_gpu', []))
openmp_job.name = template_job.name + 'openmp'
# openacc fortran test
openacc_job.workload.append(gepy.serial_command('cd ', ['fortran_openacc_pi_dir']))
openacc_job.workload.append(gepy.serial_command('make ', ['clean']))
openacc_job.workload.append(gepy.serial_command('make ', ['-f', 'Makefile.myriad', 'pi']))
openacc_job.workload.append(gepy.serial_command('./pi', []))
openacc_job.name = template_job.name + 'openacc'
print('Submitting jobs')
j,t = gepy.executor.qsub(doconc_job.get_job_script())
j,t = gepy.executor.qsub(cudaf_job.get_job_script())
j,t = gepy.executor.qsub(openmp_job.get_job_script())
j,t = gepy.executor.qsub(openacc_job.get_job_script())
print('Done') | 1.609375 | 2 |
assignments/assignment2/layers.py | INeedTractorPlz/dlcourse_ai | 1 | 12799736 | <gh_stars>1-10
import numpy as np
from math import exp, log
def l2_regularization(W, reg_strength):
"""
Computes L2 regularization loss on weights and its gradient
Arguments:
W, np array - weights
reg_strength - float value
Returns:
loss, single value - l2 regularization loss
gradient, np.array same shape as W - gradient of weight by l2 loss
"""
# TODO: Copy from the previous assignment
loss = reg_strength*sum(sum(W**2));
grad = reg_strength*2*W;
return loss, grad
def cross_entropy_loss(probs, target_index):
'''
Computes cross-entropy loss
Arguments:
probs, np array, shape is either (N) or (batch_size, N) -
probabilities for every class
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss: single value
'''
# TODO implement cross-entropy
#print("probs:", probs);
return -log(probs[target_index - 1]);
def softmax_with_cross_entropy(predictions, target_index):
"""
Computes softmax and cross-entropy loss for model predictions,
including the gradient
Arguments:
predictions, np array, shape is either (N) or (N, batch_size) -
classifier output
target_index: np array of int, shape is (1) or (batch_size) -
index of the true class for given sample(s)
Returns:
loss, single value - cross-entropy loss
dprediction, np array same shape as predictions - gradient of predictions by loss value
"""
# TODO: Copy from the previous assignment
# TODO implement softmax with cross-entropy
#One-dimension option
if predictions.ndim == 1:
predictions_ = predictions - np.max(predictions);
dprediction = np.array(list(map(exp, predictions_)));
summ = sum(dprediction);
dprediction /= summ;
loss = cross_entropy_loss(dprediction, target_index);
dprediction[target_index - 1] -= 1;
return loss, dprediction;
else:
predictions_ = predictions - np.max(predictions, axis = 1)[:, np.newaxis];
exp_vec = np.vectorize(exp);
#print("predictions_:", predictions_);
dprediction = np.apply_along_axis(exp_vec, 1, predictions_);
#print("dprediction before division: ", dprediction);
summ = sum(dprediction.T);
#print("summ: ", summ);
dprediction /= summ[:, np.newaxis];
#print("dprediction after division: ", dprediction);
loss = np.array([cross_entropy_loss(x,y) for x,y in zip(dprediction, target_index)]);
#print("loss: ", loss);
#print("target_index - 1:", target_index - 1);
it = np.nditer(target_index - 1, flags = ['c_index'] )
while not it.finished:
#print("it[0] = ", it[0]);
dprediction[it.index, it[0]] -= 1
it.iternext()
dprediction /= len(target_index);
#print("dprediction after subtraction: ", dprediction);
return loss.mean(), dprediction;
raise Exception("Not implemented!")
class Param:
"""
Trainable parameter of the model
Captures both parameter value and the gradient
"""
def __init__(self, value):
#self.init = value.copy();
self.value = value;
self.grad = np.zeros_like(value);
class ReLULayer:
def __init__(self):
self.X = None
def forward(self, X):
# TODO: Implement forward pass
# Hint: you'll need to save some information about X
# to use it later in the backward pass
self.X = X;
return (X > 0)*X;
def backward(self, d_out):
"""
Backward pass
Arguments:
d_out, np array (batch_size, num_features) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, num_features) - gradient
with respect to input
"""
# TODO: Implement backward pass
# Your final implementation shouldn't have any loops
return (self.X > 0)*d_out;
def params(self):
# ReLU Doesn't have any parameters
return {}
class FullyConnectedLayer:
def __init__(self, n_input, n_output):
self.W = Param(0.01 * np.random.randn(n_input, n_output))
self.B = Param(0.01 * np.random.randn(1, n_output))
self.X = None
def forward(self, X):
# TODO: Implement forward pass
# Your final implementation shouldn't have any loops
self.X = X;
#if np.any(self.W.init != self.W.value) or np.any(self.B.init != self.B.value):
self.W.grad = np.zeros_like(self.W.value);
self.B.grad = np.zeros_like(self.B.value);
# self.W.init = self.W.value;
# self.B.init = self.B.value;
return np.dot(self.X, self.W.value) + self.B.value;
def backward(self, d_out):
"""
Backward pass
Computes gradient with respect to input and
accumulates gradients within self.W and self.B
Arguments:
d_out, np array (batch_size, n_output) - gradient
of loss function with respect to output
Returns:
d_result: np array (batch_size, n_input) - gradient
with respect to input
"""
# TODO: Implement backward pass
# Compute both gradient with respect to input
# and gradients with respect to W and B
# Add gradients of W and B to their `grad` attribute
# It should be pretty similar to linear classifier from
# the previous assignment
dW = np.dot(self.X.T, d_out);
dB = np.dot(np.ones((1, d_out.shape[0])), d_out);
d_input = np.dot(d_out, self.W.value.T);
self.W.grad += dW;
self.B.grad += dB;
return d_input;
def params(self):
return {'W': self.W, 'B': self.B}
| 2.578125 | 3 |
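A smoke test wiring the pieces above into a tiny two-layer pass. Note that cross_entropy_loss indexes probs[target_index - 1], so labels are treated as 1-based in this assignment.

import numpy as np

np.random.seed(0)
fc = FullyConnectedLayer(n_input=4, n_output=3)
relu = ReLULayer()
X = np.random.randn(5, 4)                # batch of 5 samples, 4 features
out = relu.forward(fc.forward(X))        # forward pass
loss, d_out = softmax_with_cross_entropy(out, np.array([1, 2, 3, 1, 2]))
d_X = fc.backward(relu.backward(d_out))  # fills fc.W.grad and fc.B.grad
print(loss, d_X.shape, fc.W.grad.shape)  # scalar, (5, 4), (4, 3)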
integrators.py | marghetis/npde | 37 | 12799744 | <reponame>marghetis/npde
import numpy as np
import tensorflow as tf
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import ops
from abc import ABC, abstractmethod
float_type = tf.float64
class Integrator(ABC):
""" Base class for integrators
"""
def __init__(self,model):
self.model= model
@abstractmethod
def forward(self):
pass
@abstractmethod
def _step_func(self):
pass
@abstractmethod
def _make_scan_func(self):
pass
class ODERK4(Integrator):
""" Runge-Kutta implementation for solving ODEs
"""
def __init__(self,model):
super().__init__(model)
def forward(self,x0,ts):
Nt = x0.shape[0]
Xs = np.zeros(Nt,dtype=np.object)
for i in range(Nt):
time_grid = ops.convert_to_tensor(ts[i], preferred_dtype=float_type, name='t')
y0 = ops.convert_to_tensor(x0[i,:].reshape((1,-1)), name='y0')
time_delta_grid = time_grid[1:] - time_grid[:-1]
scan_func = self._make_scan_func(self.model.f)
y_grid = functional_ops.scan(scan_func, (time_grid[:-1],time_delta_grid), y0)
y_s = array_ops.concat([[y0], y_grid], axis=0)
Xs[i] = tf.reshape(tf.squeeze(y_s),[len(ts[i]),self.model.D])
return Xs
def _step_func(self,f,dt,t,y):
dt = math_ops.cast(dt, y.dtype)
k1 = f(y, t)
k2 = f(y + dt*k1/2, t+dt/2)
k3 = f(y + dt*k2/2, t+dt/2)
k4 = f(y + dt*k3, t+dt)
return math_ops.add_n([k1, 2*k2, 2*k3, k4]) * (dt / 6)
def _make_scan_func(self,f):
def scan_func(y, t_dt):
t, dt = t_dt
dy = self._step_func(f, dt, t, y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy
return scan_func
class SDEEM(Integrator):
""" Euler-Maruyama implementation for solving SDEs
dx = f(x)*dt + g*sqrt(dt)
"""
def __init__(self,model,s=1):
super().__init__(model)
self.s = s
def forward(self,x0,ts,Nw=1):
Xs = np.zeros(len(ts),dtype=np.object)
for i in range(len(ts)):
t = np.linspace(0,np.max(ts[i]),(len(ts[i])-1)*self.s+1)
t = np.unique(np.sort(np.hstack((t,ts[i]))))
idx = np.where( np.isin(t,ts[i]) )[0]
t = np.reshape(t,[-1,1])
time_grid = ops.convert_to_tensor(t, preferred_dtype=float_type, name='t')
time_delta_grid = time_grid[1:] - time_grid[:-1]
y0 = np.repeat(x0[i,:].reshape((1,-1)),Nw,axis=0)
y0 = ops.convert_to_tensor(y0, name='y0')
scan_func = self._make_scan_func(self.model.f,self.model.diffus.g)
y_grid = functional_ops.scan(scan_func, (time_grid[:-1],time_delta_grid), y0)
ys = array_ops.concat([[y0], y_grid], axis=0)
Xs[i] = tf.transpose(tf.gather(ys,idx,axis=0),[1,0,2])
return Xs
def _step_func(self,f,g,t,dt,x):
dt = math_ops.cast(dt, x.dtype)
return f(x,t)*dt + g(x,t)*tf.sqrt(dt)
def _make_scan_func(self,f,g):
def scan_func(y, t_dt):
t,dt = t_dt
dy = self._step_func(f,g,t,dt,y)
dy = math_ops.cast(dy, dtype=y.dtype)
return y + dy
return scan_func | 1.679688 | 2 |
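A hedged usage sketch, assuming the TensorFlow 1.x graph/session style implied by the imports above: a toy drift model exposing f(y, t) and a dimension D, integrated with ODERK4.

class HarmonicOscillator:
    D = 2
    def f(self, y, t):
        # dy1/dt = y2, dy2/dt = -y1
        return tf.stack([y[:, 1], -y[:, 0]], axis=1)

solver = ODERK4(HarmonicOscillator())
x0 = np.array([[1.0, 0.0]])      # one trajectory, D = 2
ts = np.empty(1, dtype=object)
ts[0] = np.linspace(0.0, 1.0, 11)
Xs = solver.forward(x0, ts)
with tf.Session() as sess:
    print(sess.run(Xs[0]))       # an (11, 2) array of states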
hangoutswordcount/hangoutswordcount.py | pinaky/utilities | 0 | 12799752 | # This program reads in a Google Hangouts JSON file and produces a wordcount
# Author: <NAME>
import json # JSON to handle Google's format
import re # regular expressions
# CHANGE THIS. For linux/mac, use '/home/user/restofpath/'
basepath = 'C:\\Users\\Pinaky\\Desktop\\cesmd\\gmail_hangout\\'
# INPUT: This is the input file path
jsonPath = basepath + 'Hangouts.json'
# OUTPUT: These are the output file paths. dict = sorted alphabetical; freq = sorted by frequency
mainDictPath = basepath + 'hangoutdict.txt'
mainFreqPath = basepath + 'hangoutfreq.txt'
# This is the path to a temporary intermediate file
tempPath = basepath + 'hangouttemp.txt'
# Read in the JSON file
jsonFile = open(jsonPath, 'r', encoding='utf8')
outFile = open(tempPath,'w', encoding='utf8')
# 'p' is the variable that contains all the data
p = json.load(jsonFile)
c = 0 # Count the number of chat messages
# This loops through Google's weird JSON format and picks out the chat text
for n in p['conversation_state']:
for e in n['conversation_state']['event']:
if 'chat_message' in e:
x = e['chat_message']['message_content']
if 'segment' in x:
xtype = x['segment'][0]['type']
xtext = x['segment'][0]['text'] + u" "
if xtype == u'TEXT':
# Write out the chat text to an intermediate file
outFile.write(xtext)
c += 1
print(u'Total number of chats: {0:d}'.format(c))
jsonFile.close()
outFile.close()
# The intermediate file has been written
# Now, run a wordcount
# Read in the intermediate file
inFile = open(tempPath,'r', encoding='utf8')
s = inFile.readlines()
inFile.close()
wordcount={} # The dictionary for wordcount
for l in range(len(s)):
line = s[l].lower().strip() # strip unnecessary white space
line = re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabets and remove the rest
for word in line.split():
if word not in wordcount:
wordcount[word] = 1
else:
wordcount[word] += 1
# Sort the wordcount like a dictionary and write to file
outFile = open(mainDictPath, 'w', encoding='utf8')
for k,v in sorted(wordcount.items()):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
# Sort the wordcount in descending order of frequency and write to file
outFile = open(mainFreqPath, 'w', encoding='utf8')
for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
| 2.453125 | 2 |
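An equivalent, more compact formulation of the counting loop using collections.Counter (reusing s and re from the script above); it produces the same counts and yields frequency ordering directly via most_common().

from collections import Counter

counter = Counter()
for line in s:
    counter.update(re.sub(u'[^A-Za-z]+', u' ', line.lower()).split())
# counter.most_common() returns (word, count) pairs, highest frequency first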
jenkins/modules/jjb_afs/jjb_afs/afs.py | cwolferh/project-config | 0 | 12799760 | <reponame>cwolferh/project-config
# Copyright 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jenkins_jobs.errors import MissingAttributeError
def afs_publisher(parser, xml_parent, data):
for attr in ['site', 'source', 'target']:
if attr not in data:
raise MissingAttributeError(attr)
| 0.875 | 1 |
MagniPy/MassModels/Sersic.py | dangilman/MagniPy | 2 | 12799768 | import numpy as np
class Sersic:
def b(self,n):
return 1.9992*n - 0.3271 + 4*(405*n)**-1
def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0):
bn = self.b(n_sersic)
        r = ((x - center_x)**2 + (y - center_y)**2 * q**-2)**0.5
return k_eff*np.exp(-bn*((r*r_eff**-1)**(n_sersic**-1)-1))
| 1.734375 | 2 |
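A small usage sketch, assuming the Sersic class above is in scope: evaluating the convergence profile on a coordinate grid.

import numpy as np

profile = Sersic()
x, y = np.meshgrid(np.linspace(-2, 2, 5), np.linspace(-2, 2, 5))
kappa = profile.kappa(x, y, n_sersic=4, r_eff=1.0, k_eff=0.5, q=0.8)
print(kappa.shape)  # (5, 5)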
python3/leetcodepy/regular_expression_matching.py | qianbinbin/leetcode | 4 | 12799776 | """
Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*' where:
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
Example 1:
Input: s = "aa", p = "a"
Output: false
Explanation: "a" does not match the entire string "aa".
Example 2:
Input: s = "aa", p = "a*"
Output: true
Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes "aa".
Example 3:
Input: s = "ab", p = ".*"
Output: true
Explanation: ".*" means "zero or more (*) of any character (.)".
Example 4:
Input: s = "aab", p = "c*a*b"
Output: true
Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches "aab".
Example 5:
Input: s = "mississippi", p = "mis*is*p*."
Output: false
Constraints:
0 <= s.length <= 20
0 <= p.length <= 30
s contains only lowercase English letters.
p contains only lowercase English letters, '.', and '*'.
It is guaranteed for each appearance of the character '*', there will be a previous valid character to match.
"""
class Solution1:
def __match(self, s: str, p: str) -> bool:
if p[0] == '\0':
return s[0] == '\0'
if s[0] == '\0':
            return p[1] == '*' and self.__match(s, p[2:])
if p[1] == '*':
if p[0] == '.' or p[0] == s[0]:
return self.__match(s, p[2:]) or self.__match(s[1:], p)
return self.__match(s, p[2:])
return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:])
def isMatch(self, s: str, p: str) -> bool:
return self.__match(s + '\0', p + '\0')
class Solution2:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
if n == 0:
return m == 0
dp = [[False] * (n + 1) for _ in range(m + 1)]
dp[0][0] = True
for j in range(2, n + 1, 2):
if p[j - 1] == '*':
dp[0][j] = True
else:
break
if m > 0:
dp[1][1] = p[0] == '.' or p[0] == s[0]
for i in range(1, m + 1):
for j in range(2, n + 1):
if p[j - 1] != '*':
dp[i][j] = (p[j - 1] == '.' or p[j - 1] == s[i - 1]) and dp[i - 1][j - 1]
else:
dp[i][j] = dp[i][j - 2] or ((p[j - 2] == '.' or p[j - 2] == s[i - 1]) and dp[i - 1][j])
return dp[m][n]
| 3.546875 | 4 |
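Both implementations can be exercised against the five examples from the docstring:

if __name__ == "__main__":
    cases = [("aa", "a", False), ("aa", "a*", True), ("ab", ".*", True),
             ("aab", "c*a*b", True), ("mississippi", "mis*is*p*.", False)]
    for solver in (Solution1(), Solution2()):
        for s, p, expected in cases:
            assert solver.isMatch(s, p) == expected, (s, p)
    print("all five examples pass for both solutions")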
firmware/bait/git_rev_macro.py | dkadish/BioAcousticIndexTool | 1 | 12799784 | <gh_stars>1-10
#!/Users/davk/anaconda/envs/platformio_setup python
import subprocess
revision = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode().strip()
print('-DPIO_SRC_REV="%s"' % revision) | 0.84375 | 1 |
.k8s/scripts/delete-k8s-objects.py | fossabot/code-du-travail-numerique | 0 | 12799792 | <gh_stars>0
from subprocess import check_output
import hashlib
import os
import json
from urllib import request
# This script compares the active remote branches and active k8s tags.
# If a k8s tag doesn't match an active hashed remote branches name's, we delete all the k8s objects with this k8s tag.
github_token = os.environ["GITHUB_TOKEN"]
hash_size = int(os.environ["HASH_SIZE"])
def get_active_branches():
url = "https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls".format(github_token)
req = request.Request(url, None, {"token": github_token})
response = request.urlopen(req)
active_branches = [branch.get("head").get("ref").encode() for branch in json.loads(response.read())]
return [
hashlib.sha1(branche).hexdigest()[:hash_size]
for branche in active_branches
]
def get_active_k8s_tags():
raw_k8s_tag_list = check_output("kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'", shell=True).decode("utf-8")
k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-')
return [
k8s_tag
for k8s_tag in k8s_tag_list if k8s_tag
]
def delete_k8s_object(label):
k8s_object_list = ["service", "ingress", "configmap", "deployments", "statefulset", "pod"]
for k8s_object in k8s_object_list:
command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label)
check_output(command_to_delete_k8s_object, shell=True)
def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]):
k8s_tag_list_to_delete = []
active_tags = [
tag for tag in active_k8s_tag_list if tag != ""
]
deletable_tags = [
tag
for tag in active_tags
if tag not in active_branch_list
]
for tag in deletable_tags:
k8s_tag_list_to_delete.append(tag)
return k8s_tag_list_to_delete
if __name__ == '__main__':
for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()):
delete_k8s_object(k8s_tag_to_delete)
print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+' have been deleted')
| 2.03125 | 2 |
Comportamentais/Template Method/main.py | DionVitor/design_pattern | 0 | 12799800 | <gh_stars>0
from abc import ABC, abstractmethod
def print_abstract(string):
print(f'\033[31m{string}\033[0;0m')
def print_concrete(string):
print(f'\033[32m{string}\033[0;0m')
class AbstractClass(ABC):
def template_method(self):
self.operation_one()
self.required_operation_one()
self.operation_two()
self.hook1()
self.required_operation_two()
self.operation_three()
self.hook2()
@staticmethod
def operation_one():
print_abstract('| Classe abstrata | Estou executando a operação 1.')
@staticmethod
def operation_two():
print_abstract('| Classe abstrata | Estou executando a operação 2.')
@staticmethod
def operation_three():
print_abstract('| Classe abstrata | Estou executando a operação 3.')
@abstractmethod
def required_operation_one(self):
pass
@abstractmethod
def required_operation_two(self):
pass
def hook1(self):
pass
def hook2(self):
pass
class ConcreteClass1(AbstractClass):
def required_operation_one(self):
print_concrete('| Classe concreta 1 | Operação requerida 1 implementada.')
def required_operation_two(self):
print_concrete('| Classe concreta 1 | Operação requerida 2 implementada.')
class ConcreteClass2(AbstractClass):
def required_operation_one(self):
print_concrete('| Classe concreta 2 | Operação requerida 1 implementada.')
def required_operation_two(self):
print_concrete('| Classe concreta 2 | Operação requerida 2 implementada.')
def hook1(self):
print_concrete('| Classe concreta 2 | Hook 1 implementado.')
def run(concrete_class): # Deve receber uma subclasse de AbstractClass!
concrete_class.template_method()
if __name__ == '__main__':
run(ConcreteClass1())
print('')
run(ConcreteClass2())
| 1.914063 | 2 |
test_model/models.py | Lairion/defaultproject | 0 | 12799808 | from django.db import models
# Create your models here.
class Category(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
class Meta:
pass
class Skill(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
class Meta:
pass | 1.273438 | 1 |
experiments/examplereader.py | abdelabdalla/deepmind-research | 0 | 12799816 | <reponame>abdelabdalla/deepmind-research
import functools
import json
import os
import tensorflow as tf
from learning_to_simulate import reading_utils
def _read_metadata(data_path):
with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:
return json.loads(fp.read())
data_path = "/tmp/WaterDrop"
metadata = _read_metadata(data_path)
ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')])
ds = ds.map(functools.partial(
reading_utils.parse_serialized_simulation_example, metadata=metadata))
n = ds.make_one_shot_iterator().get_next()
sess = tf.Session()
end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord')))
value = []
for i in range(0, end):
print(str(i))
v = sess.run(n)
value.append(v)
| 1.726563 | 2 |
boost/libs/iterator/doc/generate.py | randolphwong/mcsema | 1,155 | 12799824 | <reponame>randolphwong/mcsema<filename>boost/libs/iterator/doc/generate.py
#!/usr/bin/python
# Copyright <NAME> 2004. Use, modification and distribution is
# subject to the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# Generate html, TeX, and PDF versions of all the source files
#
import os
import sys
from syscmd import syscmd
from sources import sources
if 0:
for s in sources:
syscmd('boosthtml %s' % s)
else:
extensions = ('html', 'pdf')
if len(sys.argv) > 1:
extensions = sys.argv[1:]
all = [ '%s.%s' % (os.path.splitext(s)[0],ext)
for ext in extensions
for s in sources
]
print 'make %s' % ' '.join(all)
syscmd('make %s' % ' '.join(all))
| 1.625 | 2 |
src/inpainting.py | DehuiYan/tumorDetection | 0 | 12799832 | <gh_stars>0
#!/usr/bin/env python
# coding=utf-8
'''
Aggregate the generated patches into a tumor region and embed it into a normal tile.
'''
import os
import cv2
import random
import tools
import numpy as np
import makevocxml
inputGenedir = '../../mydata/dcgan/dcgan_micro_512/'
inputNordir = '../../mydata/dcgan/normal_part/'
outputdir = '../../mydata/dcgan/virtual_dataset/'
outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/'
outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/'
outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/'
tools.mkdir(outputdir)
tools.mkdir(outputbboxdir)
tools.mkdir(outputxmldir)
tools.mkdir(outputmaskdir)
def make_region(gene_list, w_num, h_num):
vstack = []
for i in range(h_num):
hstack = []
for j in range(w_num):
img = cv2.imread(inputGenedir+gene_list[i*w_num+j])
hstack.append(img)
image = np.concatenate(hstack, axis=1)
vstack.append(image)
img_region = np.concatenate(vstack)
return img_region
def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number):
x = random.randint(0, nor_size-gene_size*2)
y = random.randint(0, nor_size-gene_size*2)
    w_num_max = (nor_size-x) // gene_size
w_num = random.randint(1, w_num_max)
w = w_num * gene_size
    h_num_max = (nor_size-y) // gene_size
h_num = random.randint(1, h_num_max)
h = h_num * gene_size
gene_list = random.sample(gene_all_list, w_num*h_num)
nor_list = random.sample(nor_all_list, 1)
img_region = make_region(gene_list, w_num, h_num)
img_part = cv2.imread(inputNordir+nor_list[0])
img_part[y:y+h, x:x+w] = img_region
number += 1
cv2.imwrite(outputdir+str(number)+'.jpg', img_part)
img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8)
cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox)
bbox_label = []
bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor'])
makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label)
mask = np.zeros((nor_size,nor_size,1), np.uint8)
mask_region = np.zeros((h,w,1), np.uint8)
mask_region[:] = 255
mask[y:y+h, x:x+w] = mask_region
cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask)
return number
if __name__ == "__main__":
gene_size = 64
nor_size = 512
number = 0
total = 20
gene_all_list = []
nor_all_list = []
for parents, dirnames, filenames in os.walk(inputGenedir):
for f in filenames:
gene_all_list.append(f)
for parents, dirnames, filenames in os.walk(inputNordir):
for f in filenames:
nor_all_list.append(f)
for i in range(total):
number = inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number)
| 1.703125 | 2 |
src/daemon/initialisation.py | 0CT3T/Daemon_Home_Integration | 1 | 12799840 | <reponame>0CT3T/Daemon_Home_Integration
from importlib.machinery import SourceFileLoader
from daemon.Configuration.Modele import *
from daemon.Configuration.configuration import configuration
#############################################################
#
# INITIALISATION
#############################################################
lobjet = {} # dict of class instances
lmodrules = [] # list of rule modules
config = configuration()
# LOAD the module configuration file
with open(JSONdirectory + "module.json", "r") as fichier:
JSON = fichier.read()
config.load(JSON)
# import the classes and instantiate the objects
for item in config.getlitem():
temp = getattr(SourceFileLoader(item,Moduledirectory +item+".py").load_module(), item)
lobjet[item] = temp()
| 1.101563 | 1 |
learn-to-code-with-python/32-PROJECT-Texas-Hold-Em-Poker/tests/test_card.py | MaciejZurek/python_practicing | 0 | 12799848 | import unittest
from poker.card import Card
class CardTest(unittest.TestCase):
def test_has_rank(self):
card = Card(rank = "Queen", suit = "Hearts")
self.assertEqual(card.rank, "Queen")
def test_has_suit(self):
card = Card(rank = "2", suit = "Clubs")
self.assertEqual(card.suit, "Clubs")
def test_knows_its_rank_index(self):
card = Card(rank = "Jack", suit = "Hearts")
self.assertEqual(card.rank_index, 9)
def test_has_string_representation_with_rank_and_suit(self):
card = Card("5", "Diamonds")
self.assertEqual(str(card), "5 of Diamonds")
def test_has_technical_representation(self):
card = Card("5", "Diamonds")
self.assertEqual(repr(card), "Card('5', 'Diamonds')")
def test_card_has_four_possible_suit_options(self):
self.assertEqual(
Card.SUITS,
("Hearts", "Clubs", "Spades", "Diamonds")
)
def test_card_has_thirteen_possible_rank_options(self):
self.assertEqual(
Card.RANKS,
(
"2", "3", "4", "5", "6", "7", "8", "9", "10",
"Jack", "Queen", "King", "Ace"
)
)
def test_card_only_allows_for_valid_rank(self):
with self.assertRaises(ValueError):
Card(rank = "Two", suit = "Hearts")
def test_card_only_allows_for_valid_suit(self):
with self.assertRaises(ValueError):
Card(rank = "2", suit = "Dots")
def test_can_create_standard_52_cards(self):
cards = Card.create_standard_52_cards()
self.assertEqual(len(cards), 52)
self.assertEqual(
cards[0],
Card(rank = "2", suit = "Hearts")
)
self.assertEqual(
cards[-1],
Card(rank = "Ace", suit = "Diamonds")
)
def test_figures_out_if_two_cards_are_equal(self):
self.assertEqual(
Card(rank = "2", suit = "Hearts"),
Card(rank = "2", suit = "Hearts")
)
def test_card_can_sort_itself_with_another_one(self):
queen_of_spades = Card(rank = "Queen", suit = "Spades")
king_of_spades = Card(rank = "King", suit = "Spades")
evaluation = queen_of_spades < king_of_spades
self.assertEqual(
evaluation,
True,
"The sort algorithm is not sorting the lower card first"
)
def test_sorts_cards(self):
two_of_spades = Card(rank = "2", suit = "Spades")
five_of_diamonds = Card(rank = "5", suit = "Diamonds")
five_of_hearts = Card(rank = "5", suit = "Hearts")
eight_of_hearts = Card(rank = "8", suit = "Hearts")
ace_of_clubs = Card(rank = "Ace", suit = "Clubs")
unsorted_cards = [
five_of_hearts,
five_of_diamonds,
two_of_spades,
ace_of_clubs,
eight_of_hearts
]
unsorted_cards.sort()
self.assertEqual(
unsorted_cards,
[
two_of_spades,
five_of_diamonds,
five_of_hearts,
eight_of_hearts,
ace_of_clubs
]
)
| 2.65625 | 3 |
vit/formatter/start_remaining.py | kinifwyne/vit | 179 | 12799856 | from vit.formatter.start import Start
class StartRemaining(Start):
def format_datetime(self, start, task):
return self.remaining(start)
| 0.941406 | 1 |
scripts/typing-summary.py | AlexWaygood/typing | 1,145 | 12799864 | <gh_stars>1000+
#!/usr/bin/env python3
"""
Generate a summary of last week's issues tagged with "topic: feature".
The summary will include a list of new and changed issues and is sent each
Monday at 0200 CE(S)T to the typing-sig mailing list. Due to limitation
with GitHub Actions, the mail is sent from a private server, currently
maintained by @srittau.
"""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from typing import Any, Iterable, Sequence
import requests
ISSUES_API_URL = "https://api.github.com/repos/python/typing/issues"
ISSUES_URL = "https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22"
ISSUES_LABEL = "topic: feature"
SENDER_EMAIL = "Typing Bot <<EMAIL>>"
RECEIVER_EMAIL = "<EMAIL>"
@dataclass
class Issue:
number: int
title: str
url: str
created: datetime.datetime
user: str
pull_request: bool = False
def main() -> None:
since = previous_week_start()
issues = fetch_issues(since)
new, updated = split_issues(issues, since)
print_summary(since, new, updated)
def previous_week_start() -> datetime.date:
today = datetime.date.today()
return today - datetime.timedelta(days=today.weekday() + 7)
def fetch_issues(since: datetime.date) -> list[Issue]:
"""Return (new, updated) issues."""
j = requests.get(
ISSUES_API_URL,
params={
"labels": ISSUES_LABEL,
"since": f"{since:%Y-%m-%d}T00:00:00Z",
"per_page": "100",
"state": "open",
},
headers={"Accept": "application/vnd.github.v3+json"},
).json()
assert isinstance(j, list)
return [parse_issue(j_i) for j_i in j]
def parse_issue(j: Any) -> Issue:
number = j["number"]
title = j["title"]
url = j["html_url"]
created_at = datetime.datetime.fromisoformat(j["created_at"][:-1])
user = j["user"]["login"]
pull_request = "pull_request" in j
assert isinstance(number, int)
assert isinstance(title, str)
assert isinstance(url, str)
assert isinstance(user, str)
return Issue(number, title, url, created_at, user, pull_request)
def split_issues(
issues: Iterable[Issue], since: datetime.date
) -> tuple[list[Issue], list[Issue]]:
new = []
updated = []
for issue in issues:
if issue.created.date() >= since:
new.append(issue)
else:
updated.append(issue)
new.sort(key=lambda i: i.number)
updated.sort(key=lambda i: i.number)
return new, updated
def print_summary(
since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue]
) -> None:
print(f"From: {SENDER_EMAIL}")
print(f"To: {RECEIVER_EMAIL}")
print(f"Subject: Opened and changed typing issues week {since:%G-W%V}")
print()
print(generate_mail(new, changed))
def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str:
if len(new) == 0 and len(changed) == 0:
s = (
"No issues or pull requests with the label 'topic: feature' were opened\n"
"or updated last week in the typing repository on GitHub.\n\n"
)
else:
s = (
"The following is an overview of all issues and pull requests in the\n"
"typing repository on GitHub with the label 'topic: feature'\n"
"that were opened or updated last week, excluding closed issues.\n\n"
"---------------------------------------------------\n\n"
)
if len(new) > 0:
s += "The following issues and pull requests were opened last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in new)
s += "\n---------------------------------------------------\n\n"
if len(changed) > 0:
s += "The following issues and pull requests were updated last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in changed)
s += "\n---------------------------------------------------\n\n"
s += (
"All issues and pull requests with the label 'topic: feature'\n"
"can be viewed under the following URL:\n\n"
)
s += ISSUES_URL
return s
def generate_issue_text(issue: Issue) -> str:
s = f"#{issue.number:<5} "
if issue.pull_request:
s += "[PR] "
s += f"{issue.title}\n"
s += f" opened by @{issue.user}\n"
s += f" {issue.url}\n"
return s
if __name__ == "__main__":
main()
| 2.359375 | 2 |
scripts/scraper.py | brainglobe/brainglobe-web | 1 | 12799872 | <reponame>brainglobe/brainglobe-web<filename>scripts/scraper.py
from loguru import logger
from rich import print
from rich.table import Table
from mdutils.mdutils import MdUtils
import semanticscholar as sch
from myterial import pink, blue_light
'''
Searches google scholar for papers using brainglobe's tools
'''
AUTHORS = (
'34308754', # <NAME>
'3853277', # <NAME>
'8668066', # <NAME>
)
KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg')
def fetch_citations():
'''
    Fetches citations from Semantic Scholar: for each author in the list,
    get all publications and keep only the ones relevant to brainglobe.
    Then use these publications to find papers citing them.
'''
citations = []
brainglobe_papers = dict(
id = [],
year = [],
title = [],
authors = [],
link=[],
)
citing_brainglobe = dict(
id = [],
year = [],
title = [],
authors = [],
link=[],
)
# loop over authors
logger.info('Getting brainglobe papers')
for author_n, author_id in enumerate(AUTHORS):
added = 0
logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}')
        author = sch.author(author_id)
        if not len(author.keys()):
            raise ValueError('Could not fetch author data, probably an API timeout error, wait a bit.')
        logger.debug(f'Found {len(author["papers"])} papers for {author["name"]}')
# loop over papers
for paper in author['papers']:
paper = sch.paper(paper['paperId'])
            if not paper:
                logger.debug('  skipping a paper that could not be fetched')
                continue
            if paper['abstract'] is None:
                logger.debug(f'  skipping paper {paper["title"]} because it has no abstract')
                continue
matched_keywords = [kw for kw in KEYWORDS if kw in paper['abstract'].lower()]
# add it to the list of brainglobe papers
if matched_keywords:
if paper['corpusId'] in brainglobe_papers['id']:
logger.debug(f' skipping paper: {paper["title"]} to avoid duplicates')
continue # skip duplicates
logger.debug(f'Found brainglobe paper: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')
brainglobe_papers['id'].append(paper['corpusId'])
brainglobe_papers['year'].append(str(paper['year']))
brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']])
brainglobe_papers['title'].append(paper['title'])
brainglobe_papers['link'].append(paper['url'])
citations.append(paper['citations'])
added += 1
else:
logger.debug(f'Paper NOT belonging to brainglobe: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')
logger.debug(f'Added {added}/{len(author["papers"])} papers for {author["name"]}')
logger.info(f'Found {len(brainglobe_papers["id"])} brainglobe papers')
logger.info('Getting papers citing our work')
for paper_citations in citations:
for paper in paper_citations:
if paper['paperId'] in citing_brainglobe['id']:
continue # avoid duplicates
citing_brainglobe['id'].append(paper['paperId'])
citing_brainglobe['year'].append(str(paper['year']))
citing_brainglobe['title'].append(paper['title'])
citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']])
citing_brainglobe['link'].append(paper['url'])
logger.info(f'Found {len(citing_brainglobe["id"])} papers citing brainglobe')
return {**brainglobe_papers, **citing_brainglobe}
def print_citations(citations):
'''
    prints a list of citations as a rich table
'''
tb = Table(box=None, header_style=f'bold {pink}')
tb.add_column('Year', justify='right', style='dim')
tb.add_column('Title', style=blue_light)
tb.add_column('Authors')
for n in range(len(citations['id'])):
tb.add_row(
citations['year'][n],
citations['title'][n],
', '.join(citations['authors'][n]),
)
print(tb)
def make_citations_markdown(citations):
'''
Replaces ./_pages/references.md to update with the most recent
citations of papers using/citing brainglobe
'''
logger.debug('Updating markdown file')
# create markdown file
mdFile = MdUtils(file_name='_pages/references.md')
# add metadata & header
mdFile.write(text="""
---
permalink: /references
author_profile: true
title: "References"
---
""")
mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ')
years = sorted(set(citations['year']))
for adding_year in years:
mdFile.new_header(level=2, title=adding_year)
# add papers
for n in range(len(citations['id'])):
year = citations['year'][n]
link = citations['link'][n]
if year != adding_year:
continue
mdFile.new_header(level=3, title=
mdFile.new_inline_link(link=link, text=citations['title'][n])
)
# add 'in the press'
mdFile.write("""
# BrainGlobe reported in press/online
### [Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a)
_Chan Zuckerberg Science Initiative (Medium), June 2021_
### [Using deep learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images)
_Sainsbury Wellcome Centre Blog, June 2021_
### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d)
_Sainsbury Wellcome Centre Blog, March 2021_
### [Cellfinder: Harnessing the power of deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain)
_Sainsbury Wellcome Centre Blog, April 2020_
### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020)
_NeuroWire (Scientifica), April 2020_
""")
# save
mdFile.create_md_file()
# remove extra empty lines at top of file
with open('_pages/references.md', 'r') as fin:
content = fin.read()
with open('_pages/references.md', 'w') as fout:
fout.write(content.replace('\n\n\n\n', ''))
if __name__ == '__main__':
citations = fetch_citations()
# print_citations(citations)
make_citations_markdown(citations) | 1.617188 | 2 |
src/user_lib/connection_manager.py | crehmann/CO2Logger | 0 | 12799880 | from utime import ticks_ms
import network
import time
from umqtt.simple import MQTTClient
STATE_DISCONNECTED = 0
STATE_WLAN_CONNECTING = 1
STATE_WLAN_CONNECTED = 2
STATE_MQTT_CONNECTING = 3
STATE_MQTT_CONNECTED = 4
WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000
MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000
class ConnectionManager:
def __init__(self):
self._wlan = network.WLAN(network.STA_IF)
self._wlanSsid = None
self._wlanPassword = None
self._wlanConnectingTimestamp = None
self._mqtt = None
self._mqttConnectingTimestamp = None
self._state = STATE_DISCONNECTED
self._data = {}
def configureWlan(self, ssid, password):
self._wlanSsid = ssid
self._wlanPassword = password
def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword):
self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword)
def initConnection(self):
if self._state == STATE_DISCONNECTED:
self.__connectWlan()
def publish(self, topic, data):
# keeping only the latest value
self._data[topic] = data
self.__flush()
def update(self):
if self._state > STATE_WLAN_CONNECTING \
                and not self._wlan.isconnected():
self._state = STATE_DISCONNECTED
if self._state == STATE_WLAN_CONNECTING:
self.__updateWlanConnectingState()
if self._state == STATE_WLAN_CONNECTED:
self.__updateWlanConnectedState()
if self._state == STATE_MQTT_CONNECTING:
self.__updateMqttConnectingState()
def __connectWlan(self):
if self._wlanSsid:
print("connecting to wlan...")
self._wlanConnectingTimestamp = ticks_ms()
self._state = STATE_WLAN_CONNECTING
try:
self._wlan.active(True)
self._wlan.disconnect()
self._wlan.connect(self._wlanSsid, self._wlanPassword)
except Exception as ex:
self.__printException(ex)
def __updateWlanConnectingState(self):
if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS:
print("Could not connect to wlan. Falling back to disconnected state")
self._state = STATE_DISCONNECTED
elif self._wlan.isconnected() \
and not self._wlan.ifconfig()[0]=='0.0.0.0':
self._state = STATE_WLAN_CONNECTED
print("wlan connected")
def __updateWlanConnectedState(self):
if self._mqtt:
print("connecting to mqtt")
self._state = STATE_MQTT_CONNECTING
self._mqttConnectingTimestamp = ticks_ms()
try:
self._mqtt.connect()
except Exception as ex:
self.__printException(ex)
def __updateMqttConnectingState(self):
if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS:
print("MQTT connection failed.")
self._state = STATE_WLAN_CONNECTED
else:
try:
self._mqtt.ping()
self._state = STATE_MQTT_CONNECTED
self.__flush()
print("mqtt connection established")
except Exception as ex:
self.__printException(ex)
def __flush(self):
if self._state == STATE_MQTT_CONNECTED:
try:
for key in list(self._data):
self._mqtt.publish(key, self._data[key])
del self._data[key]
except Exception as ex:
self._state = STATE_WLAN_CONNECTED
self.__printException(ex)
def __printException(self, ex):
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
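
# Minimal usage sketch (illustrative; the SSID, credentials and broker details
# below are placeholders, not values from the original project).
if __name__ == "__main__":
    manager = ConnectionManager()
    manager.configureWlan("my-ssid", "my-password")
    manager.configureMqtt("co2logger", "broker.example.com", "mqtt-user", "mqtt-pass")
    manager.initConnection()
    while True:
        manager.update()  # drives the WLAN -> MQTT state machine forward
        manager.publish(b"sensors/co2", b"415")  # queued until MQTT connects
        time.sleep(1)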
| 2.25 | 2 |
doc/examples/nonlinear_from_rest/submit_multiple_check_RB.py | snek5000/snek5000-cbox | 0 | 12799888 | import numpy as np
from fluiddyn.clusters.legi import Calcul2 as Cluster
from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests
prandtl = 1.0
dim = 2
dt_max = 0.005
end_time = 30
nb_procs = 10
nx = 8
order = 10
stretch_factor = 0.0
Ra_vert = 1750
x_periodicity = False
z_periodicity = False
cluster = Cluster()
cluster.commands_setting_env = [
"PROJET_DIR=/fsnet/project/meige/2020/20CONVECTION",
"source /etc/profile",
"source $PROJET_DIR/miniconda3/etc/profile.d/conda.sh",
"conda activate env-snek",
"export NEK_SOURCE_ROOT=$HOME/Dev/snek5000/lib/Nek5000",
"export PATH=$PATH:$NEK_SOURCE_ROOT/bin",
"export FLUIDSIM_PATH=$PROJET_DIR/numerical/",
]
for aspect_ratio, Ra_c_test in Ra_c_RB_tests.items():
ny = int(nx * aspect_ratio)
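    # skip aspect ratios for which nx * aspect_ratio is not an integer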
if nx * aspect_ratio - ny:
continue
Ra_vert_nums = np.logspace(np.log10(Ra_c_test), np.log10(1.04 * Ra_c_test), 4)
for Ra_vert_num in Ra_vert_nums:
command = (
f"run_simul_check_from_python.py -Pr {prandtl} -nx {nx} --dim {dim} "
f"--order {order} --dt-max {dt_max} --end-time {end_time} -np {nb_procs} "
f"-a_y {aspect_ratio} --stretch-factor {stretch_factor} "
f"--Ra-vert {Ra_vert_num}"
)
if x_periodicity:
command += " --x-periodicity"
elif z_periodicity:
command += " --z-periodicity"
print(command)
name_run = f"RB_asp{aspect_ratio:.3f}_Ra{Ra_vert_num:.3e}_Pr{prandtl:.2f}_msh{nx*order}x{round(nx*aspect_ratio)*order}"
cluster.submit_script(
command,
name_run=name_run,
nb_cores_per_node=nb_procs,
omp_num_threads=1,
ask=False,
)
| 1.5 | 2 |
dataconverter.py | zhang96/CSVToJSON | 1 | 12799896 | <gh_stars>1-10
# Simple Python program that converts CSV to JSON for my MongoDB project.
# - It converts all the csv files under the directory at once.
import csv
import json
import glob
for files in glob.glob("*.csv"):
    # Read every record as a dict keyed by the header row.
    with open(files, 'r', newline='') as csvfile:
        rows = list(csv.DictReader(csvfile))
    # Write all records as a single JSON array next to the source file.
    with open(files[:-4] + '.json', 'w') as jsonfile:
        json.dump(rows, jsonfile)

print("End of Execution")
| 2.859375 | 3 |
jarbas_hive_mind/settings.py | flo-mic/HiveMind-core | 43 | 12799904 | <gh_stars>10-100
from os import makedirs
from os.path import isdir, join, expanduser
DATA_PATH = expanduser("~/jarbasHiveMind")
if not isdir(DATA_PATH):
makedirs(DATA_PATH)
CERTS_PATH = join(DATA_PATH, "certs")
if not isdir(CERTS_PATH):
makedirs(CERTS_PATH)
DB_PATH = join(DATA_PATH, "database")
if not isdir(DB_PATH):
makedirs(DB_PATH)
CLIENTS_DB = "sqlite:///" + join(DB_PATH, "clients.db")
DEFAULT_PORT = 5678
USE_SSL = True
LOG_BLACKLIST = []
MYCROFT_WEBSOCKET_CONFIG = {
"host": "0.0.0.0",
"port": 8181,
"route": "/core",
"ssl": False
}
| 1 | 1 |
StkAutomation/IntegrationCertification/IntegrationCert.py | jgonzalesAGI/STKCodeExamples | 0 | 12799912 | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020
@author: jvergere
Ideas: Something similar to the Iridium Constellation:
66 Sats
781 km (7159 semimajor axis)
86.4 inclination
6 Orbit planes 30 degrees apart
11 in each plane
"""
import datetime as dt
import numpy as np
import os
#Need to clean up this file before each run,
#or refactor the code to avoid writing to the file in append mode
if os.path.exists("MaxOutageData.txt"):
os.remove("MaxOutageData.txt")
from comtypes.client import CreateObject # Will allow you to launch STK
#from comtypes.client import GetActiveObject #Will allow you to connect a running instance of STK
#Start the application, it will return a pointer to the Application Interface
app = CreateObject("STK12.Application")
#app = GetActiveObject("STK12.Application")
#app is a pointer to IAgUiApplication
#type info is available with python builtin type method
#type(app)
#More info is available via python built in dir method, which will list
#all the available properties and methods available
#dir(app)
#Additional useful information is available via the python builtin help
#help(app)
app.Visible = True
app.UserControl = True
root = app.Personality2 #root ->IAgStkObjectRoot
#These modules cannot be imported until this point the first time STK is run
#via COM with Python. It does no harm to leave the imports here, but after the
#first run they can be moved to the top with the other import statements.
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects
root.NewScenario("NewTestScenario")
scenario = root.CurrentScenario #scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenaro2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()
#Insert Facilites from text file using connect. Each line of the text file is
#formatted:
#FacName,Longitude,Latitude
with open("Facilities.txt", "r") as faclist:
for line in faclist:
facData = line.strip().split(",")
insertNewFacCmd = "New / */Facility {}".format(facData[0])
root.ExecuteCommand(insertNewFacCmd)
setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1])
root.ExecuteCommand(setPositionCmd)
setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
root.ExecuteCommand(setColorCommand)
#Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)
#Build satellite constellation, attach sensors, assign sensor to constellation object
i = 1
for RAAN in range(0,180,45): # 4 orbit planes
j = 1
for trueAnomaly in range(0,360,45): # 8 sats per plane
#insert satellite
newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i,j))
newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)
#change some basic display attributes
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False
#Buildup Initial State using TwoBody Propagator and Classical Orbital Elements
keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
keplarian.Orientation.Inclination = 86.4
keplarian.Orientation.ArgOfPerigee = 0
keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
keplarian.LocationType = STKObjects.eLocationTrueAnomaly
keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate()
#Attach sensors to each satellite
sensor = newSat.Children.New(STKObjects.eSensor,"Sensor{}{}".format(i,j))
sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)
sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2)
#Add the sensor to the SensorConstellation
sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i,j))
#Adjust the translucenty of the sensor projections
sensor2.VO.PercentTranslucency = 75
sensor2.Graphics.LineStyle = STKUtil.eDotted
j+=1
i+=1
#Create a Chain object for each Facility to the constellation.
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
#Create Chain
facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
chain2 = chain.QueryInterface(STKObjects.IAgChain)
#Modify some display properties
chain2.Graphics.Animation.Color = 65280
chain2.Graphics.Animation.LineWidth = STKObjects.e1
chain2.Graphics.Animation.IsHighlightVisible = False
#Add objects to the chain
chain2.Objects.Add("Facility/{}".format(facName))
chain2.Objects.Add("Constellation/SensorConst")
#Get complete chain access data
compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime)
el = compAcc.DataSets.ElementNames
numRows = compAcc.DataSets.RowCount
maxOutage = []
#Save out the report to a text file
with open("{}CompleteChainAccess.txt".format(facName),"w") as dataFile:
dataFile.write("{},{},{},{}\n".format(el[0],el[1],el[2],el[3]))
for row in range(numRows):
rowData = compAcc.DataSets.GetRow(row)
dataFile.write("{},{},{},{}\n".format(rowData[0],rowData[1],rowData[2],rowData[3]))
dataFile.close()
#Get max outage time for each chain, print to console and save to file
with open("MaxOutageData.txt", "a") as outageFile:
if numRows == 1:
outageFile.write("{},NA,NA,NA\n".format(facName))
print("{}: No Outage".format(facName))
else:
#Get StartTimes and StopTimes as lists
startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())
#convert to from strings to datetimes
startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])
outages = startDatetimes[1:] - stopDatetimes[:-1]
maxOutage = np.amax(outages).total_seconds()
start = stopTimes[np.argmax(outages)]
stop = startTimes[np.argmax(outages)+1]
outageFile.write("{},{},{},{}\n".format(facName,maxOutage,start,stop))
print("{}: {} seconds from {} until {}".format(facName, maxOutage, start, stop))
root.Rewind()
root.Save() | 1.710938 | 2 |
Ex5.py | zelfg/Exercicios_LP_1B | 0 | 12799912 | algo = input("Type something: ")
print("Is the value {} an int? {}".format(algo, algo.isnumeric())) | 1.390625 | 1 |
old/cartpole_lib/cartpole_ppo.py | mmolnar0/sgillen_research | 0 | 12799928 | from baselines.common.cmd_util import make_mujoco_env
from baselines.common import tf_util as U
from baselines import logger
from baselines.ppo1 import pposgd_simple
from cartpole.cartpole_sim import cartpole_policy
def train(env_id, num_timesteps, seed=0):
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return cartpole_policy.CartPolePolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=6,
num_hid_layers=2)
env = make_mujoco_env(env_id, seed)
pi = pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
gamma=0.99, lam=0.95, schedule='linear',
)
env.close()
return pi
if __name__ == '__main__':
logger.configure(dir = "./tensorboard_test", format_strs=["tensorboard"] )
pi = train('InvertedPendulum-v2', num_timesteps=5000, seed=0)
| 1.5 | 2 |
object_classification/batchbald_redux/batchbald.py | YilunZhou/optimal-active-learning | 10 | 12799936 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified).
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch']
# Cell
from dataclasses import dataclass
from typing import List
import torch
import math
from tqdm.auto import tqdm
from toma import toma
from batchbald_redux import joint_entropy
# Cell
def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C)
nats_n_K_C[probs_n_K_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
mean_probs_n_C = probs_n_K_C.mean(dim=1)
nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C)
nats_n_C[mean_probs_n_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
# Internal Cell
# Not publishing these at the moment.
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K)
nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
# Cell
@dataclass
class CandidateBatch:
scores: List[float]
indices: List[int]
def get_batchbald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
num_samples: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
if batch_size == 0:
return CandidateBatch(candidate_scores, candidate_indices)
conditional_entropies_N = compute_conditional_entropy(probs_N_K_C)
batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples,
batch_size - 1,
K,
C,
dtype=dtype,
device=device)
# We always keep these on the CPU.
scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available())
for i in tqdm(range(batch_size), desc="BatchBALD", leave=False):
if i > 0:
latest_index = candidate_indices[-1]
batch_joint_entropy.add_variables(
probs_N_K_C[latest_index:latest_index + 1])
shared_conditinal_entropies = conditional_entropies_N[
candidate_indices].sum()
batch_joint_entropy.compute_batch(probs_N_K_C,
output_entropies_B=scores_N)
scores_N -= conditional_entropies_N + shared_conditinal_entropies
scores_N[candidate_indices] = -float('inf')
candidate_score, candidate_index = scores_N.max(dim=0)
candidate_indices.append(candidate_index.item())
candidate_scores.append(candidate_score.item())
return CandidateBatch(candidate_scores, candidate_indices)
# Cell
def get_bald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
scores_N = -compute_conditional_entropy(probs_N_K_C)
scores_N += compute_entropy(probs_N_K_C)
    candidate_scores, candidate_indices = torch.topk(scores_N, batch_size)
    return CandidateBatch(candidate_scores.tolist(), candidate_indices.tolist()) | 2.171875 | 2 |
datasets/datasets.py | pengpeg/PFAN_MX | 0 | 12799944 | # -*- coding: utf-8 -*-
# @Time : 2020/2/12 15:47
# @Author : Chen
# @File : datasets.py
# @Software: PyCharm
import os, warnings
from mxnet.gluon.data import dataset, sampler
from mxnet import image
import numpy as np
class IdxSampler(sampler.Sampler):
"""Samples elements from [0, length) randomly without replacement.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, indices_selected):
if isinstance(indices_selected, list):
indices_selected = np.array(indices_selected)
self._indices_selected = indices_selected
self._length = indices_selected.shape[0]
def __iter__(self):
indices = self._indices_selected
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
class ImageFolderDataset(dataset.Dataset):
"""A dataset for loading image files stored in a folder structure.
like::
root/car/0001.jpg
root/car/xxxa.jpg
root/car/yyyb.jpg
root/bus/123.jpg
root/bus/023.jpg
root/bus/wwww.jpg
Parameters
----------
root : str
Path to root directory.
flag : {0, 1}, default 1
If 0, always convert loaded images to greyscale (1 channel).
If 1, always convert loaded images to colored (3 channels).
transform : callable, default None
A function that takes data and label and transforms them::
transform = lambda data, label: (data.astype(np.float32)/255, label)
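
    pseudo_labels : sequence, default None
        Optional per-sample pseudo labels; when given, `__getitem__`
        additionally returns the pseudo label for the requested index.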
Attributes
----------
synsets : list
List of class names. `synsets[i]` is the name for the integer label `i`
items : list of tuples
List of all images in (filename, label) pairs.
"""
def __init__(self, root, flag=1, transform=None, pseudo_labels=None):
self._root = os.path.expanduser(root)
self._flag = flag
self._transform = transform
self._exts = ['.jpg', '.jpeg', '.png']
self._list_images(self._root)
self._pseudo_labels = pseudo_labels
def _list_images(self, root):
self.synsets = []
self.items = []
for folder in sorted(os.listdir(root)):
path = os.path.join(root, folder)
if not os.path.isdir(path):
warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)
continue
label = len(self.synsets)
self.synsets.append(folder)
for filename in sorted(os.listdir(path)):
filename = os.path.join(path, filename)
ext = os.path.splitext(filename)[1]
if ext.lower() not in self._exts:
warnings.warn('Ignoring %s of type %s. Only support %s'%(
filename, ext, ', '.join(self._exts)))
continue
self.items.append((filename, label))
def __getitem__(self, idx):
img = image.imread(self.items[idx][0], self._flag)
label = self.items[idx][1]
if self._transform is not None:
return self._transform(img, label)
if self._pseudo_labels is not None:
pseudo_label = self._pseudo_labels[idx]
return img, label, idx, pseudo_label
return img, label, idx
def __len__(self):
return len(self.items)
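
# Example usage (illustrative): restrict loading to a chosen subset of samples.
#
#     dataset = ImageFolderDataset('~/data/images')
#     loader = mxnet.gluon.data.DataLoader(dataset, batch_size=32,
#                                          sampler=IdxSampler([0, 5, 9]))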
| 2.234375 | 2 |
top_secret/_vault.py | trym-inc/top-secret | 0 | 12799952 | from typing import List, Dict, Callable
from .cast_handlers import bool_cast_handler
from .exceptions import CastHandlerMissingError
from .exceptions import SecretMissingError
from .exceptions import SecretSourceMissing
from .secret_sources import BaseSecretSource
from .secret_sources import EnvironmentVariableSecretSource
class NoDefault:
pass
DEFAULT_SECRET_SOURCES = [
EnvironmentVariableSecretSource()
]
DEFAULT_CAST_HANDLERS = {
bool: bool_cast_handler,
}
class Vault:
_cache = {}
    cast_handlers: 'Dict[type, Callable]' = {}
    secret_sources: 'List[BaseSecretSource]' = []
    preprocessors: 'List[Callable[[str], str]]' = []
def __init__(self, secret_sources=None, cast_handlers=None, preprocessors=None):
if cast_handlers is None:
cast_handlers = {}
if secret_sources is None:
secret_sources = []
if preprocessors is None:
preprocessors = []
self.default_secret_sources = secret_sources
self.default_cast_handlers = cast_handlers
self.default_preprocessors = preprocessors
self.reset()
def add_secret_source(self, source: 'BaseSecretSource'):
if source in self.secret_sources:
return
self.secret_sources.append(source)
def clear_secret_sources(self):
self.secret_sources = []
def reset_secret_sources(self):
self.secret_sources = list(self.default_secret_sources)
def add_cast_handler(self, handler_key, handler):
self.cast_handlers[handler_key] = handler
def clear_cast_handlers(self):
self.cast_handlers = {}
def reset_cast_handlers(self):
self.cast_handlers = {**self.default_cast_handlers}
def add_preprocessor(self, fn):
self.preprocessors.append(fn)
def clear_preprocessors(self):
self.preprocessors = []
def reset_preprocessors(self):
self.preprocessors = list(self.default_preprocessors)
def clear_cache(self):
self._cache = {}
def reset(self):
self.reset_secret_sources()
self.reset_cast_handlers()
self.reset_preprocessors()
self.clear_cache()
def get(
self,
name,
default=NoDefault,
*,
source=None,
preprocessors=None,
cast_to=None,
no_cache=False,
cache_result=True
):
if no_cache is False and name in self._cache:
return self._cache[name]
value = self._get_from_source(name, default, source)
value = self._preprocess(value, preprocessors)
value = self._cast_to(value, cast_to, default)
if cache_result:
self._cache[name] = value
return value
def _get_from_source(self, name, default, source):
if source is not None:
return source.get(name)
if not self.secret_sources:
raise SecretSourceMissing
for source in self.secret_sources:
try:
value = source.get(name)
break
except SecretMissingError:
pass
else:
if default is NoDefault:
raise SecretMissingError(name)
else:
value = default
return value
def _preprocess(self, value, preprocessors):
if preprocessors is None:
preprocessors = self.preprocessors
else:
preprocessors = preprocessors
for preprocessor in preprocessors:
value = preprocessor(value)
return value
def _cast_to(self, value, cast_to, default):
if value is default:
return value
if cast_to is not None:
handler = self.cast_handlers.get(cast_to, cast_to)
if not callable(handler):
raise CastHandlerMissingError(
f'Cast handler: {handler!r}, is not registered.'
)
value = handler(value)
return value
vault = Vault(DEFAULT_SECRET_SOURCES, DEFAULT_CAST_HANDLERS)
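
# Example usage (illustrative; the variable name DEBUG is hypothetical):
#
#     debug = vault.get('DEBUG', default=False, cast_to=bool)
#
# Each registered secret source is tried in turn (environment variables by
# default); the value is then preprocessed, cast via the handler registered
# for `cast_to`, and cached for subsequent calls.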
| 1.367188 | 1 |
API/controller/routes.py | GeoscienceAustralia/FSDF-Roads | 1 | 12799960 | from flask import Blueprint, request, Response, render_template
from model.roads import Roads
from pyldapi import ContainerRenderer
import conf
import ast
import folium
print(__name__)
routes = Blueprint('controller', __name__)
DEFAULT_ITEMS_PER_PAGE=50
@routes.route('/', strict_slashes=True)
def home():
return render_template('home.html')
@routes.route('/rds/')
def roads():
# Search specific items using keywords
search_string = request.values.get('search')
try:
# get the register length from the online DB
sql = 'SELECT COUNT(*) FROM "transportroads"'
if search_string:
            sql += ''' WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%';
'''.format(search_string=search_string.strip().upper())
no_of_items = conf.db_select(sql)[0][0]
page = int(request.values.get('page')) if request.values.get('page') is not None else 1
per_page = int(request.values.get('per_page')) \
if request.values.get('per_page') is not None else DEFAULT_ITEMS_PER_PAGE
offset = (page - 1) * per_page
# get the id and name for each record in the database
sql = '''SELECT "id", "name" FROM "transportroads"'''
if search_string:
            sql += ''' WHERE UPPER(cast("id" as text)) LIKE '%{search_string}%' OR UPPER("name") LIKE '%{search_string}%'
'''.format(search_string=search_string.strip().upper())
        sql += ''' ORDER BY "name"
OFFSET {} LIMIT {}'''.format(offset, per_page)
items = []
for item in conf.db_select(sql):
items.append(
(item[0], item[1])
)
except Exception as e:
print(e)
return Response('The Roads database is offline', mimetype='text/plain', status=500)
return ContainerRenderer(request=request,
instance_uri=request.url,
label='Roads Register',
comment='A register of Roads',
parent_container_uri='http://linked.data.gov.au/def/placenames/PlaceName',
parent_container_label='QLD_Roads',
members=items,
members_total_count=no_of_items,
profiles=None,
default_profile_token=None,
super_register=None,
page_size_max=1000,
register_template=None,
per_page=per_page,
search_query=search_string,
search_enabled=True
).render()
@routes.route('/map')
def show_map():
'''
Function to render a map around the specified line
'''
name = request.values.get('name')
coords_list = ast.literal_eval(request.values.get('coords'))[0]
# swap x & y for mapping
points = []
for coords in coords_list:
points.append(tuple([coords[1], coords[0]]))
ave_lat = sum(p[0] for p in points) / len(points)
ave_lon = sum(p[1] for p in points) / len(points)
# create a new map object
folium_map = folium.Map(location=[ave_lat, ave_lon], zoom_start=15)
tooltip = 'Click for more information'
folium.PolyLine(points, color="red", weight=2.5, opacity=1, popup = name, tooltip=tooltip).add_to(folium_map)
return folium_map.get_root().render()
@routes.route('/rds/<string:roads_id>')
def road(roads_id):
roads = Roads(request, request.base_url)
return roads.render()
| 1.671875 | 2 |
diventi/landing/migrations/0009_auto_20180220_0745.py | flavoi/diven | 2 | 12799968 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-02-20 06:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('landing', '0008_remove_featurescover_active'),
]
operations = [
migrations.CreateModel(
name='PresentationCover',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.URLField(verbose_name='image')),
('label', models.CharField(blank=True, max_length=50, verbose_name='label')),
('label_it', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('label_en', models.CharField(blank=True, max_length=50, null=True, verbose_name='label')),
('section', models.CharField(choices=[('DES', 'description'), ('FEA', 'features')], default='DES', max_length=3)),
('default', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Presentation Cover',
'verbose_name_plural': 'Presentation Covers',
},
),
migrations.RemoveField(
model_name='presentation',
name='features_cover',
),
migrations.DeleteModel(
name='FeaturesCover',
),
migrations.AddField(
model_name='presentation',
name='presentation_covers',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='landing.PresentationCover', verbose_name='presentation cover'),
),
]
| 1.070313 | 1 |
utils/ImageProcesser.py | LLRukia/kkrbot | 0 | 12799976 | import os
import re
import uuid
import globals
from PIL import Image, ImageDraw, ImageFont
from utils.Asset import ImageAsset
SPACING = 5
back_regex = re.compile(r'back_([0-9]*)\.jpg')
BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT = 140, 130
BACK_PIC_NUM_EACH_LINE = 5
def bg_image_gen(back_number, s):
def half_en_len(s):
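        # approximate width in full-width units: ASCII characters count as a
        # half unit, multi-byte (e.g. CJK) characters as a full unit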
return (len(s) + (len(s.encode(encoding='utf-8')) - len(s)) // 2) // 2
back_number = f'back_{back_number}'
img_path = os.path.join(globals.staticpath, f'bg/{back_number}.jpg')
im_src = Image.open(img_path)
if back_number in [f'back_{n}' for n in [38, 46, 47, 51, 52, 53]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [33]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height - 2 * real_height
draw.text((x, y), s, fill=(245, 255, 250), font=font)
elif back_number in [f'back_{n}' for n in [50]]:
real_width = max(3, im_src.width // max(6, half_en_len(s)) * 4 // 5)
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = 5
draw.text((x, y), s, fill=(23, 0, 0), font=font)
else:
real_width = max(3, im_src.width // max(6, half_en_len(s)))
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), real_width)
real_height = real_width + SPACING
im = Image.new('RGB', (im_src.width, im_src.height + real_height), (255, 255, 255))
im.paste(im_src)
text_width = im_src.width
draw = ImageDraw.Draw(im)
sz = draw.textsize(s, font=font)
x = (text_width - sz[0]) / 2
y = im_src.height
draw.text((x, y), s, fill=(23, 0, 0), font=font)
return im
def get_back_pics():
raw = ImageAsset.get('back_catalogue')
if raw:
return raw
back_pic_set = set()
for _, _, files in os.walk(os.path.join(globals.staticpath, 'bg')):
for f in files:
if f.startswith('back_') and f.endswith('.jpg'):
num = int(back_regex.findall(f)[0])
back_pic_set.add(num)
cur_back_pic_nums = len(back_pic_set)
if cur_back_pic_nums == 0:
return
im = Image.new('RGB', (BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT * (((cur_back_pic_nums - 1) // BACK_PIC_NUM_EACH_LINE) + 1)), (255, 255, 255))
for i, num in enumerate(back_pic_set):
im_o = bg_image_gen(num, f'底图 {num}')
im_o = im_o.resize((BACK_PIC_UNIT_WIDTH, BACK_PIC_UNIT_HEIGHT))
box = (i % BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_WIDTH, i // BACK_PIC_NUM_EACH_LINE * BACK_PIC_UNIT_HEIGHT)
im.paste(im_o, box)
return ImageAsset.image_raw(im, 'back_catalogue')
def merge_image(rsn, rarity, attribute, band_id, thumbnail=True, trained=False, return_fn=False):
if thumbnail:
try:
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png'))
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png'))
if not trained:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((32, 32), Image.ANTIALIAS)
else:
back_image = Image.open(f'{os.path.join(globals.asset_card_thumb_path, f"{rsn}_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((32, 32), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-1-{attribute}.png'))
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'card-{rarity}.png'))
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (0, 0), mask=band_icon)
back_image.paste(attribute_icon, (180 - 50, 0), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (2, 170 - 27 * (i + 1)), mask=star)
if return_fn:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/thumb/m_{rsn}_{"normal" if not trained else "after_training"}.png')
back_image.save(fn)
return fn
return back_image
except:
import sys
sys.excepthook(*sys.exc_info())
return None
else:
fn = os.path.join(globals.datapath, 'image', f'auto_reply/cards/m_{rsn}_{"normal" if not trained else "after_training"}.png')
if os.access(fn, os.R_OK):
return fn
try:
OUT_WIDTH, OUT_HEIGHT = 1364, 1020
INNER_WIDTH, INNER_HEIGHT = 1334, 1002
STAR_SIZE, ICON_SIZE = 100, 150
TOP_OFFSET, RIGHT_OFFSET, BOTTOM_OFFSET, LEFT_OFFSET = 22, 165, 20, 10
STAT_STEP = 95
back_image = Image.new('RGB', (OUT_WIDTH, OUT_HEIGHT))
attribute_icon = Image.open(os.path.join(globals.asset_resource_path, f'{attribute}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
band_icon = Image.open(os.path.join(globals.asset_resource_path, f'band_{band_id}.png')).resize((ICON_SIZE, ICON_SIZE), Image.ANTIALIAS)
if not trained:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_normal.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
else:
card = Image.open(f'{os.path.join(globals.asset_card_path, f"{rsn}_card_after_training.png")}')
star = Image.open(os.path.join(globals.asset_resource_path, 'star_trained.png')).resize((STAR_SIZE, STAR_SIZE), Image.ANTIALIAS)
if rarity == 1:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-1-{attribute}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
else:
frame = Image.open(os.path.join(globals.asset_resource_path, f'frame-{rarity}.png')).resize((OUT_WIDTH, OUT_HEIGHT), Image.ANTIALIAS)
back_image.paste(card, ((OUT_WIDTH - INNER_WIDTH) // 2, (OUT_HEIGHT - INNER_HEIGHT) // 2), mask=card)
back_image.paste(frame, (0, 0), mask=frame)
back_image.paste(band_icon, (LEFT_OFFSET, TOP_OFFSET), mask=band_icon)
back_image.paste(attribute_icon, (OUT_WIDTH - RIGHT_OFFSET, TOP_OFFSET), mask=attribute_icon)
for i in range(rarity):
back_image.paste(star, (LEFT_OFFSET, OUT_HEIGHT - BOTTOM_OFFSET - STAT_STEP * (i + 1)), mask=star)
back_image.save(fn)
return fn
except:
return ''
def white_padding(width, height):
return Image.new('RGB', (width, height), (255, 255, 255))
def thumbnail(**options):
# images: a list of Image objects, or a list of lists(tuples) of Image objects
# labels: a list of strings shown at the bottom
# image_style: if not assigned, take the params of the first image; if both assigned, will be forced to resize
# width: width of each image, if not assigned, will be min(scaled value by height, 180)
# height: height of each image, if not assigned, will be min(scaled value by width, 180)
# label_style:
# font_size: font_size of each label
# col_num (images are arranged row by row)
# col_space: (space between two columns)
# row_space (space between two rows, if labels exist, it means the space between the label of row1 and the image of row2)
images = options['images']
first_image = images[0]
if not isinstance(first_image, Image.Image):
if isinstance(first_image, (list, tuple)):
first_image = first_image[0]
if not isinstance(first_image, Image.Image):
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
raise Exception('images must be a list of Image objects, or a list of lists(tuples) of Image objects')
else:
images = [[im] for im in images]
if not options.get('image_style'):
box_width, box_height = first_image.size
else:
if options['image_style'].get('width') and options['image_style'].get('height'):
box_width, box_height = options['image_style']['width'], options['image_style']['height']
images = [[im.resize((box_width, box_height)) for im in im_list] for im_list in images]
elif options['image_style'].get('width') and not options['image_style'].get('height'):
images = [[im.resize((options['image_style']['width'], options['image_style']['width'] * im.size[1] // im.size[0])) for im in im_list] for im_list in images]
box_width, box_height = options['image_style']['width'], max([im.size[1] for im_list in images for im in im_list])
elif not options['image_style'].get('width') and options['image_style'].get('height'):
images = [[im.resize((options['image_style']['height'] * im.size[0] // im.size[1], options['image_style']['height'])) for im in im_list] for im_list in images]
box_width, box_height = max([im.size[0] for im_list in images for im in im_list]), options['image_style']['height']
col_num = options.get('col_num', 4)
row_num = (len(images) - 1) // col_num + 1
col_space = options.get('col_space', 0)
row_space = options.get('row_space', 0)
if options.get('labels'):
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), options.get('label_style', {}).get('font_size', 20))
all_chars = set()
max_label_width = 0
for label in options['labels']:
max_label_width = max(max_label_width, ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(label, font=font)[0])
all_chars |= set(label)
label_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(''.join(all_chars), font=font)[1]
box_width = max(box_width * len(images[0]), max_label_width) // len(images[0])
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
(box_height + label_height) * row_num + row_num * row_space,
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
labels = options['labels']
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + col_space * c,
r * (box_height + label_height + row_space)
))
sz = draw.textsize(labels[r * col_num + c], font=font)
draw.text((
len(image_group) * c * box_width + (len(image_group) * box_width - sz[0]) // 2 + c * col_space, r * (box_height + label_height + row_space) + box_height
), labels[r * col_num + c], fill=(0, 0, 0), font=font)
else:
back_image = Image.new('RGB', (
col_num * len(images[0]) * box_width + (col_num - 1) * col_space,
box_height * row_num + (row_num - 1) * row_space
), (255, 255, 255))
draw = ImageDraw.Draw(back_image)
for r in range(row_num):
for c in range(col_num):
if r * col_num + c >= len(images):
break
image_group = images[r * col_num + c]
for i, im in enumerate(image_group):
back_image.paste(im, (
(len(image_group) * c + i) * box_width + (box_width - im.size[0]) // 2 + c * col_space * int(i == len(image_group) - 1),
r * (box_height + row_space)
))
return ImageAsset.image_raw(back_image)
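
# Example call (illustrative sketch of the options documented above):
#
#     raw = thumbnail(
#         images=[im1, im2, im3],          # Image objects, or lists of them
#         labels=['card 1', 'card 2', 'card 3'],
#         image_style={'height': 180},     # scale every image to 180 px tall
#         col_num=2, col_space=10, row_space=10,
#     )
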
def open_nontransparent(filename):
try:
image = Image.open(filename).convert('RGBA')
new_image = Image.new('RGBA', image.size, (255, 255, 255, 255))
new_image.paste(image, (0, 0), image)
return new_image
except:
pass
def manual():
raw = ImageAsset.get('manual')
if raw:
return raw
row_space = 20
col_space = 50
font = ImageFont.truetype(os.path.join(globals.staticpath, 'simhei.ttf'), 20)
lines = [
'ycm/有车吗: 查询车牌(来源: https://bandoristation.com/)',
'底图目录: 查询底图目录(是的,不仅功能一样,连图都盗过来了,虽然还没更新。底图31,Tsugu!.jpg)',
'底图+数字: 切换底图',
'xx.jpg: 图片合成',
'',
'以下查询功能数据来源Bestdori',
'查卡 [稀有度] [颜色] [人物] [乐团] [技能类型]: 按条件筛选符合要求的卡片,同类条件取并集,不同类条件取交集。例如: 查卡 4x pure ksm 分',
'查卡+数字: 按id查询单卡信息',
'无框+数字: 按id查询单卡无框卡面',
'活动列表 [活动类型]: 按条件筛选符合要求的活动,活动类型包括“一般活动”,“竞演LIVE”或“对邦”,“挑战LIVE”或“CP”,“LIVE试炼”,“任务LIVE”',
'活动+数字 [服务器]: 按id查询单活动信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'卡池列表 [卡池类型]: 按条件筛选符合要求的卡池,卡池类型包括“常驻”或“无期限”,“限时”或“限定”或“期间限定”,“特殊”(该条件慎加,因为没啥特别的卡池),“必4”',
'卡池+数字 [服务器]: 按id查询单卡池信息,默认国服,可选“日服”,“国际服”,“台服”,“国服”,“韩服”',
'',
'以下查询功能数据来源bilibili开放的豹跳接口,慎用',
'查抽卡名字 名字: 查用户名称包含该名字的玩家出的4星',
]
line_height = ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize('底图目录', font=font)[1]
image = Image.new('RGB', (ImageDraw.Draw(Image.new('RGB', (0, 0))).textsize(max(lines, key=lambda line: len(line)),
font=font)[0] + 2 * col_space, (line_height + row_space) * len(lines)), (255, 255, 255))
draw = ImageDraw.Draw(image)
line_pos = row_space
for i, line in enumerate(lines):
sz = draw.textsize(line, font=font)
draw.text((col_space, line_pos), line, fill=(0, 0, 0), font=font)
line_pos += sz[1] + row_space
return ImageAsset.image_raw(image, 'manual')
def compress(infile, mb=None, step=10, quality=80, isabs=False):
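    # Re-encode `infile` as a JPEG; if `mb` is given, keep lowering the quality
    # by `step` until the output size drops below `mb` (the size is measured in
    # kilobytes despite the parameter name).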
if not isabs:
absinfile = os.path.join(globals.datapath, 'image', infile)
else:
absinfile = infile
outfile = infile[infile.rfind('/') + 1:infile.rfind('.')] + '-c.jpg'
absoutfile = os.path.join(globals.datapath, 'image', outfile)
if os.path.exists(absoutfile):
return outfile
if mb is None:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
return absoutfile
o_size = os.path.getsize(absinfile) / 1024
if o_size <= mb:
return infile
while o_size > mb:
im = Image.open(absinfile)
im = im.convert('RGB')
im.save(absoutfile, quality=quality)
if quality - step < 0:
break
quality -= step
o_size = os.path.getsize(absoutfile) / 1024
return absoutfile
| 1.914063 | 2 |
dimensionality_reduction/LDA.py | jonathangouvea/PatternRecognition | 0 | 12799984 | import numpy as np
from numpy import linalg as LA
class LDA():
def __init__(self, dim = 2):
self.dim = dim
self.matrixTransf = None
def fit_transform(self, X, labels):
positive = []
negative = []
for i in range(len(labels)):
if labels[i] == 1:
positive.append(X[i])
else:
negative.append(X[i])
positive = np.array(positive)
negative = np.array(negative)
media_pos = np.mean(positive, axis = 0)
media_neg = np.mean(negative, axis = 0)
cov_pos = np.cov(positive.T)
cov_neg = np.cov(negative.T)
SW = cov_pos + cov_neg
sub = (media_pos - media_neg)
print(SW.shape)
print(sub.shape)
wLDA = np.matmul(LA.pinv(SW), sub)
self.matrixTransf = np.array(wLDA)
print("Matriz de transformação")
print(self.matrixTransf)
res = np.matmul(X, self.matrixTransf.T)
return res
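
# Example usage (illustrative): for two classes (label 1 vs. the rest) the
# Fisher discriminant computed above projects the data onto one direction.
#
#     lda = LDA()
#     projected = lda.fit_transform(X, labels)   # shape: (n_samples,)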
| 2.28125 | 2 |
introducing-python-answers/chapter10.py | DailyYu/python-study | 1 | 12799992 | <reponame>DailyYu/python-study
# Q1
from datetime import date
now = date.today()
now_string = now.isoformat()
with open('today.txt', 'w') as file:
    print(now_string, file=file)
# Q2
today_string = None
with open('today.txt') as file:
today_string = file.read()
print(today_string)
# Q3
from datetime import datetime
format = '%Y-%m-%d\n'
print(datetime.strptime(today_string, format))
# Q4
import os
print(os.listdir('.'))
# Q5
print(os.listdir('..'))
# Q6
import multiprocessing
def print_current_time(seconds):
from time import sleep
sleep(seconds)
print(f'Wait for {seconds} seconds, Current time is {datetime.today().time()}')
import random
# On Windows, multiprocessing re-imports the whole module, which would
# recursively spawn new processes; the guard below avoids that problem.
if __name__ == '__main__':
for n in range(3):
seconds = random.random()
process = multiprocessing.Process(target=print_current_time, args=(seconds,))
process.start()
# Q7
my_birthday = date(1993, 8, 13)
print(my_birthday)
# Q8
# weekday() counts days of the week from zero (Monday == 0)
print(my_birthday.weekday())
# isoweekday() counts days of the week from one (Monday == 1)
print(my_birthday.isoweekday())
# Q9
from datetime import timedelta
ten_thousand_day_after_my_birthday = my_birthday + timedelta(days=10000)
print(ten_thousand_day_after_my_birthday)
| 2.25 | 2 |