blob_id (stringlengths 40) | directory_id (stringlengths 40) | path (stringlengths 2-616) | content_id (stringlengths 40) | detected_licenses (listlengths 0-69) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5-118) | snapshot_id (stringlengths 40) | revision_id (stringlengths 40) | branch_name (stringlengths 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable ⌀) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (stringclasses, 23 values) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (stringclasses, 213 values) | src_encoding (stringclasses, 30 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 2-10.3M) | extension (stringclasses, 246 values) | content (stringlengths 2-10.3M) | authors (listlengths 1) | author_id (stringlengths 0-212) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
816403dc9d93b4276bffb4d8e162c51ea13231b8 | 0be45470f15f12872d81f98c72e3b8528100ad27 | /pointCollection/tools/RDE.py | 563e437d633d241e661519931619d6cf3b3cf410 | [
"MIT"
] | permissive | SmithB/pointCollection | 19a43bb19b1753542f693645fe4f537c2dbf7af9 | 026a60eb7e2fbe5333c7a30bd8299dda44c5878e | refs/heads/master | 2023-08-23T18:56:49.943934 | 2023-08-18T16:41:12 | 2023-08-18T16:41:12 | 220,045,965 | 4 | 8 | MIT | 2023-07-03T15:47:58 | 2019-11-06T16:51:04 | Jupyter Notebook | UTF-8 | Python | false | false | 584 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 23 16:31:30 2017
@author: ben
"""
import numpy as np
def RDE(x):
xs=x.copy()
xs=np.isfinite(xs) # this changes xs from values to a boolean
if np.sum(xs)<2 :
return np.nan
ind=np.arange(0.5, np.sum(xs))
LH=np.interp(np.array([0.16, 0.84])*np.sum(xs), ind, np.sort(x[xs]))
#print('LH =',LH)
return (LH[1]-LH[0])/2. # trying to get some kind of a width of the data ~variance
#import scipy.stats as stats
#def RDE(x):
# return (stats.scoreatpercentile(x, 84 )-stats.scoreatpercentile(x, 16))/2. | [
"[email protected]"
] | |
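A minimal usage sketch of the RDE estimator above, on made-up data (assumes numpy and scipy are installed). It checks the idea in the closing comment: half the spread between the 16th and 84th percentiles approximates one standard deviation for Gaussian data while staying insensitive to outliers, matching the commented-out scipy variant.

import numpy as np
import scipy.stats as stats

x = np.random.default_rng(0).normal(0.0, 2.0, 5000)  # sigma = 2
# robust spread: half the distance between the 16th and 84th percentiles
robust = (stats.scoreatpercentile(x, 84) - stats.scoreatpercentile(x, 16)) / 2.0
print(robust)  # ~2.0, close to np.std(x), but robust to outliers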
480127fceb33213f368de855a806d8bd709a0909 | 2136c75df909b40c2667679b2ba4740d8b50a299 | /test.py | 86957845b7c17d62c3ce76575f6a1f07d42c824f | [] | no_license | jianglikun/preMLI | 19e91935266539afa15cb86a3e62608840c775d1 | 54b48fba7adf7fb232ac1a2cec883c596d49d3a3 | refs/heads/main | 2023-09-04T17:32:13.657101 | 2021-11-10T08:27:42 | 2021-11-10T08:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="4"
from model import get_model
from model import get_model_max
from model import get_model_C_mul
from model import get_model_C_sub
import tensorflow as tf
import numpy as np
from sklearn.metrics import roc_auc_score,average_precision_score, f1_score
from sklearn.metrics import accuracy_score,recall_score
def stat(y_label,y_pred):
# print('y_label=',y_label)
# print('y_pred=',y_pred)
threshold = 0.5
auc = roc_auc_score(y_label, y_pred)
aupr = average_precision_score(y_label, y_pred)
for i in range(len(y_pred)):
if y_pred[i][0] >= threshold:
y_pred[i][0] = 1
if y_pred[i][0] < threshold:
y_pred[i][0] = 0
TP = 0
TN = 0
FP = 0
FN = 0
for i in range(len(y_pred)):
if y_pred[i][0] == 0 and y_label[i] == 0:
TN = TN + 1
if y_pred[i][0] == 1 and y_label[i] == 1:
TP = TP + 1
if y_pred[i][0] == 0 and y_label[i] == 1:
FN = FN + 1
if y_pred[i][0] == 1 and y_label[i] == 0:
FP = FP + 1
specificity = TN/(TN+FP)
recall = recall_score(y_label,y_pred)
acc = accuracy_score(y_label,y_pred)
f1 = f1_score(y_label, y_pred)
acc = round(acc, 4)
auc = round(auc,4)
aupr = round(aupr, 4)
f1 = round(f1,4)
return acc,auc,aupr,f1,recall,specificity
##########################
datatype = 2021
kmer = 3
##########################
for m in range(100):
model=None
model=get_model()
model.load_weights('./model/3mer2021/Solanum lycopersicumModel%s.h5'%m)
if datatype == 2020:
names = ['Arabidopsis lyrata','Solanum lycopersicum']
elif datatype == 2021:
names = ['aly','mtr','stu','bdi']
for name in names:
Data_dir='/home/yxy/Project/002/processData/3mer/'
if datatype == 2020:
test=np.load(Data_dir+'5mer%s_test.npz'%name)
elif datatype == 2021:
test=np.load(Data_dir+'%s%stest2021.npz'%(name,kmer))
X_mi_tes,X_lnc_tes,y_tes=test['X_mi_tes'],test['X_lnc_tes'],test['y_tes']
print("****************Testing %s specific model on %s cell line****************"%(m,name))
y_pred = model.predict([X_mi_tes,X_lnc_tes])
auc = roc_auc_score(y_tes, y_pred)
aupr = average_precision_score(y_tes, y_pred)
f1 = f1_score(y_tes, np.round(y_pred.reshape(-1)))
print("AUC : ", auc)
print("AUPR : ", aupr)
print("f1_score", f1)
acc,auc,aupr,f1,recall,specificity = stat(y_tes, y_pred)
print("ACC : ", acc,"auc : ", auc,"aupr :" , aupr,"f1 : ", f1,"recall : ",recall,"specificity : ",specificity)
| [
"[email protected]"
] | |
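The hand-rolled TP/TN/FP/FN loop inside stat() can be cross-checked with sklearn's confusion_matrix; a sketch under the assumption of binary 0/1 labels and the same 0.5 threshold:

import numpy as np
from sklearn.metrics import confusion_matrix

y_label = np.array([0, 1, 1, 0, 1])
y_pred = (np.array([0.2, 0.9, 0.4, 0.1, 0.8]) >= 0.5).astype(int)  # threshold at 0.5
tn, fp, fn, tp = confusion_matrix(y_label, y_pred).ravel()
specificity = tn / (tn + fp)  # matches the manual computation in stat()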
055aabb9ef9a32291d0e6edb97d8a581f7df3962 | 2509936d814fb6cdd283c2549c518c8dfad9450c | /api/staticdata/regimes/migrations/0010_merge_20221214_1035.py | 81daedc733cfa1f2e70025a26480bb78e0acf8fd | [
"MIT"
] | permissive | uktrade/lite-api | 19f829119fa96de3f4862eb233845508b0fef7eb | b35792fc981220285ed9a7b3659aba460f1b207a | refs/heads/dev | 2023-08-25T10:11:17.594001 | 2023-08-24T14:24:43 | 2023-08-24T14:24:43 | 172,914,199 | 4 | 3 | MIT | 2023-09-14T17:36:47 | 2019-02-27T12:46:22 | Python | UTF-8 | Python | false | false | 277 | py | # Generated by Django 3.2.16 on 2022-12-14 10:35
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("regimes", "0009_update_cwc_shortened_names"),
("regimes", "0009_update_nsg_regimes"),
]
operations = []
| [
"[email protected]"
] | |
a0a4cd2c1fb76ac7c81e08fdfe66c737297cdeff | d2f0b661b1bf9511ffecba97ed41b860c511ff29 | /example/migrations/0007_artproject_description.py | 8dec01243a56fa8ef98b10afa649634b9e1bf223 | [
"BSD-2-Clause"
] | permissive | private-forks/django-rest-framework-json-api | d5ed6e8f631dc04a18a36599691373b9a4608ace | ae98a93c94f1591fcfc3675106fe37fafdb9e510 | refs/heads/master | 2022-11-08T05:55:01.817631 | 2020-06-28T09:00:51 | 2020-06-28T22:44:58 | 275,545,319 | 1 | 0 | BSD-2-Clause | 2020-06-28T08:54:17 | 2020-06-28T08:54:17 | null | UTF-8 | Python | false | false | 405 | py | # Generated by Django 2.2.2 on 2019-06-07 06:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('example', '0006_auto_20181228_0752'),
]
operations = [
migrations.AddField(
model_name='artproject',
name='description',
field=models.CharField(max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
261d9931d6de9ca4963ef41a7e663cbf564d51d7 | c7cd5800ec5b64a943960a0fcac499ef7b9ed00e | /cibblbibbl/tournament/export/awards/bbcode.py | 9e2f35998fe2b6622dd0e2fe17e829615b9ce24f | [] | no_license | FUMBBLPlus/cibblbibbl | cadd5dc33d4f15bc70d600abf83a475c79f9caa9 | 77953afd6fb4f2460c30c9d41f5dbe1dc5654daa | refs/heads/master | 2021-07-12T12:56:32.947340 | 2020-09-29T21:43:59 | 2020-09-29T21:43:59 | 205,587,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,111 | py | import collections
import cibblbibbl
from cibblbibbl import bbcode
from .plaintext import kreasontrans, nreasontrans
def _diedstr(dRPP, killerId, reason):
if killerId:
killer = cibblbibbl.player.player(killerId)
oppoTe = dRPP[killer]["team"]
return (
f'{kreasontrans.get(reason, reason)}'
f'{bbcode.player(killer)} ({_teamstr(killer, oppoTe)})'
)
else:
return f', {nreasontrans.get(reason, reason)}'
def _playersseq(T, source_playersseq):
StarPlayer = cibblbibbl.player.StarPlayer
players = []
for Pl in sorted(source_playersseq, key=lambda Pl: Pl.name):
if Pl.achievements:
prestige = sum(
A.prestige(T.season, maxtournament=T)
for A in Pl.achievements
)
if prestige or isinstance(Pl, StarPlayer):
players.append([Pl, prestige])
elif isinstance(Pl, StarPlayer):
players.append([Pl, 0])
return players
def _teamstr(player, team):
if isinstance(player, cibblbibbl.player.StarPlayer):
return "Star Player"
elif isinstance(player, cibblbibbl.player.MercenaryPlayer):
return "Mercenary"
else:
return bbcode.team(team)
def bbcode_section(s):
return bbcode.size(bbcode.b(bbcode.i(s)), 12)
def export(T):
cls_StarPlayer = cibblbibbl.player.StarPlayer
cls_RaisedDeadPlayer = cibblbibbl.player.RaisedDeadPlayer
dTAv1 = T.teamachievementvalues(False, False, False, False)
dPAv1 = T.playerachievementvalues()
dRPP = T.rawplayerperformances()
dPP = T.playerperformances()
achievements = sorted(T.achievements)
d_achievements = collections.defaultdict(dict)
for A in achievements:
d_achievements[A.clskey()][A.subject] = A
prev_tournament = {}
for Te in T.teams():
prev_tournament[Te] = Te.prev_tournament(T)
parts = []
parts.append("[block=center]")
nrsuffix = {1: "st", 2: "nd", 3: "rd"}
for d in reversed(T.standings()):
nr = d["nr"]
if nr is None:
continue
Te = d["team"]
nrstr = f'{nr}{nrsuffix.get(nr, "th")} place: '
nrstr = bbcode.i(nrstr)
part = nrstr + bbcode.team(Te)
if nr == 1:
part = bbcode.size(bbcode.b(part), 12)
parts.append(part + "\n")
tp_keys = ("tp_admin", "tp_match", "tp_standings")
dtp = {k: 0 for k in tp_keys}
for k in dtp:
A = d_achievements.get(k, {}).get(Te)
if A:
dtp[k] = A.prestige(T.season, maxtournament=T)
prestige = sum(dtp.values())
if T.friendly == "no":
preststr = f'Prestige Points Earned: {prestige}'
dTTAv1 = dTAv1[Te]
dTPAv1 = dPAv1[Te]
T0 = prev_tournament[Te]
if T0:
dPAv0 = T0.playerachievementvalues()
dTPAv0 = dPAv0[Te]
else:
dTPAv0 = 0
achiev = dTTAv1 + dTPAv1 - dTPAv0
if achiev:
sign = ("+" if -1 < achiev else "")
preststr += f' (and {sign}{achiev} Achiev.)'
parts.append(preststr + "\n")
parts.append("\n")
parts.append("[/block]")
parts.append("\n")
As = sorted(
A for A in T.achievements
if not A.clskey().startswith("tp")
and A.get("status", "proposed") in {"awarded", "proposed"}
and not isinstance(A.subject, cls_RaisedDeadPlayer)
)
if As:
parts.append(bbcode_section("Achievements") + "\n")
parts.append(bbcode.hr() + "\n")
items = []
prev_clskey = None
for A in As:
item = A.export_bbcode()
if item is None:
continue
clskey = A.clskey()
if clskey != prev_clskey:
if items:
parts.append(bbcode.list_(items) + "")
parts.append("\n")
parts.append("[block=center]")
logo_url = A.get("logo_url")
if logo_url:
parts.append(bbcode.img(logo_url) + "\n")
parts.append(bbcode.b(bbcode.i(A["name"])) + "\n")
parts.append("\n")
descr = bbcode.i(A["description"])
parts.append(
"[block=automargin width=67%]"
+ descr
+ "[/block]"
)
parts.append("[/block]")
prev_clskey = clskey
items = []
items.append(item)
else:
if items:
parts.append(bbcode.list_(items) + "")
deadplayers = _playersseq(T, T.deadplayers())
transferred = T.transferredplayers()
trplayers = _playersseq(T, transferred)
retiredplayers = T.retiredplayers(dPP=dPP)
retplayers = _playersseq(T, retiredplayers)
if deadplayers or trplayers or retplayers:
if As:
parts.append("\n")
parts.append("\n")
stitle = (
"Players with achievements"
" that changed their forms and/or teams"
)
parts.append(bbcode_section(stitle) + "\n")
parts.append(bbcode.hr() + "\n")
if deadplayers:
parts.append(
bbcode.center(
bbcode.img("/i/607211") + "\n"
+ bbcode.b(bbcode.i("Died"))
)
)
items = []
for Pl, prestige in deadplayers:
d = dPP[Pl]
matchId, half, turn, reason, killerId = d["dead"]
Ma = cibblbibbl.match.Match(matchId)
Te = d["team"]
s = ""
s += f'{bbcode.player(Pl)} ({_teamstr(Pl, Te)})'
if prestige:
s += f' ({prestige} Achiev.)'
s += _diedstr(dRPP, killerId, reason)
s += f' [{bbcode.match(Ma, "match")}]'
items.append(s)
parts.append(bbcode.list_(items) + "")
if trplayers:
if deadplayers:
parts.append("\n")
parts.append(
bbcode.center(
bbcode.img("/i/607210") + "\n"
+ bbcode.b(bbcode.i(
"Transferred and/or Transformed"
))
)
)
items = []
for Pl, prestige in trplayers:
matchId, half, turn, reason, killerId = transferred[Pl]
Ma = cibblbibbl.match.Match(matchId)
teams = Ma.teams
Te = dRPP[Pl]["team"]
s = ""
s += f'{bbcode.player(Pl)} ({_teamstr(Pl, Te)})'
if prestige:
s += f' ({prestige} Achiev.)'
s += _diedstr(dRPP, killerId, reason)
nextsparts = []
for Pl1 in Pl.nexts:
name = bbcode.player(Pl1)
if isinstance(Pl1, cls_RaisedDeadPlayer):
if Pl1.next is not None:
Pl1 = Pl1.next
name = bbcode.player(Pl1)
else:
plparts = str(Pl1).split()
plparts[0] = Pl1.prevreason
name = " ".join(plparts)
try:
nextTe = dRPP[Pl1]["team"]
except KeyError:
nextTe = Pl1.team
nextsparts.append(
f'to {bbcode.team(nextTe)}'
f' as {name}'
)
s += f', joined {" and ".join(nextsparts)}'
items.append(s)
parts.append(bbcode.list_(items))
if retplayers:
if deadplayers or trplayers:
parts.append("\n")
parts.append(
bbcode.center(
bbcode.img("/i/607209") + "\n"
+ bbcode.b(bbcode.i("Retired"))
)
)
items = []
for Pl, prestige in retplayers:
d = retiredplayers[Pl]
Te = d["team"]
s = f'{bbcode.player(Pl)} ({bbcode.team(Te)})'
s += f' ({prestige} Achiev.)'
items.append(s)
parts.append(bbcode.list_(items))
s = "".join(parts)
return s
| [
"[email protected]"
] | |
dfe9ebf962360f745fb6ad57ec1438bac7c4f29a | 3c65b9732508a99a6da712e759581f05c91673bb | /data_augmentation_test.py | 2f919b40fe5bae698ea4704b11145a1527556a41 | [] | no_license | ymguo/DataAugmentation | 2d7329f4ecfcf33c7783b8b65d86385363630b16 | a271b6d5adf8fd31a2d5619b4fbe6bc2df9b338a | refs/heads/master | 2022-01-25T00:06:14.793651 | 2019-07-21T04:39:06 | 2019-07-21T04:39:06 | 198,011,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,216 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 20:46:03 2019
@author: ymguo
Combine image crop, color shift, rotation and perspective transform together
to complete a data augmentation script.
"""
import skimage.io as io
import numpy as np
import cv2
import random
import os
import glob
from matplotlib import pyplot as plt
#from skimage import data_dir
#from PIL import Image
def data_augmentation(f):
    # img = io.imread(f)  # read the RGB images one by one
img = f
# image crop
img_crop = img[0:300, 0:450]
# color shift
def random_light_color(img):
# brightness
B, G, R = cv2.split(img)
b_rand = random.randint(-50, 50)
if b_rand == 0:
pass
elif b_rand > 0:
lim = 255 - b_rand
            B[B > lim] = 255 # clamp so values do not exceed 255
B[B <= lim] = (b_rand + B[B <= lim]).astype(img.dtype)
elif b_rand < 0:
lim = 0 - b_rand
            B[B < lim] = 0 # clamp so values do not go below 0
B[B >= lim] = (b_rand + B[B >= lim]).astype(img.dtype)
g_rand = random.randint(-50, 50)
if g_rand == 0:
pass
elif g_rand > 0:
lim = 255 - g_rand
G[G > lim] = 255
G[G <= lim] = (g_rand + G[G <= lim]).astype(img.dtype)
elif g_rand < 0:
lim = 0 - g_rand
G[G < lim] = 0
G[G >= lim] = (g_rand + G[G >= lim]).astype(img.dtype)
r_rand = random.randint(-50, 50)
if r_rand == 0:
pass
elif r_rand > 0:
lim = 255 - r_rand
R[R > lim] = 255
R[R <= lim] = (r_rand + R[R <= lim]).astype(img.dtype)
elif r_rand < 0:
lim = 0 - r_rand
R[R < lim] = 0
R[R >= lim] = (r_rand + R[R >= lim]).astype(img.dtype)
        img_merge = cv2.merge((B, G, R)) # merge the channels back together
# img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR) ?
return img_merge
img_color_shift = random_light_color(img_crop)
# rotation
M = cv2.getRotationMatrix2D((img_color_shift.shape[1] / 2, img_color_shift.shape[0] / 2), 30, 0.85) # center, angle, scale
    img_rotate = cv2.warpAffine(img_color_shift, M, (img_color_shift.shape[1], img_color_shift.shape[0])) # warpAffine applies the rotation matrix to the image
# perspective transform
def random_warp(img, row, col):
height, width, channels = img.shape
# warp:
random_margin = 60
x1 = random.randint(-random_margin, random_margin)
y1 = random.randint(-random_margin, random_margin)
x2 = random.randint(width - random_margin - 1, width - 1)
y2 = random.randint(-random_margin, random_margin)
x3 = random.randint(width - random_margin - 1, width - 1)
y3 = random.randint(height - random_margin - 1, height - 1)
x4 = random.randint(-random_margin, random_margin)
y4 = random.randint(height - random_margin - 1, height - 1)
dx1 = random.randint(-random_margin, random_margin)
dy1 = random.randint(-random_margin, random_margin)
dx2 = random.randint(width - random_margin - 1, width - 1)
dy2 = random.randint(-random_margin, random_margin)
dx3 = random.randint(width - random_margin - 1, width - 1)
dy3 = random.randint(height - random_margin - 1, height - 1)
dx4 = random.randint(-random_margin, random_margin)
dy4 = random.randint(height - random_margin - 1, height - 1)
pts1 = np.float32([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
pts2 = np.float32([[dx1, dy1], [dx2, dy2], [dx3, dy3], [dx4, dy4]])
M_warp = cv2.getPerspectiveTransform(pts1, pts2)
img_warp = cv2.warpPerspective(img, M_warp, (width, height))
return img_warp
img_warp = random_warp(img_rotate, img_rotate.shape[0], img_rotate.shape[1])
return img_warp
# collect every image in the folder to be processed
# glob.glob returns the list of all matching file paths; its only parameter is pathname.
paths = glob.glob(os.path.join('/Users/ymguo/CVsummer/jpg_before/','*.jpg'))
paths.sort() # sort the paths
print(paths)
i = 0
for path in paths:
    im = cv2.imread(path) # read each image in turn
# pic_after = []
pic_after = data_augmentation(im)
print(i)
plt.imshow(pic_after)
plt.show()
    # save each processed, renamed image into the new folder
    io.imsave("/Users/ymguo/CVsummer/pic_after/"+str(i)+'.jpg',pic_after)  # str() instead of np.str, which was removed from numpy
i += 1
#print(pic_after.dtype)
#print(pic_after.shape)
'''Some earlier, not-quite-correct attempts'''
#def file_name(file_dir):
# for root, dirs, files in os.walk(file_dir):
# count = 1
#        # all files in the current folder
# for i in files:
# im=Image.open(i)
# out=data_augmentation(im)
# out.save('/Users/ymguo/CVsummer/image/'+str(count)+'.png','PNG')
# count+=1
# print(i)
#
#file_name("/Users/ymguo/CVsummer/coll_after/")#当前文件夹
#file_name('./')#当前文件夹
#srcImgFolder = "/Users/ymguo/CVsummer/coll_after"
#def data(dir_proc):
# for file in os.listdir(dir_proc):
# fullFile = os.path.join(dir_proc, file)
# if os.path.isdir(fullFile):
# data_augmentation(fullFile)
#
#
#if __name__ == "__main__":
# data(srcImgFolder)
#str=data_dir+'/*.png'
#coll_before = io.ImageCollection(str)
#coll_after = io.ImageCollection(str,load_func=data_augmentation)
# coll = io.ImageCollection(str)
# skimage.io.ImageCollection(load_pattern,load_func=None)
# the load callback defaults to imread(), i.e. the images are read as a batch.
#print(len(coll_after)) # number of images after processing
#print(coll_before[1].shape)
#
#plt.imshow(coll_before[1])
#plt.show()
#plt.imshow(coll_after[1])
#plt.show()
#io.imshow(coll_before[10])
#io.imshow(coll_after[10])
#cv2.imshow('raw pic', coll_before[10])
#cv2.imshow('pic after data augmentation', coll_after[10])
#key = cv2.waitKey(0)
#if key == 27:
# cv2.destroyAllWindows()
# save the processed images in a loop
#for i in range(len(coll_after)):
# io.imsave("/Users/ymguo/CVsummer/coll_after/"+np.str(i)+'.png',coll_after[i])
| [
"[email protected]"
] | |
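The random_warp step above boils down to one getPerspectiveTransform / warpPerspective pair; a self-contained sketch on a synthetic image, where the fixed corner points stand in for the random margins used above:

import numpy as np
import cv2

img = np.zeros((300, 450, 3), np.uint8)
src = np.float32([[0, 0], [449, 0], [449, 299], [0, 299]])      # source corners
dst = np.float32([[20, 30], [430, 10], [440, 290], [10, 280]])  # warped corners
M = cv2.getPerspectiveTransform(src, dst)
warped = cv2.warpPerspective(img, M, (450, 300))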
231a0e1fcc8967f9072dfe360b036cfcdba74643 | c105797a5b6f5aca0b892ccdadbb2697f80fb3ab | /python_base/base7/base7_3.py | 7a29be88d6785d292d6f115f65d970948129502d | [] | no_license | jj1165922611/SET_hogwarts | 6f987c4672bac88b021069c2f947ab5030c84982 | fbc8d7363af0a4ac732d603e2bead51c91b3f1f7 | refs/heads/master | 2023-01-31T19:41:27.525245 | 2020-12-15T13:43:45 | 2020-12-15T13:43:45 | 258,734,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020-07-21
# @Author : Joey Jiang
# @File : base7_3.py
# @Software : PyCharm
# @Description: Python control-flow syntax
# 1.1 Branching
import random
a = 0
if a == 0:
print("a=0")
else:
print("a!=0")
# 1.2 Multi-way branching
a = 1
if a == 1:
print("a=1")
elif a == 2:
print("a=2")
elif a == 3:
print("a==3")
else:
print("a!=1、2、3")
# 1.3 Exercise
# Use nested branches and then multi-way branches to evaluate the piecewise function:
#        3x - 5  (x > 1)
# f(x) = x + 2   (-1 <= x <= 1)
#        5x + 3  (x < -1)
# 1.3.1 Nested branches
x = -2
if x > 1:
print(3 * x - 5)
else:
if x >= -1:
print(x + 2)
else:
print(5 * x + 3)
# 1.3.2 Multi-way branches
if x > 1:
print(3 * x - 5)
elif x >= -1:
print(x + 2)
else:
print(5 * x + 3)
# 2.1 Exercise
# Compute the sum of the integers from 1 to 100
sum1 = 0
for i in range(1, 101):
sum1 = sum1 + i
print(sum1)
# 2.2 Exercise
# Add a branch to sum the even numbers between 1 and 100
sum2 = 0
for i in range(1, 101):
if i % 2 == 0:
sum2 = sum2 + i
print(sum2)
# 2.3 Exercise
# Sum the even numbers between 1 and 100 in Python
sum3 = 0
for i in range(2, 101):
if i % 2 == 0:
sum3 = sum3 + i
print(sum3)
# 3. While loops
# 3.1 While / else
while_a = 1
while while_a == 1:
print("while_a=1")
while_a = while_a + 1
else:
print("while_a!=1")
print(while_a)
# 3.2 Simple statement group
flag = 10
while flag == 10:
flag = flag + 1
else:
print(flag)
# 4. The break statement
for i in range(4):
if i == 2:
break
print("i=", i)
# 5. The continue statement
for j in range(4):
if j == 2:
continue
print("j=", j)
# 6. Exercise
"""
Guess-the-number game: the computer picks a random integer between 1 and 100
and the player guesses; for each guess the computer answers too high / too low / correct.
"""
guess_number = random.randint(1, 100)
print(guess_number)
while True:
    number = int(input("Enter an integer between 1 and 100 > "))
    if number == guess_number:
        print("Correct!")
        break
    elif number > guess_number:
        print("Too high")
    else:
        print("Too low")
| [
"[email protected]"
] | |
7023f012b6c5a43944d35e548688f1490ce69930 | e5579c01730cbea79f9987173abacd691e09bdb0 | /lesson2_2_step8.py | f30d9ec86cba3690d38947c523b30b31e1fa220a | [] | no_license | FerrariHD/StepikSeleniumPython | dd8a29c00a30f39269b89378e77624fb1ea0f993 | c0282ae31c41453f58a2945bd762986d0289f78d | refs/heads/master | 2022-12-16T20:22:53.574099 | 2020-09-18T18:42:27 | 2020-09-18T18:42:27 | 295,847,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | from selenium import webdriver
import time
import math
import os
try:
link = "http://suninjuly.github.io/file_input.html"
browser = webdriver.Chrome()
browser.get(link)
input1 = browser.find_element_by_name("firstname")
input1.send_keys("test")
input2 = browser.find_element_by_name("lastname")
input2.send_keys("test")
input3 = browser.find_element_by_name("email")
input3.send_keys("test")
fileButton = browser.find_element_by_id("file")
current_dir = os.path.abspath(os.path.dirname(__file__))
file_path = os.path.join(current_dir, 'answer.txt')
fileButton.send_keys(file_path)
button = browser.find_element_by_tag_name("button")
button.click()
finally:
    # pause so the result of the script run can be inspected visually
    time.sleep(10)
    # close the browser after all the interactions
browser.quit()
file_path = os.path.join(current_dir, 'output.txt') | [
"[email protected]"
] | |
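The fixed time.sleep pause above can be replaced by selenium's explicit waits; a sketch of the same page flow using WebDriverWait and the selenium-4-style locator API:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

browser = webdriver.Chrome()
browser.get("http://suninjuly.github.io/file_input.html")
# wait up to 10 seconds instead of sleeping a fixed amount
button = WebDriverWait(browser, 10).until(
    EC.element_to_be_clickable((By.TAG_NAME, "button")))
button.click()
browser.quit()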
c149958de91bc27cede1a4a1969a5214a02e1a34 | 6bdb39e5c0a5342e4f9bbf7f3d94b274bc377a83 | /manage.py | 0f2c9b1eabfb475b6fa8289dec7d46236bc189a1 | [] | no_license | callprog/CallProgram | 6204d5e86e522afdd4369a9c86222e1236ae1b88 | a5d8d09f22484b9328cd32e141c92781726f4dfa | refs/heads/master | 2020-03-20T01:10:28.539789 | 2018-06-12T13:33:18 | 2018-06-12T13:33:18 | 137,068,636 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CallProgramNG.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
2051813217f0f82765b5c450ac3ffdf946dae9d5 | c3a892c113d9c1feefb0c2df6696f49912cc42b9 | /src/eposfederator/libs/base/schema.py | 7359b6140fbb68e37df4732994acbc5aeb869088 | [
"MIT"
] | permissive | mlmarius/eposfederatorwebapi | 2b89eaf2d7fd29d9714fea3331ad7bac5794c795 | 471e56a641ed53edd0cd9ffa8ab7792b9777507a | refs/heads/master | 2020-03-25T20:38:49.034655 | 2019-05-07T08:28:39 | 2019-05-07T08:28:39 | 144,139,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import marshmallow
class Schema(marshmallow.Schema):
class Meta(object):
strict = True
dateformat = "%Y-%m-%dT%H:%M:%S"
| [
"[email protected]"
] | |
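A hypothetical subclass showing how the base Schema above would be used; the Meta options (strict, dateformat) are inherited. strict=True implies marshmallow 2.x, whose load() returns an (data, errors) result object and raises on validation errors:

from marshmallow import fields

class EventSchema(Schema):  # Schema as defined in the module above
    name = fields.String(required=True)
    start = fields.DateTime()

result = EventSchema().load({"name": "quake", "start": "2019-05-07T08:28:39"})
print(result.data)  # {'name': 'quake', 'start': datetime(...)}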
79848a0117879783d1f2f0c37b6a8586c18147c6 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/IPV6-TCP-MIB.py | ae7c821868888b0850cd5394fcb2bb61fbdbaeb3 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 5,095 | py | #
# PySNMP MIB module IPV6-TCP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPV6-TCP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:45:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint")
Ipv6Address, Ipv6IfIndexOrZero = mibBuilder.importSymbols("IPV6-TC", "Ipv6Address", "Ipv6IfIndexOrZero")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, experimental, ObjectIdentity, Gauge32, Counter64, Counter32, Bits, NotificationType, IpAddress, ModuleIdentity, Integer32, iso, TimeTicks, Unsigned32, mib_2, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "experimental", "ObjectIdentity", "Gauge32", "Counter64", "Counter32", "Bits", "NotificationType", "IpAddress", "ModuleIdentity", "Integer32", "iso", "TimeTicks", "Unsigned32", "mib-2", "MibIdentifier")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
ipv6TcpMIB = ModuleIdentity((1, 3, 6, 1, 3, 86))
ipv6TcpMIB.setRevisions(('2017-02-22 00:00', '1998-01-29 00:00',))
if mibBuilder.loadTexts: ipv6TcpMIB.setLastUpdated('201702220000Z')
if mibBuilder.loadTexts: ipv6TcpMIB.setOrganization('IETF IPv6 MIB Working Group')
tcp = MibIdentifier((1, 3, 6, 1, 2, 1, 6))
ipv6TcpConnTable = MibTable((1, 3, 6, 1, 2, 1, 6, 16), )
if mibBuilder.loadTexts: ipv6TcpConnTable.setStatus('obsolete')
ipv6TcpConnEntry = MibTableRow((1, 3, 6, 1, 2, 1, 6, 16, 1), ).setIndexNames((0, "IPV6-TCP-MIB", "ipv6TcpConnLocalAddress"), (0, "IPV6-TCP-MIB", "ipv6TcpConnLocalPort"), (0, "IPV6-TCP-MIB", "ipv6TcpConnRemAddress"), (0, "IPV6-TCP-MIB", "ipv6TcpConnRemPort"), (0, "IPV6-TCP-MIB", "ipv6TcpConnIfIndex"))
if mibBuilder.loadTexts: ipv6TcpConnEntry.setStatus('obsolete')
ipv6TcpConnLocalAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 1), Ipv6Address())
if mibBuilder.loadTexts: ipv6TcpConnLocalAddress.setStatus('obsolete')
ipv6TcpConnLocalPort = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ipv6TcpConnLocalPort.setStatus('obsolete')
ipv6TcpConnRemAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 3), Ipv6Address())
if mibBuilder.loadTexts: ipv6TcpConnRemAddress.setStatus('obsolete')
ipv6TcpConnRemPort = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)))
if mibBuilder.loadTexts: ipv6TcpConnRemPort.setStatus('obsolete')
ipv6TcpConnIfIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 5), Ipv6IfIndexOrZero())
if mibBuilder.loadTexts: ipv6TcpConnIfIndex.setStatus('obsolete')
ipv6TcpConnState = MibTableColumn((1, 3, 6, 1, 2, 1, 6, 16, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("closed", 1), ("listen", 2), ("synSent", 3), ("synReceived", 4), ("established", 5), ("finWait1", 6), ("finWait2", 7), ("closeWait", 8), ("lastAck", 9), ("closing", 10), ("timeWait", 11), ("deleteTCB", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipv6TcpConnState.setStatus('obsolete')
ipv6TcpConformance = MibIdentifier((1, 3, 6, 1, 3, 86, 2))
ipv6TcpCompliances = MibIdentifier((1, 3, 6, 1, 3, 86, 2, 1))
ipv6TcpGroups = MibIdentifier((1, 3, 6, 1, 3, 86, 2, 2))
ipv6TcpCompliance = ModuleCompliance((1, 3, 6, 1, 3, 86, 2, 1, 1)).setObjects(("IPV6-TCP-MIB", "ipv6TcpGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ipv6TcpCompliance = ipv6TcpCompliance.setStatus('obsolete')
ipv6TcpGroup = ObjectGroup((1, 3, 6, 1, 3, 86, 2, 2, 1)).setObjects(("IPV6-TCP-MIB", "ipv6TcpConnState"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ipv6TcpGroup = ipv6TcpGroup.setStatus('obsolete')
mibBuilder.exportSymbols("IPV6-TCP-MIB", ipv6TcpConnTable=ipv6TcpConnTable, ipv6TcpConnEntry=ipv6TcpConnEntry, ipv6TcpMIB=ipv6TcpMIB, ipv6TcpGroups=ipv6TcpGroups, ipv6TcpConnIfIndex=ipv6TcpConnIfIndex, tcp=tcp, ipv6TcpConnRemPort=ipv6TcpConnRemPort, ipv6TcpConformance=ipv6TcpConformance, PYSNMP_MODULE_ID=ipv6TcpMIB, ipv6TcpConnState=ipv6TcpConnState, ipv6TcpConnRemAddress=ipv6TcpConnRemAddress, ipv6TcpConnLocalPort=ipv6TcpConnLocalPort, ipv6TcpCompliances=ipv6TcpCompliances, ipv6TcpConnLocalAddress=ipv6TcpConnLocalAddress, ipv6TcpCompliance=ipv6TcpCompliance, ipv6TcpGroup=ipv6TcpGroup)
| [
"[email protected]"
] | |
fd7b02271082742e01607763ac277a5e0176c40f | e0d468eb0ff270e7a3df2785916cd6310d39862c | /app/views.py | c81bbc9cbe323b6636bf93ed179fbd3a6d69303d | [] | no_license | angeldsLee/miniblog | 3f1911d86875a53c84800ce317c7eb1ecc3cce60 | d380f2abc53983bf1a24d94c06eee244a2850f09 | refs/heads/master | 2021-01-22T22:50:13.033776 | 2017-04-20T14:55:43 | 2017-04-20T14:55:43 | 85,586,529 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,300 | py | from flask import render_template, flash, redirect, session, url_for, request, g
from flask_login import login_user, logout_user, current_user, login_required
from app import app, db, lm, oid
from .forms import LoginForm, EditForm
from .models import User
from datetime import datetime
@lm.user_loader
def load_user(id):
return User.query.get(int(id))
@app.before_request
def before_request():
g.user = current_user
if g.user.is_authenticated: # before_request handler will update the time in the database.
g.user.last_seen = datetime.utcnow()
db.session.add(g.user)
db.session.commit()
@app.route('/')
@app.route('/index')
@login_required
def index():
user = g.user
posts = [ # fake array of posts
{
'author': {'nickname': 'John'},
'body': 'Beautiful day in Portland!'
},
{
'author': {'nickname': 'Susan'},
'body': 'The Avengers movie was so cool!'
}
]
return render_template('index.html',
title='home',
user=user,
posts=posts)
@app.route('/login', methods=['GET', 'POST'])
@oid.loginhandler
def login():
if g.user is not None and g.user.is_authenticated: # if a user is already logged in
return redirect(url_for('index'))
form = LoginForm()
# print "form = LoginForm()"
if form.validate_on_submit():
# print "validate_on_submit"
session['remember_me'] = form.remember_me.data
return oid.try_login(form.openid.data, ask_for=['nickname', 'email']) # trigger authentication
# print "not pass validate_on_submit"
return render_template('login.html',
title='Sign In',
form=form,
providers=app.config['OPENID_PROVIDERS'])
@app.route('/edit', methods=['GET', 'POST'])
@login_required
def edit():
form = EditForm(g.user.nickname)
if form.validate_on_submit():
g.user.nickname = form.nickname.data
g.user.about_me = form.about_me.data
db.session.add(g.user)
db.session.commit()
flash('your changes have been saved')
return redirect(url_for('edit'))
else:
form.nickname.data = g.user.nickname
form.about_me.data = g.user.about_me
return render_template('edit.html', form=form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('index'))
@app.route('/user/<nickname>')
@login_required
def user(nickname):
user = User.query.filter_by(nickname=nickname).first()
# print "dsli user"
# print user
if user == None:
flash('User %s not found.' % nickname)
return redirect(url_for('index'))
posts = [
{'author' : user, 'body' : 'test post #1'},
{'author' : user, 'body' : 'test post #2'}
]
return render_template('user.html',
user=user,
posts=posts)
@oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash('Invalid login, please try again.')
return redirect(url_for('login'))
user = User.query.filter_by(email=resp.email).first() # search our database for the email provided
if user is None: # add a new user to our database
nickname = resp.nickname
if nickname is None or nickname == "":
nickname = resp.email.split('@')[0]
nickname = User.make_unique_nickname(nickname)
user = User(nickname=nickname, email=resp.email)
db.session.add(user)
db.session.commit()
remember_me = False
if 'remember_me' in session:
remember_me = session['remember_me']
session.pop('remember_me', None)
login_user(user, remember = remember_me)
# return redirect(url_for('index'))
return redirect(request.args.get('next') or url_for('index'))
# redirect to the next page, or the index page if a next page was not provided in the request
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_error(error):
db.session.rollback()
return render_template('500.html'), 500
| [
"[email protected]"
] | |
42f0deaf250627b10751156d712d786cdc96ee26 | 6bf1b595a7f4d3cbf0995455869d438a7d0e0624 | /lingvo/tasks/milan/score_functions.py | 9c4ce867b372dfed657bec15a96096952923b006 | [
"Apache-2.0"
] | permissive | huaxz1986/lingvo | 889abc82b1bab6f37ba861c41eb480b7e89362c0 | b83984577610423e3b1c6b04ca248cd23f2842f7 | refs/heads/master | 2022-05-15T03:29:56.903688 | 2022-04-02T01:41:25 | 2022-04-02T01:41:25 | 173,536,461 | 1 | 0 | Apache-2.0 | 2019-03-03T05:52:01 | 2019-03-03T05:52:01 | null | UTF-8 | Python | false | false | 1,664 | py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of combination functions for dual-encoder models."""
from lingvo import compat as tf
from lingvo.core import base_layer
class DotProductScoreFunction(base_layer.BaseLayer):
"""Performs dot product combination between two encoded vectors."""
@classmethod
def Params(cls):
p = super().Params()
p.name = 'dot_product_score_function'
return p
def FProp(self, theta, x, y):
"""Computes pair-wise dot product similarity.
Args:
theta: NestedMap of variables belonging to this layer and its children.
x: batch of encoded representations from modality x. A float32 Tensor of
shape [x_batch_size, encoded_dim]
y: batch of encoded representations from modality y. A float32 Tensor of
shape [y_batch_size, encoded_dim]
Returns:
Pairwise dot products. A float32 Tensor with shape
`[x_batch_size, y_batch_size]`.
"""
return tf.matmul(x, y, transpose_b=True)
| [
"[email protected]"
] | |
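The FProp above is a single matmul with transpose_b; a numpy sketch of the same pairwise score, with made-up batch sizes:

import numpy as np

x = np.random.rand(4, 8).astype(np.float32)  # [x_batch_size, encoded_dim]
y = np.random.rand(6, 8).astype(np.float32)  # [y_batch_size, encoded_dim]
scores = x @ y.T  # same result as tf.matmul(x, y, transpose_b=True)
assert scores.shape == (4, 6)  # one dot product per (x, y) pair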
199bd2a9ea174fb41cb6198e1b4b418d51b38f64 | 6ff44f941c5e6486eae3e26a4e3371b2c6b547c4 | /orchestrator/core/orc_server/orchestrator/migrations/0002_protocol_port.py | 5ad436d0e44e8ee4c019e148288518e9ae44dd69 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | g2-inc/openc2-oif-orchestrator | 819ec4a02dd9c475e1279fc38b53a05e39f021b8 | 85102bb41aa0d558a3fa088e4fd6f51613599ad0 | refs/heads/master | 2020-05-17T09:49:39.800493 | 2020-04-30T19:10:24 | 2020-04-30T19:10:24 | 183,642,877 | 1 | 0 | Apache-2.0 | 2019-04-26T14:27:16 | 2019-04-26T14:27:15 | null | UTF-8 | Python | false | false | 550 | py | # Generated by Django 2.2 on 2019-05-07 14:52
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orchestrator', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='protocol',
name='port',
field=models.IntegerField(default=8080, help_text='Port of the transport', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(65535)]),
),
]
| [
"[email protected]"
] | |
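A hypothetical sketch of the model field this AddField migration implies (illustrative, not taken from the repository itself):

from django.core.validators import MinValueValidator, MaxValueValidator
from django.db import models

class Protocol(models.Model):
    port = models.IntegerField(
        default=8080,
        help_text="Port of the transport",
        validators=[MinValueValidator(1), MaxValueValidator(65535)],
    )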
bea0c9e990f035b353a5425eae9b9e5b8e57f3f9 | 2b92ca59d2c2e0604b70593f88dd33b32b10475e | /sonosweb/views/site.py | b35f25b1d07803984bd0ead3528a2569b71b9033 | [] | no_license | suspendlabs/sonosweb | 3a88f5117a807a91925563d0491274044b208fd1 | 8f84848417153ee569ab2d7fdead51c4547265a6 | refs/heads/master | 2021-06-02T14:40:02.461159 | 2014-07-18T20:52:44 | 2014-07-18T20:52:44 | 21,994,025 | 0 | 0 | null | 2021-03-19T22:34:58 | 2014-07-18T20:49:05 | CSS | UTF-8 | Python | false | false | 869 | py | import os, sys
from flask import Flask, request, redirect, url_for, \
abort, render_template, jsonify, send_from_directory, \
Response, g, Blueprint, current_app
import sonosweb  # fixed: 'sonos-web' is not a valid Python module name
site = Blueprint('site', __name__)
@site.route('/humans.txt')
def humans():
return send_from_directory(os.path.join(current_app.root_path, 'public'),
'humans.txt', mimetype='text/plain')
@site.route('/robots.txt')
def robots():
return send_from_directory(os.path.join(current_app.root_path, 'public'),
'robots.txt', mimetype='text/plain')
@site.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(current_app.root_path, 'public'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
@site.route('/', defaults={'path': 'index'})
@site.route('/<path:path>')
def index(path):
return render_template('index.html')
| [
"[email protected]"
] | |
f61e00a26f51de4e0150ca445d8bd53bc3045f81 | f97d5227578006a90585297dd7f90f49d7255f49 | /manage.py | a1f0a9a282c3e67bccf38a0b80d11bbca039c6c2 | [] | no_license | vincentomo/loloClinic | 8d68fbe9872eb698a75d3c370ed221892a36b2b5 | 2c018cacea408ba7df76697b00f40d5f2923840d | refs/heads/master | 2020-05-01T14:41:53.907964 | 2019-03-25T11:20:16 | 2019-03-25T11:20:16 | 177,527,316 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'loloAfya.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
351ef3112a8105eea8a02b98a6ff6303a19eee43 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Integration/trend_LinearTrend/cycle_30/ar_/test_artificial_128_Integration_LinearTrend_30__100.py | 7a5e907e035774475c35332c1022bd9fc95546df | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0); | [
"[email protected]"
] | |
1de76a3c674ec03834912eadda7f87f625592642 | b2b55f2aea101c050852de97e06b230e1f918014 | /number35.py | 2aad75d7b78934274817a120461e25eb854ec844 | [] | no_license | hansol4412/algorithm | d505e202969417fdb0da10421989ef84aa937dbe | a6194b55b1a8f2cf21c91b1f02d92abb3bd10cd2 | refs/heads/master | 2023-03-22T11:45:01.257350 | 2021-03-21T11:19:42 | 2021-03-21T11:19:42 | 268,396,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | # 35. Special Sort(구글)
# Given N integers with positive and negative values mixed together, move the
# negative integers to the left and the positive integers to the right.
# The relative input order of the negative and positive integers must be preserved.
a=[]
n=int(input("Enter how many numbers to sort: "))
for i in range(0,n):
a.append(int(input()))
for i in range(0,n-1):
for j in range(0,(n-i)-1):
if(a[j]>0 and a[j+1]<0):
temp=a[j]
a[j]=a[j+1]
a[j+1]=temp
print(a)
| [
"[email protected]"
] | |
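The nested loops above implement a stable partition in O(n^2); the same order-preserving result can be had in two linear passes, sketched here:

def special_sort(nums):
    # negatives first, the rest after, each group keeping its input order
    return [v for v in nums if v < 0] + [v for v in nums if v >= 0]

print(special_sort([1, -2, 3, -4, 0]))  # [-2, -4, 1, 3, 0]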
8158442771c431dd35672a9edc586edd0fe33d1d | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/leetCode/BreadthFirstSearch/103_BinaryTreeZigzagLevelOrderTraversal.py | 4445a0088162de197a6843a1be5b63a07388215c | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 797 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def zigzagLevelOrder(self, root):
        if not root:
            return []
        left2right = 1
        # 1. scan the level from left to right. -1 reverse.
        ans, stack, temp = [], [root], []
        while stack:
            temp = [node.val for node in stack]
            stack = [child for node in stack
                     for child in (node.left, node.right) if child]
            ans += [temp[::left2right]]  # Pythonic way
            left2right *= -1
        return ans
"""
[]
[1]
[1,2,3]
[0,1,2,3,4,5,6,null,null,7,null,8,9,null,10]
"""
| [
"[email protected]"
] | |
31068cd2c89faea0c9efdff5214f7c0d9abac707 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_suffered.py | f5ba9fb4722605fcd51182e2e5bcc1348faf8603 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _SUFFERED():
def __init__(self,):
self.name = "SUFFERED"
		self.definitions = ["suffer"]  # was a bare, undefined name; quoted so the class is importable
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['suffer']
| [
"[email protected]"
] | |
c613b9cab6606968167047711c8e1420c7f594ce | b4276ef90a4db14d8091a092292aeabe9a0d8eee | /state_scrapper/testCode/testPipes.py | cea579c5018f762bf705eaf80de4efb3b2e30c33 | [
"CC0-1.0"
] | permissive | nikmend/state-scrapper | df78dfd17ab8bc9d96e24fe8eb8fdbba872b0728 | 39c902320ea97605857ef74789e578dbdb7ccfd0 | refs/heads/master | 2022-12-01T13:40:04.827422 | 2020-08-08T10:28:38 | 2020-08-08T10:28:38 | 280,551,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | class myclass():
myUrls = ['asdasd',]
def addVals(self):
for i in range(1,7):
self.myUrls.append(i)
def start(self):
for i in self.myUrls:
print(i)
self.addVals()
asda = myclass()
asda.start() | [
"[email protected]"
] | |
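One detail worth knowing about the class above: myUrls is a class attribute, so every instance shares (and mutates) the same list. A minimal sketch of the difference, with illustrative names:

class Demo:
    shared = []          # one list, shared by all instances
    def __init__(self):
        self.own = []    # a fresh list per instance

a, b = Demo(), Demo()
a.shared.append(1)
print(b.shared)  # [1]: mutated through the other instance
print(b.own)     # []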
18418bc2a39d5aeb5d6d8aaa063f549811e5c5cf | 9c7f47b2f31ea4ae55e33c706efe524eb62ff177 | /HT_11/HT_11_1.py | 3fdafbfbc8e4ba14a7b22a1f6076c98a1208a2cc | [] | no_license | Kantarian/GITHUB | 05b6d5425b345667a4188ced23da76ed337b910a | fa047cbb2beb9bf372b22596bea8aaef80423872 | refs/heads/main | 2023-02-14T16:57:50.229446 | 2021-01-13T15:43:48 | 2021-01-13T15:43:48 | 311,783,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,146 | py | #1. Створити клас Calc, який буде мати атребут last_result та 4 методи. Методи повинні виконувати математичні операції з 2-ма числами, а саме додавання, віднімання,
# multiplication, and division.
# - If last_result is accessed right after an instance is created, it must return an empty value.
# - After one of the methods has been used, last_result must return the result of the previous method call.
# - Add documentation to the class (you can read this article: https://realpython.com/documenting-python-code/ )
class Calc():
def __init__(self,a,b,last_result = None):
self.a=a
self.b=b
self.last_result = last_result
def add(self):
self.last_result = self.a+self.b
return self.last_result
def mul(self):
self.last_result = self.a*self.b
return self.last_result
def div(self):
self.last_result = self.a/self.b
return self.last_result
def sub(self):
self.last_result = self.a-self.b
return self.last_result
a=int(input("Enter first number: "))
b=int(input("Enter second number: "))
obj=Calc(a,b)
choice=1
while choice!=0:
print("0. Exit")
print("1. Add")
print("2. Subtraction")
print("3. Multiplication")
print("4. Division")
print("5. Last result")
choice=int(input("Enter choice: "))
if choice==1:
print("Result: ",obj.add())
elif choice==2:
print("Result: ",obj.sub())
elif choice==3:
print("Result: ",obj.mul())
elif choice==4:
print("Result: ",round(obj.div(),2))
    elif choice==5:
        if obj.last_result is None:
            print("Last Result: None")
        else:
            print("Last Result: ",round(obj.last_result,2))
elif choice==0:
print("Exiting!")
else:
print("Invalid choice!!")
| [
"[email protected]"
] | |
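A sketch of the documentation the exercise asks for, shown on a trimmed, hypothetical copy of the class:

class DocumentedCalc:
    """Two-number calculator.

    Attributes:
        last_result: result of the most recent operation; None on a fresh instance.
    """

    def __init__(self, a, b):
        self.a, self.b, self.last_result = a, b, None

    def add(self):
        """Return a + b and store it in last_result."""
        self.last_result = self.a + self.b
        return self.last_result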
7b6e3b35ab50202f0d0329da43dc44db350f48bb | 7ce42f107d763547268fb0752973d5e9af4a72c9 | /interpolate_gp_psf.py | a373c244273036288bcba47ca0efb67b447be6f0 | [] | no_license | nhurleywalker/MWA_Bits | 4ba6d6480d40689114233a4adfaddc95ccefe9df | 3d0bf7caeab397dd3469c1da3f183d800a7c9046 | refs/heads/master | 2023-06-22T09:43:33.817258 | 2023-06-15T09:05:52 | 2023-06-15T09:05:52 | 120,707,499 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,422 | py | #!/usr/bin/env python
# Read in the PSF map
# Find all the areas which are Galactic latitude less than 10 degrees
# Interpolate the PSF
# Write out the new map
# Then when I rerun the flux-calibration, using the PSF map, it should be correct
import numpy as np
from astropy.io import fits
from astropy import wcs
from optparse import OptionParser
from astropy.coordinates import SkyCoord
from astropy import units as u
from scipy.interpolate import griddata
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg
usage="Usage: %prog [options] <file>\n"
parser = OptionParser(usage=usage)
parser.add_option('--psf',type="string", dest="psf",
help="The filename of the psf image you want to read in.")
parser.add_option('--output',type="string", dest="output", default="interpolated_GP_PSF.fits",
help="The filename of the output interpolated PSF image.")
(options, args) = parser.parse_args()
latitude=-26.70331940
# Read in the PSF
psf = fits.open(options.psf)
a = psf[0].data[0]
b = psf[0].data[1]
pa = psf[0].data[2]
blur = psf[0].data[3]
# Diagnostic plot
#plt.imshow(a,vmin=0.05,vmax=0.2)
#plt.colorbar()
#plt.savefig("original_a.png")
w_psf = wcs.WCS(psf[0].header,naxis=2)
#create an array but don't set the values (they are random)
indexes = np.empty( (psf[0].data.shape[1]*psf[0].data.shape[2],2),dtype=int)
#since I know exactly what the index array needs to look like I can construct
# it faster than list comprehension would allow
#we do this only once and then recycle it
idx = np.array([ (j,0) for j in range(psf[0].data.shape[2])])
j=psf[0].data.shape[2]
for i in range(psf[0].data.shape[1]):
idx[:,1]=i
indexes[i*j:(i+1)*j] = idx
# The RA and Dec co-ordinates of each location in the PSF map
# Each one is a 1D array of shape 64800 (from 180 (Dec) x 360 (RA))
ra_psf,dec_psf = w_psf.wcs_pix2world(indexes,1).transpose()
# A 1D array of co-ordinates at each location
c_psf = SkyCoord(ra=ra_psf, dec=dec_psf, unit=(u.degree, u.degree))
# A 1D list of indices referring to the locations where we want to use the data
gal_indices = np.where(abs(c_psf.galactic.b.value)>10.)
# A 1D list of pairs of co-ordinates ("points") referring to the locations where we want to use the data
gin = gal_indices[0]
idx = indexes[gin[:]]
a_data = a[idx[:,1], idx[:,0]]
b_data = b[idx[:,1], idx[:,0]]
pa_data = pa[idx[:,1], idx[:,0]]
blur_data = blur[idx[:,1], idx[:,0]]
grid_x, grid_y = np.mgrid[0:179:180j, 0:359:360j]
# Only interpolate over points which are not NaN
a_cubic_interp = griddata(idx[np.logical_not(np.isnan(a_data))], a_data[np.logical_not(np.isnan(a_data))], (grid_y, grid_x), method="linear")
b_cubic_interp = griddata(idx[np.logical_not(np.isnan(a_data))], b_data[np.logical_not(np.isnan(a_data))], (grid_y, grid_x), method="linear")
pa_cubic_interp = griddata(idx[np.logical_not(np.isnan(a_data))], pa_data[np.logical_not(np.isnan(a_data))], (grid_y, grid_x), method="linear")
blur_cubic_interp = griddata(idx[np.logical_not(np.isnan(a_data))], blur_data[np.logical_not(np.isnan(a_data))], (grid_y, grid_x), method="linear")
# Diagnostic plot
#plt.clf()
#plt.imshow(a_cubic_interp,vmin=0.05,vmax=0.2)
#plt.colorbar()
#plt.savefig("cubicinterp_a.png")
psf[0].data[0] = a_cubic_interp
psf[0].data[1] = b_cubic_interp
psf[0].data[2] = pa_cubic_interp
psf[0].data[3] = blur_cubic_interp
psf.writeto(options.output, overwrite=True)  # 'clobber' was removed from astropy; 'overwrite' is the current keyword
| [
"[email protected]"
] | |
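The interpolation step above in isolation: scipy's griddata with the same (points, values, grid) call pattern, on toy data:

import numpy as np
from scipy.interpolate import griddata

pts = np.random.default_rng(1).uniform(0, 1, (200, 2))   # scattered sample points
vals = np.sin(pts[:, 0] * 6) * np.cos(pts[:, 1] * 6)     # values at those points
gx, gy = np.mgrid[0:1:50j, 0:1:50j]                      # regular target grid
grid = griddata(pts, vals, (gx, gy), method="linear")    # NaN outside the convex hull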
224d87a22335339e3a3a573013fcc5a5ebd3426a | 38eef62739e4a95fba2280f46d97712d14f15014 | /e2g9/SintBrainiac.py~ | 93617920b0c4e8b43fd3cb126f90c87b7a49313f | [] | no_license | pwilthew/Brainiax | 6cfe03b84ef75c78726690d2980f0c05fc9b6ff5 | e0c197e7d6dafdc159d303b3a153812abd53c912 | refs/heads/master | 2021-01-21T17:41:14.473269 | 2014-01-27T21:28:45 | 2014-01-27T21:28:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,389 | #!/usr/bin/env python
#coding: utf8
# Syntactic analyzer for the Brainiac language.
# Module:  SintBrainiac
# Authors: Wilthew, Patricia 09-10910
#          Leopoldo Pimentel 06-40095
import ply.lex as lex
import ply.yacc as yacc
import sys
import funciones
from LexBrainiax import tokens
contador = -1
# Classes used to print the syntax tree
# Class for NUMBER
class numero:
def __init__(self,value):
self.type = "Numero"
self.value = value
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = str(self.value) + " "
contador = contador - 1
return str_
# Class for IDENTIFIER
class ident:
def __init__(self,name):
self.type = "Identificador"
self.name = name
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = str(self.name) + " "
contador = contador - 1
return str_
# Class for UNARY EXPRESSION
class op_un:
def __init__(self,pre,e):
self.pre = pre
self.e = e
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "EXPRESION_UNARIA\n" + tabs + "Operador: " + str(self.pre) + "\n" + tabs + "Valor: " + str(self.e) + " "
contador = contador - 1
return str_
# Class for BINARY EXPRESSION
class op_bin:
def __init__(self,left,right,op):
self.left = left
self.right = right
self.op = op
if op == '+':
self.op = 'Suma'
elif op == '-':
self.op = 'Resta'
elif op == '~':
self.op = 'Negacion'
elif op == '*':
self.op = 'Multiplicacion'
elif op == '%':
self.op = 'Modulo'
elif op == '/':
self.op = 'Division'
elif op == '=':
self.op = 'Igual'
elif op == '/=':
self.op = 'Desigual'
elif op == '<':
self.op = 'Menor que'
elif op == '>':
self.op = 'Mayor que'
elif op == '>=':
self.op = 'Mayor o igual que'
elif op == '<=':
self.op = 'Menor o igual que'
elif op == '&':
self.op = 'Concatenacion'
elif op == '#':
self.op = 'Inspeccion'
elif op == '\/':
self.op = 'Or'
else:
self.op = 'And'
def __str__(self):
global contador
contador = contador + 1
tabs = contador*" "
tabs_plus = " " + tabs
str_ = "EXPRESION_BINARIA\n" + tabs + "Operacion: " + str(self.op) + "\n"
str_ = str_ + tabs + "Operador izquierdo: " + str(self.left) + "\n" + tabs + "Operador derecho: " + str(self.right) + " "
contador = contador - 1
return str_
# Class for INDEFINITE ITERATION (while)
class inst_while:
def __init__(self,cond,inst):
self.cond = cond
self.inst = inst
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "ITERACION_INDETERMINADA\n" + tabs + "Condicion: "
str_ = str_+ str(self.cond) + "\n" + tabs + "Instruccion: " + str(self.inst) + " "
contador = contador - 1
return str_
# Class for DEFINITE ITERATION (for)
class inst_for:
def __init__(self,ident,inf,sup,inst):
self.ident = ident
self.inf = inf
self.sup = sup
self.inst = inst
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "ITERACION_DETERMINADA\n" + tabs + "Identificador: " + str(self.ident)
str_ = str_ + "\n" + tabs + "Cota inf: " + str(self.inf) +", Cota sup: "
str_ = str_ + str(self.sup) + "\n" + tabs + "Instruccion: " + str(self.inst) + " "
contador = contador - 1
return str_
# Class for CONDITIONAL
class inst_if:
def __init__(self,cond,instr0,instr1):
self.cond = cond
self.instr0 = instr0
self.instr1 = instr1
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
aux = ""
if self.instr1 != None:
aux = "\n" +tabs + "Else: " + str(self.instr1) + " "
str_ = "CONDICIONAL\n" + tabs + "Guardia: " + str(self.cond) + "\n" + tabs + "Exito: " + str(self.instr0) + aux
contador = contador - 1
return str_
# Class for B-INSTRUCTION
class inst_b:
def __init__(self, slist, ident):
self.slist = slist
self.ident = ident
def __pop__(self):
return self.slist.pop()
def __len__(self):
return len(self.slist)
def __str__(self):
global contador
contador = contador +1
tabs = " "*contador
lista_simbolos = ""
for elem in self.slist:
lista_simbolos = lista_simbolos + str(elem)
str_ = "B-INSTRUCCION\n" + tabs + "Lista de simbolos: " + lista_simbolos + "\n"
straux = tabs + "Identificador: " + str(self.ident) + " "
contador = contador - 1
return str_ + straux
# Class for ASSIGNMENT
class inst_asig:
def __init__(self,ident,val):
self.ident = ident
self.val = val
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "ASIGNACION\n" + tabs + "Identificador: " + str(self.ident) + "\n" + tabs + "Valor: " + str(self.val) + " "
contador = contador - 1
return str_
# Class for READ
class inst_read:
def __init__(self,ident):
self.ident = ident
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "READ\n" + tabs + "Identificador: " + str(self.ident.name) + " "
contador = contador - 1
return str_
# Class for WRITE
class inst_write:
def __init__(self,expr):
self.expr = expr
def __str__(self):
global contador
contador += 1
tabs = contador*" "
strw = "WRITE" + "\n" + tabs + "Contenido: "
str1 = strw + str(self.expr) + " "
contador = contador - 1
return str1
# Class for SEQUENCING
class inst_list:
def __init__(self):
self.lista = []
def __len__(self):
return len(self.lista)
def __pop__(self):
return self.lista.pop()
def __str__(self):
global contador
contador = contador + 1
self.lista.reverse()
str_ = "SECUENCIACION\n"
contador = contador + 1
tabs = contador*" "
while self.lista:
elemento = self.lista.pop()
str_ = str_ + tabs + str(elemento)
if len(self.lista) != 0:
str_ = str_ + "\n" + tabs + "\n"
contador = contador - 1
return str_
def print_(self,contador):
self.lista.reverse()
while self.lista:
elemento = self.lista.pop()
elemento.print_(contador,0)
tabs = contador*" "
if len(self.lista) != 0:
str_ = str_ + ";"
return str_
# Class for BLOCK
class bloque:
def __init__(self,lista):
self.lista = lista
def __len__(self):
return len(self.lista)
def __str__(self):
global contador
contador = contador + 1
tabs = " "*contador
str_ = "BLOQUE\n"
str_ = str_ + str(self.lista)
contador = contador - 1
return str_
def main():
    # Open the file and store its contents in the string 'codigo'
file_name = sys.argv[1]
fp = open(file_name)
codigo = fp.read()
    # Grammar handling and syntax-tree construction
    # Definition of the start symbol
start = 'programa'
    # Operator precedence
precedence = (
('left','TkDisyuncion'),
('left','TkConjuncion'),
('left','TkIgual','TkDesigual'),
('left','TkMenor','TkMayor','TkMayorIgual','TkMenorIgual'),
('left','TkMas','TkResta'),
('left','TkMult','TkDiv','TkMod'),
('left','TkConcat'),
('left','TkAt'),
('right','uminus','unot', 'uinspeccion'),
)
    # PROGRAM
def p_programa(p):
''' programa : declaracion TkExecute instlist TkDone
| TkExecute instlist TkDone '''
if len(p) == 5:
p[0] = p[3]
elif len(p) == 4:
p[0] = p[2]
    # UNARY TERM
def p_term_num(p):
''' term : TkNum '''
p[0] = numero(p[1])
str_ = ""
tabs = (contador+1)*" "
    # IDENTIFIER
def p_term_ident(p):
''' term : TkIdent '''
p[0] = ident(p[1])
str_ = ""
tabs = (contador+1)*" "
    # UNARY ARITHMETIC EXPRESSION
def p_exp_un(p):
''' exp_un : TkResta exp %prec uminus
| TkNegacion exp %prec unot
| TkInspeccion exp %prec uinspeccion '''
p[0] = op_un(p[1],p[2])
    # EXPRESSION
def p_exp(p):
''' exp : term
| exp_un
| TkParAbre exp TkParCierra
| TkCorcheteAbre exp TkCorcheteCierra
| TkLlaveAbre exp TkLlaveCierra
| exp TkMas exp
| exp TkMult exp
| exp TkMod exp
| exp TkDiv exp
| exp TkResta exp
| TkTrue
| TkFalse
| exp TkIgual exp
| exp TkDesigual exp
| exp TkMenor exp
| exp TkMayor exp
| exp TkMenorIgual exp
| exp TkMayorIgual exp
| exp TkDisyuncion exp
| exp TkConjuncion exp
| exp TkConcat exp '''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4 and p[1] != '(' and p[1] != '[' and p[1] != '{':
p[0] = op_bin(p[1],p[3],p[2])
else:
p[0] = p[2]
    # ASSIGNMENT
def p_instruccion_asignacion(p):
''' instruccion : TkIdent TkAsignacion exp '''
p[0] = inst_asig(p[1],p[3])
# READ
def p_instruccion_read(p):
''' instruccion : TkRead exp '''
p[0] = inst_read(p[2])
# WRITE
def p_instruccion_write(p):
''' instruccion : TkWrite exp '''
p[0] = inst_write(p[2])
# WHILE
def p_instruccion_while(p):
''' instruccion : TkWhile exp TkDo instlist TkDone '''
p[0] = inst_while(p[2],p[4])
# FOR
def p_instruccion_for(p):
''' instruccion : TkFor TkIdent TkFrom exp TkTo exp TkDo instlist TkDone'''
p[0] = inst_for(p[2],p[4],p[6],p[8])
# IF
def p_instruccion_if(p):
''' instruccion : TkIf exp TkThen instlist TkDone
| TkIf exp TkThen instlist TkElse instlist TkDone '''
if len(p) == 6:
p[0] = inst_if(p[2],p[4],None)
else:
p[0] = inst_if(p[2],p[4],p[6])
    # BLOCK OF INSTRUCTIONS
def p_instruccion_bloque(p):
''' instruccion : declaracion TkExecute instlist TkDone
| TkExecute instlist TkDone '''
if len(p) == 4:
p[0] = inst_bloque(p[2])
elif len(p) == 5:
p[0] = inst_bloque(p[3])
    # B-INSTRUCTION BLOCK (e.g.: {lista_tape} At [a] )
def p_instruccion_b(p):
''' instruccion : TkLlaveAbre lista_tape TkLlaveCierra TkAt ident_tape '''
p[0] = inst_b(p[2], p[5])
def p_ident_tape(p):
''' ident_tape : TkCorcheteAbre exp TkCorcheteCierra
| TkIdent '''
if len(p) == 4:
p[0] = p[2]
elif len(p) == 2:
p[0] = p[1]
    # LIST OF B-INSTRUCTION SYMBOLS (e.g.: ++++--...>>><..)
def p_lista_tape(p):
''' lista_tape : lista_tape simb_tape
| simb_tape '''
if len(p) == 2:
p[0] = []
p[0].append(p[1])
else:
p[0] = p[1]
p[0].append(p[2])
def p_simb_tape(p):
'''simb_tape : TkPunto
| TkMayor
| TkMenor
| TkMas
| TkResta
| TkComa '''
p[0] = p[1]
    # SEQUENCING OF INSTRUCTIONS
def p_instlist(p):
''' instlist : instlist semicoloninst
| instruccion '''
if len(p) == 2:
p[0] = inst_list()
p[0].lista.append(p[1])
elif len(p) == 3:
p[0] = p[1]
p[0].lista.append(p[2])
def p_commainst(p):
''' semicoloninst : TkPuntoYComa instruccion '''
p[0] = p[2]
    # DECLARATION
def p_declaracion(p):
''' declaracion : TkDeclare declist '''
def p_declist(p):
''' declist : dec TkPuntoYComa declist
| dec '''
def p_dec(p):
''' dec : varlist TkType tipo '''
def p_varlist(p):
'''varlist : TkIdent TkComa varlist
| TkIdent '''
def p_tipo_int(p):
'tipo : TkInteger'
def p_tipo_bool(p):
'tipo : TkBoolean'
def p_tipo_tape(p):
'tipo : TkTape'
    # Parser error function
def p_error(p):
c = funciones.hallar_columna(codigo,p)
print "Error de sintaxis en linea %s, columna %s: token \'%s\' inesperado." % (p.lineno,c,p.value[0])
sys.exit(0)
    # Build the parser
parser = yacc.yacc()
# LOGGER
# Set up a logging object
import logging
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
    # Build the parse tree
arbol = parser.parse(codigo,debug=log)
    # Print the tree
print funciones.print_arbol(arbol)
if __name__ == "__main__":
main()
| [
"patwilthew@cookie.(none)"
] | patwilthew@cookie.(none) |
|
eba5b10fdb01d5e9de0a691c5d7012932098fcb9 | b8b0a29b6f5bac70c408e46e6df1d6583e9ad8c0 | /portdata/serializers.py | 83fafe1145f099424288819777404e25e9f5cc1e | [] | no_license | varunsak/sdgindia | 20c41575a6f0c638662f1df6bd7a121ce3da8cf8 | a7fe9f6770e7b6ba628c376e773b11a19f58ccf4 | refs/heads/master | 2020-04-08T02:33:04.252409 | 2019-01-19T19:56:43 | 2019-01-19T19:56:43 | 158,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from rest_framework import serializers
from .models import PortData
class DataSerializer(serializers.ModelSerializer):
id = serializers.IntegerField(read_only=True)
class Meta:
model = PortData
fields = (
'id', 'product', 'quantity', 'unit', 'item_rate_inv', 'currency', 'total_amount', 'fob_inr', 'item_rate_inr', 'fob_usd', 'foreign_port', 'foreign_country', 'india_port', 'india_company',
'foreign_company', 'invoice_number', 'hs_code'
)
| [
"[email protected]"
] | |
c33e6baaffb8face637be67783a763a26bfa8b9a | f13988ddd8623c3c3df09c9ed4d8fce837281624 | /ps4/segmentation.py | 7ec68d6f3dc228a0fa538d2ed0d85b4efb9a9b8a | [] | no_license | jingxa/cs231a_my | aee8e5aafe2d5654dfde2ea827038397fdaafb53 | ddb70703bf31ecc7ae8aa715ec603ab007935cb7 | refs/heads/master | 2020-03-15T15:11:35.998984 | 2018-11-07T14:19:56 | 2018-11-07T14:19:56 | 132,206,754 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,966 | py | import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
from imageio import imread
from scipy.spatial.distance import cdist
def kmeans_segmentation(im, features, num_clusters):
H, W = im.shape[0], im.shape[1]
N = features.shape[0]
    # Step 1: randomly pick num_clusters seed points
    center_idx = np.random.randint(N, size=num_clusters)
    centriods = features[center_idx]
    matrixes = np.zeros((H, W))
    # Step 2: iterate the assignment until convergence
    while True:
        # distance from every pixel to each center
        dist = np.zeros((N, num_clusters))
        for i in range(num_clusters):
            dist[:, i] = np.linalg.norm(features - centriods[i, :], axis=1)  # distances
        # find the nearest center
        nearest = np.argmin(dist, axis=1) # (N,1)
        # update the centers; copy so the convergence test compares old vs. new values
        prev_centriods = centriods.copy()
        for i in range(num_clusters):
            pixels_idx = np.where(nearest == i) # pixels assigned to the i-th center
            cluster = features[pixels_idx] # (M,5)
            centriods[i, :] = np.mean(cluster, axis=0) # recompute the mean
        # converged
        if np.array_equal(prev_centriods, centriods):
            break
    pixels_clusters = np.reshape(nearest, (H, W))
return pixels_clusters
def meanshift_segmentation(im, features, bandwidth):
H, W = im.shape[0], im.shape[1]
    N, M = features.shape  # number of samples, feature dimension
    mask = np.ones(N)
    clusters = []
    while np.sum(mask) > 0:  # some pixels are still unvisited
        loc = np.argwhere(mask > 0)
        idx = loc[int(np.random.choice(loc.shape[0], 1)[0])][0]  # randomly pick an unvisited pixel
        mask[idx] = 0  # mark it as visited
        current_mean = features[idx]
        prev_mean = current_mean
        while True:
            dist = np.linalg.norm(features - prev_mean, axis=1)
            incircle = dist < bandwidth  # points within the bandwidth radius
            mask[incircle] = 0
            current_mean = np.mean(features[incircle], axis=0)  # new mean
            # stable mean: converged
            if np.linalg.norm(current_mean - prev_mean) < 0.01 * bandwidth:
                break
            prev_mean = current_mean
        isValid = True
        for cluster in clusters:
            if np.linalg.norm(cluster - current_mean) < 0.5 * bandwidth:  # the two merge into one cluster
                isValid = False
        if isValid:  # register a new cluster
            clusters.append(current_mean)
    pixels_clusters = np.zeros((H, W))
    clusters = np.array(clusters)
    for i in range(N):  # assign each pixel to its nearest cluster center
        idx = np.argmin(np.linalg.norm(features[i, :] - clusters, axis=1))
        h = int(i/W)
        w = i % W
        pixels_clusters[h, w] = idx
return pixels_clusters.astype(int)
def draw_clusters_on_image(im, pixel_clusters):
num_clusters = int(pixel_clusters.max()) + 1
average_color = np.zeros((num_clusters, 3))
cluster_count = np.zeros(num_clusters)
for i in range(im.shape[0]):
for j in range(im.shape[1]):
c = pixel_clusters[i,j]
cluster_count[c] += 1
average_color[c, :] += im[i, j, :]
for c in range(num_clusters):
average_color[c,:] /= float(cluster_count[c])
out_im = np.zeros_like(im)
for i in range(im.shape[0]):
for j in range(im.shape[1]):
c = pixel_clusters[i,j]
out_im[i,j,:] = average_color[c,:]
return out_im
if __name__ == '__main__':
# Change these parameters to see the effects of K-means and Meanshift
num_clusters = [5]
bandwidths = [0.3]
for filename in ['lake', 'rocks', 'plates']:
img = imread('data/%s.jpeg' % filename)
# Create the feature vector for the images
features = np.zeros((img.shape[0] * img.shape[1], 5))
for row in range(img.shape[0]):
for col in range(img.shape[1]):
features[row*img.shape[1] + col, :] = np.array([row, col,
img[row, col, 0], img[row, col, 1], img[row, col, 2]]) #
features_normalized = features / features.max(axis = 0)
# Part I: Segmentation using K-Means
# for nc in num_clusters:
# clustered_pixels = kmeans_segmentation(img, features_normalized, nc)
# cluster_im = draw_clusters_on_image(img, clustered_pixels)
# plt.imshow(cluster_im)
# plt.title('K-means with %d clusters on %s.jpeg' % (int(nc), filename))
# plt.show()
# # Part II: Segmentation using Meanshift
for bandwidth in bandwidths:
clustered_pixels = meanshift_segmentation(img, features_normalized, bandwidth)
cluster_im = draw_clusters_on_image(img, clustered_pixels)
plt.imshow(cluster_im)
plt.title('Meanshift with bandwidth %.2f on %s.jpeg' % (bandwidth, filename))
plt.show()
| [
"[email protected]"
] | |
9e134e9dc6bdf1edb51087e18635b3916beb92af | 0de56aed5714b04f2236300b2ba8252d9a0bf71a | /2016_11_Python/GUI/PyQt/firstPyQt.py | a9e43e6c73e48b32eb7fe9e9d5f87578f6fe1759 | [] | no_license | JasonatWang/LearnToProgram | fb5d6a0ade9732312cf8d257d70537af76fcb891 | 677872a940bfe635901460385d22d4ee45818c08 | refs/heads/master | 2020-12-03T05:21:00.315712 | 2016-12-23T06:12:58 | 2016-12-23T06:13:17 | 68,612,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'firstPyQt.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(802, 592)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 801, 391))
self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(self.verticalLayoutWidget)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.pushButton_2 = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton_2.setObjectName("pushButton_2")
self.horizontalLayout.addWidget(self.pushButton_2)
self.pushButton = QtWidgets.QPushButton(self.verticalLayoutWidget)
self.pushButton.setObjectName("pushButton")
self.horizontalLayout.addWidget(self.pushButton)
self.verticalLayout.addLayout(self.horizontalLayout)
self.verticalLayoutWidget.raise_()
self.label.raise_()
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 802, 30))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.label.setText(_translate("MainWindow", "Hello World!"))
self.pushButton_2.setText(_translate("MainWindow", "OK"))
self.pushButton.setText(_translate("MainWindow", "Cancel")) | [
"[email protected]"
] | |
58343eabd4c1b2fc4cf60575af3f1d3455845188 | f51d53650185500e379805b15855b0330b6a0e7f | /src/pdf_routines.py | beb1450a8ccec7c9e6ce6b9fd2a9fb344aaa8a09 | [
"MIT"
] | permissive | olrodrig/SNII_ETOS | 0888dfcadd450c93a24f22462ebb4ac37d40854d | a11afd49c9c32bd249a11c935880d132ac17849a | refs/heads/master | 2022-05-28T20:45:29.794411 | 2020-04-25T08:06:43 | 2020-04-25T08:06:43 | 258,718,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,709 | py | import numpy as np
from sklearn import mixture
def snii_templates_epochs():
JD_ln_template, JD_fd_template = {}, {}
JD_ln_template['1986L'] , JD_fd_template['1986L'] = 2446705.5 , 2446711.1
JD_ln_template['1990E'] , JD_fd_template['1990E'] = 2447932.5 , 2447937.62
JD_ln_template['1999br'] , JD_fd_template['1999br'] = 2451272.9 , 2451280.9
JD_ln_template['1999em'] , JD_fd_template['1999em'] = 2451471.95 , 2451479.51
JD_ln_template['1999gi'] , JD_fd_template['1999gi'] = 2451515.68 , 2451522.32
JD_ln_template['1999go'] , JD_fd_template['1999go'] = 2451527.7 , 2451535.7
JD_ln_template['2000dc'] , JD_fd_template['2000dc'] = 2451758.8 , 2451765.8
JD_ln_template['2000dj'] , JD_fd_template['2000dj'] = 2451785.487, 2451795.9
JD_ln_template['2000el'] , JD_fd_template['2000el'] = 2451835.7 , 2451840.6
JD_ln_template['2001X'] , JD_fd_template['2001X'] = 2451958.0 , 2451968.3
JD_ln_template['2001do'] , JD_fd_template['2001do'] = 2452131.7 , 2452135.7
JD_ln_template['2001fa'] , JD_fd_template['2001fa'] = 2452195.9 , 2452200.9
JD_ln_template['2002an'] , JD_fd_template['2002an'] = 2452292.04 , 2452297.02
JD_ln_template['2002ce'] , JD_fd_template['2002ce'] = 2452369.7 , 2452375.378
JD_ln_template['2002gd'] , JD_fd_template['2002gd'] = 2452549.28 , 2452550.53
JD_ln_template['2003Z'] , JD_fd_template['2003Z'] = 2452660.2 , 2452669.2
JD_ln_template['2003bn'] , JD_fd_template['2003bn'] = 2452691.5 , 2452692.83
JD_ln_template['2003ej'] , JD_fd_template['2003ej'] = 2452770.8 , 2452779.8
JD_ln_template['2003hg'] , JD_fd_template['2003hg'] = 2452860.9 , 2452869.9
JD_ln_template['2003hl'] , JD_fd_template['2003hl'] = 2452863.0 , 2452872.0
JD_ln_template['2003iq'] , JD_fd_template['2003iq'] = 2452918.47 , 2452921.458
JD_ln_template['2004ci'] , JD_fd_template['2004ci'] = 2453168.9 , 2453171.8
JD_ln_template['2004er'] , JD_fd_template['2004er'] = 2453269.88 , 2453273.9
JD_ln_template['2004et'] , JD_fd_template['2004et'] = 2453270.517, 2453271.483
JD_ln_template['2004fc'] , JD_fd_template['2004fc'] = 2453292.89 , 2453295.124
JD_ln_template['2004fx'] , JD_fd_template['2004fx'] = 2453300.92 , 2453306.93
JD_ln_template['2005ay'] , JD_fd_template['2005ay'] = 2453449.121, 2453456.58
JD_ln_template['2005cs'] , JD_fd_template['2005cs'] = 2453548.43 , 2453549.41
JD_ln_template['2005dz'] , JD_fd_template['2005dz'] = 2453615.8 , 2453623.71
JD_ln_template['2006Y'] , JD_fd_template['2006Y'] = 2453763.09 , 2453770.08
JD_ln_template['2006bc'] , JD_fd_template['2006bc'] = 2453811.087, 2453819.15
JD_ln_template['2006bp'] , JD_fd_template['2006bp'] = 2453833.677, 2453834.647
JD_ln_template['2006it'] , JD_fd_template['2006it'] = 2454004.69 , 2454009.67
JD_ln_template['2006iw'] , JD_fd_template['2006iw'] = 2454009.737, 2454011.798
JD_ln_template['2007hv'] , JD_fd_template['2007hv'] = 2454342.5 , 2454352.87
JD_ln_template['2007il'] , JD_fd_template['2007il'] = 2454345.94 , 2454353.95
JD_ln_template['2007pk'] , JD_fd_template['2007pk'] = 2454409.83 , 2454414.81
JD_ln_template['2008bh'] , JD_fd_template['2008bh'] = 2454538.57 , 2454548.66
JD_ln_template['2008br'] , JD_fd_template['2008br'] = 2454559.323, 2454564.265
JD_ln_template['2008ho'] , JD_fd_template['2008ho'] = 2454787.77 , 2454796.61
JD_ln_template['2008if'] , JD_fd_template['2008if'] = 2454802.73 , 2454812.71
JD_ln_template['2008il'] , JD_fd_template['2008il'] = 2454822.69 , 2454827.64
JD_ln_template['2008in'] , JD_fd_template['2008in'] = 2454824.45 , 2454824.95
JD_ln_template['2009ao'] , JD_fd_template['2009ao'] = 2454886.62 , 2454894.62
JD_ln_template['2009bz'] , JD_fd_template['2009bz'] = 2454912.03 , 2454919.98
JD_ln_template['2010id'] , JD_fd_template['2010id'] = 2455450.82 , 2455454.743
JD_ln_template['2012aw'] , JD_fd_template['2012aw'] = 2456001.769, 2456003.349
JD_ln_template['2013am'] , JD_fd_template['2013am'] = 2456371.698, 2456373.138
JD_ln_template['2013by'] , JD_fd_template['2013by'] = 2456402.872, 2456403.752
JD_ln_template['2013ej'] , JD_fd_template['2013ej'] = 2456497.04 , 2456497.625
JD_ln_template['2013fs'] , JD_fd_template['2013fs'] = 2456570.82 , 2456571.737
JD_ln_template['2013hj'] , JD_fd_template['2013hj'] = 2456635.7 , 2456638.8
JD_ln_template['2014G'] , JD_fd_template['2014G'] = 2456668.35 , 2456671.111
JD_ln_template['LSQ14gv'], JD_fd_template['LSQ14gv'] = 2456670.7 , 2456674.8
JD_ln_template['2014cx'] , JD_fd_template['2014cx'] = 2456901.89 , 2456902.90
JD_ln_template['2014cy'] , JD_fd_template['2014cy'] = 2456898.8 , 2456900.5
JD_ln_template['2015bs'] , JD_fd_template['2015bs'] = 2456915.5 , 2456925.5
JD_ln_template['2016esw'], JD_fd_template['2016esw'] = 2457607.802, 2457608.814
return JD_ln_template, JD_fd_template
#compute weighted average
def weighted_average(x, sigma_x, with_intrinsic_error=True):
if len(x) > 1:
if with_intrinsic_error:
residuals = x - np.mean(x)
rms = np.sqrt(np.sum(residuals**2)/float(len(residuals)-1))
sigma_0s = np.linspace(0.0, rms, 100)
else:
sigma_0s = np.array([0.0])
m2lnL_min = 1.e90
for sigma_0 in sigma_0s:
Var = sigma_x**2 + sigma_0**2
w_ave = np.sum(x/Var)/np.sum(1.0/Var)
m2lnL = np.sum(np.log(Var)+(x-w_ave)**2/Var)
if m2lnL < m2lnL_min:
m2lnL_min = m2lnL
best_x = w_ave
best_error = np.sqrt(1.0/np.sum(1.0/Var))
else:
best_x, best_error = x[0], sigma_x[0]
return best_x, best_error
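# Illustrative usage (not in the original module; the numbers are made up):
def _weighted_average_demo():
    x = np.array([10.2, 9.8, 10.5])
    sigma_x = np.array([0.3, 0.2, 0.4])
    return weighted_average(x, sigma_x, with_intrinsic_error=True)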
# pick random values given a pdf
def values_from_distribution(x, pdf, N):
x_sample = np.random.choice(x, N, p=pdf/np.sum(pdf)) #sum of probabilities must to be 1
return x_sample
#Simpson's rule
def simpson(x,f):
integral = (f[0] + f[-1]) / 3.0 #extremes
n = len(x)
four = "o"
for i in range(1, n - 1):
if four == "o":
integral += f[i] * 4.0 / 3.0
four = "x"
else:
integral += f[i] * 2.0 / 3.0
four = "o"
integral = (x[1] - x[0]) * integral
return integral
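# Quick illustrative check (not in the original file): with uniformly spaced
# samples, integrating f(x) = x**2 on [0, 1] should give roughly 1/3.
def _simpson_demo():
    x = np.linspace(0.0, 1.0, 101)
    return simpson(x, x**2)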
# discard possible outliers using Tukey's rule
def tukey_rule(x, k=1.5):
Q1, Q3 = np.quantile(x, [0.25, 0.75])
IQR = Q3 - Q1
x = x[x>=Q1-k*IQR]
x = x[x<=Q3+k*IQR]
return x
#return a normalized gaussian pdf
def gaussian_pdf(mu, sigma, x_sampled):
g_sampled = np.exp(-0.5*(mu-x_sampled)**2/sigma**2)
g_sampled = g_sampled / simpson(x_sampled, g_sampled)
return g_sampled
#return a uniform pdf
def uniform_pdf(x_min, x_max, x):
h = 1.0/(x_max-x_min)
pdf = np.linspace(h, h, len(x))
for i in range(0, len(x)):
if x[i] < x_min or x[i] > x_max:
pdf[i] = 0.0
return pdf
#return a pdf computed as a mixture of Gaussians
def get_pdf(y, y_sampled, max_components=2):
x, x_sampled = y.reshape(-1,1), y_sampled.reshape(-1,1)
BIC_min = 1.e90
for n_components in range(1, max_components+1):
gmm = mixture.GaussianMixture(n_components=n_components)
model = gmm.fit(x)
BIC = model.bic(x)
if BIC < BIC_min:
BIC_min = BIC
model_min = model
ln_pdf = model_min.score_samples(x_sampled)
pdf = np.exp(ln_pdf)
return pdf
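# Illustrative check (not in the original file): fit a bimodal toy sample and
# evaluate the selected Gaussian mixture on a grid.
def _get_pdf_demo():
    rng = np.random.default_rng(0)
    y = np.concatenate([rng.normal(-2.0, 0.5, 200), rng.normal(2.0, 0.5, 200)])
    grid = np.linspace(-5.0, 5.0, 201)
    return get_pdf(y, grid, max_components=2)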
#return different pdf's
def final_pdfs(z, JD_ln, JD_fd, pdfs_per_sn, x_sampled, N_sample, rms_t0):
#define the uniform pdf's given by the JD_fd and JD_fd+JD_ln
ln, fd = (JD_ln - JD_fd)/(1.0+z), 0.0
pdf_fd = uniform_pdf(-9999.0, fd, x_sampled) #fd as prior
pdf_fd_ln = uniform_pdf(ln, fd, x_sampled) #fd and ln as prior
#combine the pdf of different sne
pdf_snid = np.linspace(1.0, 1.0, len(x_sampled))
for pdf_per_sn in pdfs_per_sn:
pdf_snid = pdf_snid*pdf_per_sn
#add typical rms(t0) error
err_0 = np.random.normal(0.0, rms_t0, N_sample)
err_0 = np.random.choice(tukey_rule(err_0), N_sample)
err_0 = err_0 - np.median(err_0)
t0s_snid = values_from_distribution(x_sampled, pdf_snid, N_sample)
t0s_snid = t0s_snid + err_0
t0s_snid = np.random.choice(tukey_rule(t0s_snid),N_sample)
#compute pdf's
pdf_snid = get_pdf(t0s_snid, x_sampled, max_components=1)
pdf_snid_fd = pdf_snid*pdf_fd
pdf_snid_fd_ln = pdf_snid*pdf_fd_ln
#normalize pdf's
pdf_snid = pdf_snid / simpson(x_sampled, pdf_snid)
pdf_snid_fd = pdf_snid_fd / simpson(x_sampled, pdf_snid_fd)
pdf_snid_fd_ln = pdf_snid_fd_ln / simpson(x_sampled, pdf_snid_fd_ln)
return pdf_fd_ln, pdf_snid, pdf_snid_fd, pdf_snid_fd_ln
def average_pdf_per_sn_bm_with_t0_error(sne_bm, t0s_bm, rms_t0s_bm, x_pdf, N_sample):
JD_ln_template, JD_fd_template = snii_templates_epochs()
pdfs_per_sn = []
for sn_bm, spec_phase, err_spec_phase in zip(sne_bm, t0s_bm, rms_t0s_bm):
delta = round(JD_fd_template[sn_bm]-JD_ln_template[sn_bm],3)
rms_uniform = delta/np.sqrt(12.0)
if rms_uniform < 0.3*err_spec_phase:
pdf_per_sn = gaussian_pdf(spec_phase, err_spec_phase, x_pdf)
else:
err_t0_template = np.random.uniform(-0.5*delta,0.5*delta, N_sample)
            err_t0_template = err_t0_template - np.median(err_t0_template)  # center the distribution at zero
            x1 = np.random.normal(spec_phase, err_spec_phase, N_sample)
            x1 = np.random.choice(tukey_rule(x1), N_sample)
            x1 = x1 - np.median(x1) + spec_phase  # center the distribution at the phase
            # include values from the uniform distribution
            x = x1 + err_t0_template
pdf_per_sn = get_pdf(x, x_pdf)
pdf_per_sn = pdf_per_sn / simpson(x_pdf, pdf_per_sn)
pdfs_per_sn.append(pdf_per_sn)
return pdfs_per_sn
def average_pdf_per_sn_bm(t0s_best, rms_t0s_best, sne_best):
#best matching SNe
sne_bm = list(set(sne_best))
t0s_bm, rms_t0s_bm = [], []
for sn_bm in sne_bm:
phases, err_phases = np.array([]), np.array([])
for sn_i, spec_phase, err_spec_phase in zip(sne_best, t0s_best, rms_t0s_best):
if sn_i == sn_bm:
phases = np.append(phases, spec_phase)
err_phases = np.append(err_phases, err_spec_phase)
t0_best, rms_t0_best = weighted_average(phases, err_phases)
t0s_bm.append(t0_best)
rms_t0s_bm.append(rms_t0_best)
return sne_bm, t0s_bm, rms_t0s_bm
def typical_pdf_per_sn_bm_per_spectrum(t0s_best, rms_t0s_best, sne_best, t_spec_best):
epochs = list(set(t_spec_best))
new_sne_best, new_t0_best, new_rms_t0_best = [], [], []
for epoch in epochs:
#number of templates at the epoch
templates = []
for t, sn_best in zip(t_spec_best, sne_best):
if t == epoch: templates.append(sn_best)
templates = list(set(templates))
for template in templates:
phases, err_phases = np.array([]), np.array([])
for t, sn_best, spec_phase, err_spec_phase in zip(t_spec_best, sne_best, t0s_best, rms_t0s_best):
if t == epoch and sn_best == template:
phases = np.append(phases, spec_phase)
err_phases = np.append(err_phases, err_spec_phase)
t0_best, rms_t0_best = weighted_average(phases, err_phases)
new_sne_best.append(template)
new_t0_best.append(t0_best)
new_rms_t0_best.append(rms_t0_best*np.sqrt(float(len(phases))))
sne_best, t0_best, rms_t0_best = np.array(new_sne_best), np.array(new_t0_best), np.array(new_rms_t0_best)
return sne_best, t0_best, rms_t0_best | [
"[email protected]"
] | |
2b6b3d0ed44ecf20e0b302e6ccd0aa6574a753fa | 22cbb7cffc3e5cf53fe87d2db216fdb88c8b7a8c | /stems/gis/convert.py | e26ac0443e6bd20f52888999784f13231793fecd | [
"BSD-3-Clause"
] | permissive | ceholden/stems | 838eb496978f7b68ae72988e0469c60e8730cb9c | 2e219eb76a44d6897881642635103b3353fc5539 | refs/heads/master | 2022-02-12T21:56:41.939073 | 2019-08-19T23:09:49 | 2019-08-19T23:09:49 | 164,480,487 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,186 | py | """ GIS variable conversion library
Functions here are convenient ways of going from various representations
of GIS information used in this stack (e.g., WKT) to the following
representations:
* Coordinate Reference System
* :py:class:`rasterio.crs.CRS`
* Geotransform
* :py:class:`affine.Affine`
* Bounding Box
* :py:class:`rasterio.coords.BoundingBox`
* Bounds
* :py:class:`shapely.geom.Polygon`
"""
from functools import singledispatch
import logging
from affine import Affine
import numpy as np
from osgeo import osr
from rasterio.coords import BoundingBox
from rasterio.crs import CRS
from rasterio.errors import CRSError
import shapely.geometry
from ..utils import (find_subclasses,
register_multi_singledispatch)
logger = logging.getLogger()
LIST_TYPE = (tuple, list, np.ndarray, )
# XARRAY_TYPE = (xr.Dataset, xr.DataArray)
GEOM_TYPE = find_subclasses(shapely.geometry.base.BaseGeometry)
# ============================================================================
# Affine geotransform
@singledispatch
def to_transform(value, from_gdal=False):
""" Convert input into an :py:class:`affine.Affine` transform
Parameters
----------
value : Affine or iterable
6 numbers representing affine transform
from_gdal : bool, optional
If `value` is a tuple or list, specifies if transform
is GDAL variety (True) or rasterio/affine (False)
Returns
-------
affine.Affine
Affine transform
"""
raise _CANT_CONVERT(value)
@to_transform.register(Affine)
def _to_transform_affine(value, from_gdal=False):
return value
@register_multi_singledispatch(to_transform, LIST_TYPE)
def _to_transform_iter(value, from_gdal=False):
if from_gdal:
return Affine.from_gdal(*value[:6])
else:
return Affine(*value[:6])
@to_transform.register(str)
def _to_transform_str(value, from_gdal=False, sep=','):
return _to_transform_iter([float(v) for v in value.split(sep)])
# ============================================================================
# CRS
# TODO: Dispatch function for Cartopy
@singledispatch
def to_crs(value):
""" Convert a CRS representation to a :py:class:`rasterio.crs.CRS`
Parameters
----------
value : str, int, dict, or osr.SpatialReference
Coordinate reference system as WKT, Proj.4 string, EPSG code,
rasterio-compatible proj4 attributes in a dict, or OSR definition
Returns
-------
rasterio.crs.CRS
CRS
"""
raise _CANT_CONVERT(value)
@to_crs.register(CRS)
def _to_crs_crs(value):
return value
@to_crs.register(str)
def _to_crs_str(value):
# After rasterio=1.0.14 WKT is backbone so try it first
try:
crs_ = CRS.from_wkt(value)
crs_.is_valid
except CRSError as err:
logger.debug('Could not parse CRS as WKT', err)
try:
crs_ = CRS.from_string(value)
crs_.is_valid
except CRSError as err:
logger.debug('Could not parse CRS as Proj4', err)
raise CRSError('Could not interpret CRS input as '
'either WKT or Proj4')
return crs_
@to_crs.register(int)
def _to_crs_epsg(value):
return CRS.from_epsg(value)
@to_crs.register(dict)
def _to_crs_dict(value):
return CRS(value)
@to_crs.register(osr.SpatialReference)
def _to_crs_osr(value):
return CRS.from_wkt(value.ExportToWkt())
# ============================================================================
# BoundingBox
@singledispatch
def to_bounds(value):
""" Convert input to a :py:class:`rasterio.coords.BoundingBox`
Parameters
----------
value : iterable, or Polygon
Input containing some geographic information
Returns
-------
BoundingBox
Bounding box (left, bottom, right, top). Also described as
(minx, miny, maxx, maxy)
"""
raise _CANT_CONVERT(value)
@to_bounds.register(BoundingBox)
def _to_bounds_bounds(value):
return value
@register_multi_singledispatch(to_bounds, LIST_TYPE)
def _to_bounds_iter(value):
return BoundingBox(*value)
@register_multi_singledispatch(to_bounds, GEOM_TYPE)
def _to_bounds_geom(value):
return BoundingBox(*value.bounds)
# ============================================================================
# Polygon
@singledispatch
def to_bbox(value):
""" Convert input a bounding box :py:class:`shapely.geometry.Polygon`
Parameters
----------
value : BoundingBox
Object representing a bounding box, or an xarray object with coords
we can use to calculate one from
Returns
-------
shapely.geometry.Polygon
BoundingBox as a polygon
"""
raise _CANT_CONVERT(value)
@register_multi_singledispatch(to_bbox, GEOM_TYPE)
def _to_bbox_geom(value):
return _to_bbox_bounds(BoundingBox(*value.bounds))
@to_bbox.register(BoundingBox)
def _to_bbox_bounds(value):
return shapely.geometry.box(*value)
# ============================================================================
# UTILITIES
def _CANT_CONVERT(obj):
return TypeError(f"Don't know how to convert this type: {type(obj)}")
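# Illustrative usage sketch (not part of the original module; the GDAL-ordered
# tuple below is a made-up example):
if __name__ == '__main__':
    t = to_transform([300000.0, 30.0, 0.0, 4600000.0, 0.0, -30.0], from_gdal=True)
    crs = to_crs(4326)
    poly = to_bbox(to_bounds((0.0, 0.0, 10.0, 10.0)))
    print(t, crs, poly.bounds)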
| [
"[email protected]"
] | |
a4ff973f1cfc97ff556afebe59b954dffd24c381 | 5d2041f8e03fba17af04494b84947528a623852d | /tools/onnx-script.py | 52a2e146b04ce549ebd06ff3f0449d9224f6bded | [
"MIT"
] | permissive | nihui/netron | e60ddf9f64de0d57d1b691502db18edb2aa6b511 | 3963751827f30dd0955b9ad5e80d316aae807272 | refs/heads/master | 2022-01-19T18:26:57.059856 | 2021-12-30T03:44:11 | 2021-12-30T03:44:11 | 249,875,598 | 9 | 4 | MIT | 2020-03-25T03:13:18 | 2020-03-25T03:13:17 | null | UTF-8 | Python | false | false | 9,563 | py |
from __future__ import unicode_literals
import io
import json
import os
import re
import sys
import onnx
from onnx.backend.test.case import collect_snippets
snippets = collect_snippets()
categories = {
'Constant': 'Constant',
'Conv': 'Layer',
'ConvInteger': 'Layer',
'ConvTranspose': 'Layer',
'FC': 'Layer',
'RNN': 'Layer',
'LSTM': 'Layer',
'GRU': 'Layer',
'Gemm': 'Layer',
'Dropout': 'Dropout',
'Elu': 'Activation',
'HardSigmoid': 'Activation',
'LeakyRelu': 'Activation',
'PRelu': 'Activation',
'ThresholdedRelu': 'Activation',
'Relu': 'Activation',
'Selu': 'Activation',
'Sigmoid': 'Activation',
'Tanh': 'Activation',
'LogSoftmax': 'Activation',
'Softmax': 'Activation',
'Softplus': 'Activation',
'Softsign': 'Activation',
'BatchNormalization': 'Normalization',
'InstanceNormalization': 'Normalization',
'LpNormalization': 'Normalization',
'LRN': 'Normalization',
'Flatten': 'Shape',
'Reshape': 'Shape',
'Tile': 'Shape',
'Xor': 'Logic',
'Not': 'Logic',
'Or': 'Logic',
'Less': 'Logic',
'And': 'Logic',
'Greater': 'Logic',
'Equal': 'Logic',
'AveragePool': 'Pool',
'GlobalAveragePool': 'Pool',
'GlobalLpPool': 'Pool',
'GlobalMaxPool': 'Pool',
'LpPool': 'Pool',
'MaxPool': 'Pool',
'MaxRoiPool': 'Pool',
'Concat': 'Tensor',
'Slice': 'Tensor',
'Split': 'Tensor',
'Pad': 'Tensor',
'ImageScaler': 'Data',
'Crop': 'Data',
'Upsample': 'Data',
'Transpose': 'Transform',
'Gather': 'Transform',
'Unsqueeze': 'Transform',
'Squeeze': 'Transform',
}
attribute_type_table = {
'undefined': None,
'float': 'float32', 'int': 'int64', 'string': 'string', 'tensor': 'tensor', 'graph': 'graph',
'floats': 'float32[]', 'ints': 'int64[]', 'strings': 'string[]', 'tensors': 'tensor[]', 'graphs': 'graph[]',
}
def generate_json_attr_type(attribute_type, attribute_name, op_type, op_domain):
assert isinstance(attribute_type, onnx.defs.OpSchema.AttrType)
key = op_domain + ':' + op_type + ':' + attribute_name
if key == ':Cast:to' or key == ':EyeLike:dtype' or key == ':RandomNormal:dtype':
return 'DataType'
s = str(attribute_type)
s = s[s.rfind('.')+1:].lower()
if s in attribute_type_table:
return attribute_type_table[s]
return None
def generate_json_attr_default_value(attr_value):
if not str(attr_value):
return None
if attr_value.HasField('i'):
return attr_value.i
if attr_value.HasField('s'):
return attr_value.s.decode('utf8')
if attr_value.HasField('f'):
return attr_value.f
return None
def generate_json_support_level_name(support_level):
assert isinstance(support_level, onnx.defs.OpSchema.SupportType)
s = str(support_level)
return s[s.rfind('.')+1:].lower()
def generate_json_types(types):
r = []
for type in types:
r.append(type)
r = sorted(r)
return r
def format_range(value):
if value == 2147483647:
return '∞'
return str(value)
def format_description(description):
def replace_line(match):
link = match.group(1)
url = match.group(2)
if not url.startswith("http://") and not url.startswith("https://"):
url = "https://github.com/onnx/onnx/blob/master/docs/" + url
return "[" + link + "](" + url + ")"
description = re.sub("\\[(.+)\\]\\(([^ ]+?)( \"(.+)\")?\\)", replace_line, description)
return description
def generate_json(schemas, json_file):
json_root = []
for schema in schemas:
json_schema = {}
json_schema['name'] = schema.name
if schema.domain:
json_schema['module'] = schema.domain
else:
json_schema['module'] = 'ai.onnx'
json_schema['version'] = schema.since_version
json_schema['support_level'] = generate_json_support_level_name(schema.support_level)
if schema.doc:
json_schema['description'] = format_description(schema.doc.lstrip())
if schema.attributes:
json_schema['attributes'] = []
for _, attribute in sorted(schema.attributes.items()):
json_attribute = {}
json_attribute['name'] = attribute.name
attribute_type = generate_json_attr_type(attribute.type, attribute.name, schema.name, schema.domain)
if attribute_type:
json_attribute['type'] = attribute_type
elif 'type' in json_attribute:
del json_attribute['type']
json_attribute['required'] = attribute.required
default_value = generate_json_attr_default_value(attribute.default_value)
if default_value:
json_attribute['default'] = default_value
json_attribute['description'] = format_description(attribute.description)
json_schema['attributes'].append(json_attribute)
if schema.inputs:
json_schema['inputs'] = []
for input in schema.inputs:
json_input = {}
json_input['name'] = input.name
json_input['type'] = input.typeStr
if input.option == onnx.defs.OpSchema.FormalParameterOption.Optional:
json_input['option'] = 'optional'
elif input.option == onnx.defs.OpSchema.FormalParameterOption.Variadic:
json_input['list'] = True
json_input['description'] = format_description(input.description)
json_schema['inputs'].append(json_input)
json_schema['min_input'] = schema.min_input
json_schema['max_input'] = schema.max_input
if schema.outputs:
json_schema['outputs'] = []
for output in schema.outputs:
json_output = {}
json_output['name'] = output.name
json_output['type'] = output.typeStr
if output.option == onnx.defs.OpSchema.FormalParameterOption.Optional:
json_output['option'] = 'optional'
elif output.option == onnx.defs.OpSchema.FormalParameterOption.Variadic:
json_output['list'] = True
json_output['description'] = format_description(output.description)
json_schema['outputs'].append(json_output)
json_schema['min_output'] = schema.min_output
json_schema['max_output'] = schema.max_output
if schema.min_input != schema.max_input:
json_schema['inputs_range'] = format_range(schema.min_input) + ' - ' + format_range(schema.max_input)
if schema.min_output != schema.max_output:
json_schema['outputs_range'] = format_range(schema.min_output) + ' - ' + format_range(schema.max_output)
if schema.type_constraints:
json_schema['type_constraints'] = []
for type_constraint in schema.type_constraints:
json_schema['type_constraints'].append({
'description': type_constraint.description,
'type_param_str': type_constraint.type_param_str,
'allowed_type_strs': type_constraint.allowed_type_strs
})
if schema.name in snippets:
def update_code(code):
lines = code.splitlines()
while len(lines) > 0 and re.search("\\s*#", lines[-1]):
lines.pop()
if len(lines) > 0 and len(lines[-1]) == 0:
lines.pop()
return '\n'.join(lines)
json_schema['examples'] = []
for summary, code in sorted(snippets[schema.name]):
json_schema['examples'].append({
'summary': summary,
'code': update_code(code)
})
if schema.name in categories:
json_schema['category'] = categories[schema.name]
        json_root.append(json_schema)
json_root = sorted(json_root, key=lambda item: item['name'] + ':' + str(item['version'] if 'version' in item else 0).zfill(4))
with io.open(json_file, 'w', newline='') as fout:
json_root = json.dumps(json_root, indent=2)
for line in json_root.splitlines():
line = line.rstrip()
if sys.version_info[0] < 3:
line = str(line)
fout.write(line)
fout.write('\n')
def metadata():
json_file = os.path.join(os.path.dirname(__file__), '../source/onnx-metadata.json')
all_schemas_with_history = onnx.defs.get_all_schemas_with_history()
generate_json(all_schemas_with_history, json_file)
def optimize():
import onnx
from onnx import optimizer
file = sys.argv[2]
    base = os.path.splitext(file)[0]  # splitext returns a tuple; keep the root
onnx_model = onnx.load(file)
passes = optimizer.get_available_passes()
optimized_model = optimizer.optimize(onnx_model, passes)
onnx.save(optimized_model, base + '.optimized.onnx')
def infer():
import onnx
import onnx.shape_inference
from onnx import shape_inference
file = sys.argv[2]
base = os.path.splitext(file)[0]
onnx_model = onnx.load(base + '.onnx')
onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
onnx.save(onnx_model, base + '.shape.onnx')
if __name__ == '__main__':
command_table = { 'metadata': metadata, 'optimize': optimize, 'infer': infer }
command = sys.argv[1]
command_table[command]()
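# Usage (illustrative):
#   python onnx-script.py metadata
#   python onnx-script.py optimize model.onnx
#   python onnx-script.py infer model.onnx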
| [
"[email protected]"
] | |
25f94ab6830074c80784e5bce87d5041838da2af | 96890d754bd943510ad2b5e3a0cba336fab24d44 | /Week7/After14.py | f051a0b47f89f4fb9463f9bece77e23caaf0f586 | [] | no_license | Chudvan/Python_osnovy_programmirovaniya-Coursera- | 304925397d3e7f4b49bc3f62dc89f782d36a1f76 | 19117cb198ed50bb90ff8082efc0dad4e80bce13 | refs/heads/master | 2020-07-07T13:49:14.504232 | 2019-08-21T02:00:01 | 2019-08-21T02:00:01 | 203,366,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from sys import stdin
numberWordsDict = dict()
for line in stdin:
lineList = line.split()
for word in lineList:
if word not in numberWordsDict:
numberWordsDict[word] = 0
numberWordsDict[word] += 1
tupleList = []
for word in numberWordsDict:
tupleList.append((numberWordsDict[word], word))
tupleList.sort(key=lambda curTuple: (-curTuple[0], curTuple[1]))
for curTuple in tupleList:
print(curTuple[1])
| [
"[email protected]"
] | |
8031b06595673b677e41319deb604caa3164a455 | 5ca39c2f45bdef4f93e57b17a357a2565fe1cf02 | /contactbook.py | 05a5715d3a06a40a21e502278f0cf56788ca7c36 | [] | no_license | Ajit1999/ContactBook-API | de6f51d0e1fcf49b5c8b8bfacf4b7750b64b9356 | df64583db98eb3421f07177f3c7dbb771c218ac4 | refs/heads/main | 2023-07-12T00:12:38.396876 | 2021-08-22T11:55:31 | 2021-08-22T11:55:31 | 398,787,514 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,215 | py | from flask import Flask
from flask_pymongo import PyMongo
from bson.json_util import dumps
from bson.objectid import ObjectId
from flask import jsonify, request
app = Flask(__name__)
app.secret_key = "secretkey"
app.config['MONGO_URI'] = "mongodb://localhost:27017/User"
mongo = PyMongo(app)
@app.route('/add',methods=['POST'])
def add_user():
_json = request.json
_name = _json['name']
_address = _json['address']
_contactno = _json['contact']
_email = _json['email']
if _name and _address and _contactno and _email and request.method == 'POST':
id = mongo.db.user.insert({'name':_name,'address':_address,'contact':_contactno,'email':_email})
resp = jsonify("Contact added sucessfully")
resp.status_code = 200
return resp
else:
return not_found()
@app.route('/users')
def users():
users = mongo.db.user.find()
resp = dumps(users)
return resp
@app.route('/user/<id>')
def user(id):
user = mongo.db.user.find_one({'_id':ObjectId(id)})
resp = dumps(user)
return resp
@app.route('/delete/<id>',methods=['DELETE'])
def delete_user(id):
delete_user = mongo.db.user.delete_one({'_id': ObjectId(id)})
resp = jsonify("Contact deleted successfully")
resp.status_code = 200
return resp
@app.route('/update/<id>', methods =['PUT'])
def update(id):
_id = id
_json = request.json
_name = _json['name']
_address = _json['address']
_contactno = _json['contact']
_email = _json['email']
if _name and _address and _contactno and _email and _id and request.method == 'PUT':
mongo.db.user.update({'_id':ObjectId(_id['$oid']) if '$oid' in _id else ObjectId(_id)}, {'$set': {'name':_name,'address':_address,'contact':_contactno,'email':_email,}})
resp = jsonify("Contact updated Successfully")
resp.status_code = 200
return resp
else:
return not_found()
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
        'message': 'Not Found: ' + request.url
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ =="__main__":
app.run(debug = True)
| [
"[email protected]"
] | |
104377ea2f080beb98e6dc6a42d72693b8f7e54c | 60b2156d7bc5bd42fa1d8e7684f4b8c2d8f12748 | /tests/test_utils.py | cc5027edc12f34ec0d6c005b5c9015294d5a63ae | [
"MIT"
] | permissive | coras-io/lint-review | 74d89b05611ba4d18ab4224a6af9e5b93b5f27c3 | 0df19429a265a79edecb53b4371bf63db7e61617 | refs/heads/master | 2020-12-25T22:28:52.698909 | 2019-11-28T15:56:53 | 2019-11-28T15:56:53 | 63,415,943 | 2 | 2 | MIT | 2019-11-28T15:56:54 | 2016-07-15T11:07:20 | Python | UTF-8 | Python | false | false | 513 | py | import lintreview.utils as utils
import os
from unittest import skipIf
js_hint_installed = os.path.exists(
os.path.join(os.getcwd(), 'node_modules', '.bin', 'jshint'))
def test_in_path():
assert utils.in_path('python'), 'No python in path'
assert not utils.in_path('bad_cmd_name')
@skipIf(not js_hint_installed, 'Missing local jshint. Skipping')
def test_npm_exists():
assert utils.npm_exists('jshint'), 'Should be there.'
assert not utils.npm_exists('not there'), 'Should not be there.'
| [
"[email protected]"
] | |
da74b5b74654f0fbd6447f906cfa0864252ad0ea | 43e788ee824ce1f6611d42690688136e5840af0e | /Video.py | 5727fe4166addad073efc4954296de4a11e5ee5a | [] | no_license | Karthik8396/lrn_opencv2 | 3b9c9d824bee26c5d3c5c8ab54fb12e5a9bf145e | 1d475f5b285cca187ff449f0036dcfe3dd5db136 | refs/heads/master | 2020-07-10T05:09:03.104573 | 2019-08-31T14:23:17 | 2019-08-31T14:23:17 | 204,174,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | import cv2
import numpy
cap=cv2.VideoCapture(0) #first webcam
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # fourcc identifies the codec used to save the video
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))  # output file, codec, fps, frame size
while True :
ret,frame = cap.read()
gray=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
cv2.imshow('frame',frame)
cv2.imshow('gray',gray)
out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey returns a 32-bit value; & 0xFF keeps the low 8 bits for comparison
        break  # ord() gives the key's character code
cap.release()
out.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
4aef633a4a753583109bbede3ee7210ea326e777 | 6cec9a15d1c9427229f3c301b04bbe64f316bbce | /controlledEnviroment/GUIpackage/Classes/LetterToCharactersClass.py | ab0de4a4b3da873f5bdf638a9426c5ee6cd8f359 | [] | no_license | carolyn-brodie/Summer2021 | bf04d1a089a183dbfb9273c2b6a5d70ceb930f62 | 407741a8c4bf45c5e389b3a2c1b07a874c8eacaf | refs/heads/master | 2023-06-17T22:43:38.157442 | 2021-07-22T19:18:29 | 2021-07-22T19:18:29 | 373,882,939 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | class LetterToCharacters():
def __init__(self):
self.letters = ["ch", "sh", "th", "wh", "ng", "nk", "wr", "str", "spr", "bl", "cl", "fl", "gl", "pl", "br",
"cr", "dr", "fr",
"gr", "pr", "tr", "sk", "sl", "sp", "st", "sw"]
self.alphabet = {"a": 0, "b": 0, "c": 0, "d": 0, "e": 0, "f": 0,
"g": 0, "h": 0, "i": 0, "j": 0, "k": 0, "l": 0,
"m": 0, "n": 0, "o": 0, "p": 0, "q": 0, "r": 0,
"s": 0, "t": 0, "u": 0, "v": 0, "w": 0, "x": 0,
"y": 0, "z": 0, "!": 0, "@": 0, "#": 0, "$": 0, "%": 0, "^": 0, ")": 0, "*": 0, "(": 0, "_": 0}
self.digraph_dict = {"ch": "!", "sh": "@", "th": "#", "wh": "$", "ng": "%", "nk": "^", "wr": ")"}
self.blend_dict = {"str": "*", "spr": "(", "bl": "[", "cl": "]", "fl": "|", "gl": ":", "pl": "<", "br": ">",
"cr": "?", "dr": "~",
"fr": "`", "gr": "\u00d8", "pr": "\u00d9", "tr": "\u00da", "sk": "\u00db", "sl": "\u00dd",
"sp": "\u00de",
"st": "\u00df", "sw": "\u00e0"}
self.vowel_dict = {"ai": "\u00e1", "au": "\u00e2", "aw": "\u00e3", "ay": "\u00e4", "ea": "\u00e5",
"ee": "\u00e6",
"ei": "\u00e7",
"eo": "\u00e8", "eu": "\u00e9", "ew": "\u00ea", "ey": "\u00eb", "ie": "\u00ec",
"oa": "\u00ed",
"oe": "\u00ee",
"oi": "\u00ef", "oo": "\u00f0", "ou": "\u00f1", "ow": "\u00f2", "oy": "\u00f3",
"ue": "\u00f4",
"ui": "\u00f5"}
self.combined_dict = {}
self.combined_dict.update(self.digraph_dict)
self.combined_dict.update(self.blend_dict)
self.combined_dict.update(self.vowel_dict)
self.reverse_dict = {value: key for (key, value) in self.combined_dict.items()}
self.allCombined = self.returnAllCombined()
def lettersToCharacters(self, word):
for item in self.letters:
if item in word:
var = word.index(item)
word = word.replace(word[var: var + len(item)], self.combined_dict[item])
return word
def charactersToLetters(self, word):
for item in self.reverse_dict.keys():
if item in word:
var = word.index(item)
word = word.replace(word[var], self.reverse_dict[item])
return word
def returnCombined(self):
return self.combined_dict
def returnReversed(self):
return self.reverse_dict
def returnAllCombined(self):
temp = self.alphabet
temp.update(self.reverse_dict)
return temp
def formatDictForReturn(self, dict1):
temp = dict1
for char in temp:
temp[char] = 0
return temp
def nestDict(self, dict1):
temp = {}
temp.update(dict1)
for char1 in temp:
temp1 = {}
temp1.update(dict1)
temp[char1] = temp1
return temp
def returnFormated(self):
temp = self.nestDict(self.formatDictForReturn(self.returnAllCombined()))
return temp
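# Illustrative round trip (not part of the original class): digraphs collapse
# to single placeholder characters and expand back.
if __name__ == '__main__':
    ltc = LetterToCharacters()
    encoded = ltc.lettersToCharacters('ship')   # 'sh' -> '@', giving '@ip'
    decoded = ltc.charactersToLetters(encoded)  # back to 'ship'
    print(encoded, decoded)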
| [
"[email protected]"
] | |
3095ad9d0178728b8363be5fa150c0ea43e6ecea | 9c902c6bc6ea2cce71195acd5baa8f44ab928eb6 | /pythonapp/imgtxt/admin.py | 0124dec01736c26d6587dbe332000f3719f39cdc | [] | no_license | mogilivishal/Verzeo-OCR-Project | a383b56014e13dfef598a191012fc51dc9579624 | 8b34a6c8b323e0b55c7902f2c4f873a1e4ce04e7 | refs/heads/master | 2022-04-17T20:32:45.724447 | 2020-02-16T17:38:52 | 2020-02-16T17:38:52 | 240,932,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.contrib import admin
from .models import Document
admin.site.register(Document) | [
"[email protected]"
] | |
3b5723e132a7e8f7a265ee90af5a94bd78032635 | cccabd5a16b9e230bbf8379b4f8d42a64f0f2608 | /pysweng/tests/test_oop.py | 8a4fc0b20b9e6804472a681a7c45f97ba0f8afaf | [
"MIT"
] | permissive | lopezpdvn/pysweng | 75bef93803c15cdf0859c6fefcee2693fb011364 | af28b5454385db5314876dde37f2c2bc18731734 | refs/heads/master | 2021-01-18T23:42:55.054505 | 2016-12-30T09:43:18 | 2016-12-30T09:43:18 | 55,115,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 478 | py | import unittest
from pysweng.oop import (dummy_function, DUMMY_GLOBAL_CONSTANT_0,
DUMMY_GLOBAL_CONSTANT_1)
class TestDummies(unittest.TestCase):
def test_global_variables(self):
self.assertEqual(DUMMY_GLOBAL_CONSTANT_0, 'FOO')
self.assertEqual(DUMMY_GLOBAL_CONSTANT_1, 'BAR')
def test_dummy_funcion(self):
self.assertEqual(dummy_function('a'), 'a');
self.assertEqual(dummy_function(555), 555);
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
7f9a2d07182faa806f9337f02a6a0ce4035514fd | 0676f6e4d3510a0305d29aa0b1fe740d538d3b63 | /Python/SImplifyPline/CleanUpPolyline.py | 1ce7d7116eb272886ed20d4186ae8a3b571c98fb | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | pgolay/PG_Scripts | f70ffe7e5ca07acd6f4caedc9a9aec566542da7c | 796704a7daa6ac222a40bb02afdb599f74a6b0d4 | refs/heads/master | 2021-01-19T16:53:41.525879 | 2017-02-07T18:26:10 | 2017-02-07T18:26:10 | 2,730,362 | 9 | 1 | null | 2016-12-30T17:58:08 | 2011-11-08T00:04:33 | Python | UTF-8 | Python | false | false | 1,898 | py | import Rhino
import scriptcontext as sc
"""
Cleans up by collapsing tiny segments in a polyline.
"""
def CleanUpPolyline():
while True:
tol = sc.doc.ModelAbsoluteTolerance
if sc.sticky.has_key("PLineSimplifyTol"):
tol = sc.sticky["PLineSimplifyTol"]
go = Rhino.Input.Custom.GetObject()
go.AcceptNumber(True, False)
go.GeometryFilter = Rhino.DocObjects.ObjectType.Curve
opDblTol = Rhino.Input.Custom.OptionDouble(tol)
go.AddOptionDouble("SegmentTolerance",opDblTol)
result = go.Get()
if( go.CommandResult() != Rhino.Commands.Result.Success ):
return
if result == Rhino.Input.GetResult.Object:
if type(go.Object(0).Geometry()) == Rhino.Geometry.PolylineCurve:
curve = go.Object(0).Geometry()
rc, pLine = curve.TryGetPolyline()
pLineId = go.Object(0).ObjectId
else:
sc.doc.Objects.UnselectAll()
sc.doc.Views.Redraw()
print "Sorry, that was not a polyline."
continue
break
elif result == Rhino.Input.GetResult.Option:
tol = opDblTol.CurrentValue
sc.sticky["PLineSimplifyTol"] = tol
continue
elif result == Rhino.Input.GetResult.Number:
tol = go.Number()
sc.sticky["PLineSimplifyTol"] = tol
continue
break
count = pLine.CollapseShortSegments(tol)
if count !=0:
sc.doc.Objects.Replace(pLineId, pLine)
sc.doc.Views.Redraw()
print str(count) + " short segments were collapsed."
else:
print "No short segments were collapsed."
pass
if __name__ == "__main__":
CleanUpPolyline() | [
"[email protected]"
] | |
f11868c799a295320f785d89daea2d28092944a7 | 05824a52e2ca67db8b8d2bd21ece1a53dc5d23de | /code/configuration.py | 7e48afa3edbec5a7b1f8e4dc19656ad3e4e002e4 | [] | no_license | HankTsai/Sales_Forecast_Retailer | 65c19f77fdb3ac573abf9846dee46695e45c91ac | 07d7a37c4b3cc482765ae747fd1cfd9b96096dc1 | refs/heads/main | 2023-07-18T06:13:38.393562 | 2021-08-31T03:40:59 | 2021-08-31T03:40:59 | 378,896,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,577 | py |
import os
import logging
from pymssql import connect
from datetime import datetime
from configparser import ConfigParser
config = ConfigParser()
config.read('setting.ini')
class CodeLogger:
"""log儲存設定模組"""
def __init__(self):
self.logger = logging.getLogger(os.path.basename(__file__))
self.formatter = logging.Formatter(
'["%(asctime)s - %(levelname)s - %(name)s - %(message)s" - function:%(funcName)s - line:%(lineno)d]')
self.log_name = config['filepath']['log_path'] + datetime.now().strftime("forecast_%Y-%m-%d_%H-%M-%S.log")
logging.basicConfig(level=logging.INFO, datefmt='%Y%m%d_%H:%M:%S',)
def store_logger(self):
"""設定log儲存"""
handler = logging.FileHandler(self.log_name, "w", encoding = "UTF-8")
handler.setFormatter(self.formatter)
self.logger.addHandler(handler)
self.logger.propagate = False
def show_logger(self):
"""設定log在終端機顯示"""
console = logging.StreamHandler()
console.setLevel(logging.FATAL)
console.setFormatter(self.formatter)
self.logger.addHandler(console)
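# Illustrative usage (not part of the original module; assumes setting.ini and
# the configured log directory exist):
def _codelogger_demo():
    cl = CodeLogger()
    cl.store_logger()
    cl.show_logger()
    cl.logger.info('forecast run started')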
class DBConnect:
"""繼承並設計DB連線處理"""
def __init__(self):
self.host = config['connect']['server']
self.user = config['connect']['username']
self.password = config['connect']['password']
self.database = config['connect']['database']
self.conn = connect(host=self.host, user=self.user, password=self.password, database=self.database, autocommit=True)
def query(self, sql, as_dict=False, para=()):
"""查詢DB數據"""
# as_dict 是讓數據呈現key/value型態
try:
cursor = self.conn.cursor(as_dict)
if para:
cursor.execute(sql,para)
return cursor
else:
cursor.execute(sql)
return cursor
except Exception as me:
CodeLogger().logger.error(me)
def insert(self, sql, para=()):
"""新增DB數據"""
try:
cursor = self.conn.cursor()
cursor.execute(sql,para)
except Exception as me:
CodeLogger().logger.error(me)
def delete(self, sql, para=()):
"""刪除DB數據"""
try:
cursor = self.conn.cursor()
cursor.execute(sql,para)
except Exception as me:
CodeLogger().logger.error(me)
def commit(self):
self.conn.commit()
def close(self):
self.conn.close() | [
"[email protected]"
] | |
ee3452616d5ab280c04845cc2164cbdf6db586d2 | 9032e88ca0c90a15b96d2142d2629484cdf469b6 | /py_controls/MemoryManager.py | fd1bc79f0d91f58ce62c4bd3349152244c888609 | [
"MIT"
] | permissive | CyberCrunch/DU_AI_Gov | 856db1db4e67e37ac8c8f05fc096a9bbc50027a8 | a9fcf3b603c39bf0704df172a6745620d1d3c06b | refs/heads/master | 2021-06-20T12:46:35.360703 | 2017-08-08T19:18:14 | 2017-08-08T19:18:14 | 77,530,730 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,088 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 30 15:52:43 2016
@author: robin
"""
import json
from enum import Enum #testing possible enums for readability...(not implemeted)
class NrH(Enum): #human data formtat for Json
name = 0
human = 1
job = 2
status = 3
position = 4
money = 5
class NrL(Enum): #location data formtat for Json
name = 0
location = 1
planet = 2
structure = 3
longitude = 4
latitude = 5
resource = 6
reward = 7
class SpH(Enum): #human string formtat for registration
name = 0
job = 1
class SpL(Enum): #location string formtat for registration
name = 0
planet = 1
structure = 2
longitude = 3
latitude = 4
def regHuman(msg):
splitStr = msg.split()
if(len(splitStr) != 2):
return "Invalid Parameters, please use Format: !reg YourName YourJob"
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpH.name.value]] = ['Human', splitStr[SpH.job.value],"idle", "unknownPos", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New human registered: " +msg)
def regLocation(msg):
splitStr = msg.split()
if(len(splitStr) != 5):
return ("Invalid Parameters, please use Format: !geodata name planet type longitude latitude")
with open('memoryDB.json', 'r+') as json_file:
json_data = json.load(json_file)
json_data[splitStr[SpL.name.value]] = ['Location', splitStr[SpL.planet.value], splitStr[SpL.structure.value], splitStr[SpL.longitude.value], splitStr[SpL.latitude.value], "default", 0]
json_file.seek(0, 0)
json_file.write(json.dumps(json_data, indent=4))
json_file.truncate()
return ("New location registered: " +msg)
def getDatabase():
with open('memoryDB.json', 'r') as json_file:
json_data = json.load(json_file)
return(json.dumps(json_data, indent=4, sort_keys=True)) | [
"[email protected]"
] | |
a33b2f9f3cd62ddd7189114556f08b0144aad7c6 | b08d42933ac06045905d7c005ca9c114ed3aecc0 | /src/coefSubset/evaluate/ranks/tenth/rank_2p49_Q.py | c80b9b7c96acce81b347d895d8286c78c576e7d8 | [] | no_license | TanemuraKiyoto/PPI-native-detection-via-LR | d148d53f5eb60a4dda5318b371a3048e3f662725 | 897e7188b0da94e87126a4acc0c9a6ff44a64574 | refs/heads/master | 2022-12-05T11:59:01.014309 | 2020-08-10T00:41:17 | 2020-08-10T00:41:17 | 225,272,083 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,204 | py | # 9 July 2019
# Kiyoto Aramis Tanemura
# Several metrics are used to assess the performance of the trained RF model, notably native ranking. This script returns a ranking of the native protein-protein complex among a decoy set. For convenience, I will define as a function and will call in a general performance assessment script.
# Modified 11 July 2019 by Kiyoto Aramis Tanemura. To parallelize the process, I will replace the for loop for the testFileList to a multiprocessing pool.
# Modified 9 September 2019 by Kiyoto Aramis Tanemura. I will use the function to perform the calculation on one CSV file only. Thus instead of a function to import in other scripts, they will be individual jobs parallelized as individual jobs in the queue.
import os
import pandas as pd
import numpy as np
import pickle
os.chdir('/mnt/scratch/tanemur1/')
# Read the model and trainFile
testFile = '2p49.csv'
identifier = 'Q'
thresholdCoef = 0.1
testFilePath = '/mnt/scratch/tanemur1/CASF-PPI/nonb_descriptors/complete/'
modelPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/tenth/'
outputPath = '/mnt/home/tanemur1/6May2019/2019-11-11/results/coefSubset/evaluate/tenth/ranks/'
pdbID = testFile[:4]
with open(modelPath + 'model' + identifier + '.pkl', 'rb') as f:
clf = pickle.load(f)
result = pd.DataFrame()
scoreList = []
df1 = pd.read_csv(testFilePath + testFile)
dropList = ['Unnamed: 0', 'Unnamed: 0.1', 'ref']
df1 = df1.drop(dropList, axis = 1)
df1 = df1.set_index('Pair_name')
df1 = pd.DataFrame(df1.values.T, columns = df1.index, index = df1.columns)
df1.fillna(0.0, inplace = True)
df1 = df1.reindex(sorted(df1.columns), axis = 1)
# Drop features with coefficients below threshold
coefs = pd.read_csv('/mnt/home/tanemur1/6May2019/2019-11-11/results/medianCoefs.csv', index_col = 0, header = None, names = ['coefficients'])
coefs = coefs[np.abs(coefs['coefficients']) < thresholdCoef]
dropList = list(coefs.index)
del coefs
df1.drop(dropList, axis = 1, inplace = True)
with open(modelPath + 'standardScaler' + identifier + '.pkl', 'rb') as g:
scaler = pickle.load(g)
for i in range(len(df1)):
    # subtract each row of the dataframe from row i, then drop the trivial row[[i]] - row[[i]] term. Some input files also carry a spurious 'class' column, which is removed.
df2 = pd.DataFrame(df1.iloc[[i]].values - df1.values, index = df1.index, columns = df1.columns)
df2 = df2.drop(df1.iloc[[i]].index[0], axis = 0)
    # Standardize the input DF using the scaler fitted on the training data.
df2 = scaler.transform(df2)
# Predict class of each comparison descriptor and sum the classes to obtain score. Higher score corresponds to more native-like complex
predictions = clf.predict(df2)
score = sum(predictions)
scoreList.append(score)
# Make a new DataFrame to store the score and corresponding descriptorID. Add rank as column. Note: lower rank corresponds to more native-like complex
result = pd.DataFrame(data = {'score': scoreList}, index = df1.index.tolist()).sort_values(by = 'score', ascending = False)
result['rank'] = range(1, len(result) + 1)
with open(outputPath + pdbID + identifier + '.csv', 'w') as h:
result.to_csv(h)
| [
"[email protected]"
] | |
77d8acda1bcff51018b3fe72fc9c8578176f31e9 | c9aa19a4d46b5c5357121e76e2e9784f2140ba41 | /cashonly/management/commands/debtreminder.py | 09a10f922fe66a1bb31ef740723ed9ab65469d2c | [] | no_license | klonfed/cashonly | 2e617094ad95b82be62808fbbb781e9a2250b8a6 | 514e1c9cd8814e38b518b0be382940d1cb229725 | refs/heads/master | 2021-01-19T18:30:35.317250 | 2015-11-20T22:20:00 | 2015-11-20T22:20:00 | 41,054,334 | 2 | 2 | null | 2022-08-23T10:21:31 | 2015-08-19T19:07:16 | Python | UTF-8 | Python | false | false | 1,027 | py |
from cashonly.models import *
from django.conf import settings
from django.core.mail import send_mass_mail
from django.core.management.base import NoArgsCommand
from django.template import Context
from django.template.loader import get_template
from django.utils import translation
from django.utils.translation import ugettext as _
class Command(NoArgsCommand):
  help = 'Sends a reminder mail to everyone with a negative credit'
def handle_noargs(self, **options):
translation.activate('de')
tpl = get_template('cashonly/debt_reminder.txt')
messages = []
for a in Account.objects.all():
if a.credit < 0:
name = '%s %s' % (a.user.first_name, a.user.last_name)
context = {'name': name, 'credit': a.credit}
rcpts = ['%s <%s>' % (name, a.user.email)]
messages.append(('%s%s' % (settings.EMAIL_SUBJECT_PREFIX,
_('Debt Reminder')),
tpl.render(Context(context)),
settings.DEFAULT_FROM_EMAIL, rcpts))
send_mass_mail(tuple(messages))
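# Usage sketch (hedged): with Django settings configured and this app in
# INSTALLED_APPS, the command name comes from this module's filename, so it
# can be run as `python manage.py debtreminder`, or invoked from code:
#
#   from django.core.management import call_command
#   call_command('debtreminder')
#
# Scheduling it via cron is a typical deployment choice for reminder mails.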
| [
"[email protected]"
] | |
38cdbaf54cca763167f5f61a21d94d207591b3a2 | 5485c26474df9c7f68d94976fae45da5f0091c3c | /auto_feature.py | de319e49c20d33cfabc61c32af47395ae90da9f0 | [] | no_license | CheneyYin/Motor | ecaab18e084ed4083c9ccb980a2d9b4310bf0637 | f3009e0335a9a70d5299b3814f7df4f43b03eff4 | refs/heads/master | 2020-05-07T12:40:15.447944 | 2019-08-12T03:28:22 | 2019-08-12T03:28:22 | 180,515,434 | 1 | 2 | null | 2019-04-17T07:00:08 | 2019-04-10T06:21:22 | Python | UTF-8 | Python | false | false | 627 | py | import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import tsfresh.feature_extraction.feature_calculators as fc
import matplotlib.pyplot as plt
import warnings
train_path1 = '../Motor-Data/Motor_tain/N/00aab5a5-e096-4e4e-803f-a8525506cbd8_F.csv'
train_path2 = '../Motor-Data/Motor_tain/N/00aab5a5-e096-4e4e-803f-a8525506cbd8_B.csv'
df1 = pd.read_csv(train_path1, header = 0)
df2 = pd.read_csv(train_path2, header = 0)
df = pd.DataFrame(data = np.column_stack([df1['ai1'],df1['ai2'], df2['ai1'], df2['ai2'], range(79999), '1']), columns = ['F_ai1','F_ai2', 'B_ai1', 'B_ai2', 'time', 'id'])
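# Minimal sketch (illustrative, not from the original script) of applying the
# imported tsfresh calculators to the signal columns assembled above; the
# choice of features here is an assumption:
for col in ['F_ai1', 'F_ai2', 'B_ai1', 'B_ai2']:
    series = df[col].astype(float).values
    print(col, fc.abs_energy(series), fc.mean(series), fc.standard_deviation(series))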
| [
"[email protected]"
] | |
6e1066a32d3b678c93a683c91c32ca9925549774 | 72d010d00355fc977a291c29eb18aeb385b8a9b0 | /MPK261/__init__.py | 1878e1129184af07da8510e9e370e01adae46916 | [] | no_license | maratbakirov/AbletonLive10_MIDIRemoteScripts | bf0749c5c4cce8e83b23f14f671e52752702539d | ed1174d9959b20ed05fb099f0461bbc006bfbb79 | refs/heads/master | 2021-06-16T19:58:34.038163 | 2021-05-09T11:46:46 | 2021-05-09T11:46:46 | 203,174,328 | 0 | 0 | null | 2019-08-19T13:04:23 | 2019-08-19T13:04:22 | null | UTF-8 | Python | false | false | 741 | py | # Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/MPK261/__init__.py
# Compiled at: 2018-04-23 20:27:04
from __future__ import absolute_import, print_function, unicode_literals
from .MPK261 import MPK261
from _Framework.Capabilities import controller_id, inport, outport, CONTROLLER_ID_KEY, PORTS_KEY, NOTES_CC, SCRIPT, REMOTE
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536, product_ids=[
37], model_name='MPK261'),
PORTS_KEY: [
inport(props=[NOTES_CC, SCRIPT, REMOTE]),
outport(props=[SCRIPT, REMOTE])]}
def create_instance(c_instance):
return MPK261(c_instance)
| [
"[email protected]"
] | |
579153317b369ad77af1c66c5cb43036e863cc19 | 5be8b0f2ee392abeee6970e7a6364ac9a5b8ceaa | /xiaojian/second_phase/day12/http_sever2.0.py | 12ccde8198046391e24f9698efd843eacb0c011c | [] | no_license | Wellsjian/20180826 | 424b65f828f0174e4d568131da01dafc2a36050a | 0156ad4db891a2c4b06711748d2624080578620c | refs/heads/master | 2021-06-18T12:16:08.466177 | 2019-09-01T10:06:44 | 2019-09-01T10:06:44 | 204,462,572 | 0 | 1 | null | 2021-04-20T18:26:03 | 2019-08-26T11:38:09 | JavaScript | UTF-8 | Python | false | false | 3,467 | py | """
HTTP 2.0
Interface design:
    1. Provide a handle; attributes and methods are called through the handle
       obj = open()
       lock = Lock()
    2. Instantiate an object; configure and start the service through the object
       t = Thread()
       p = Process()
    3. Whatever cannot be decided on the user's behalf is passed in through parameters, according to the functional requirements
    4. Problems the library can solve should not be left to the user; problems the user must solve can be handled by overriding methods
Technical analysis:
    HTTP protocol
Approach:
    1. Encapsulate the server in a class
    2. Write the code from the user's point of view
"""
# Concrete HTTP server functionality.
from socket import *
from select import *
class HTTPSever:
def __init__(self, host, port, dir):
self.addrss = (host, port)
self.host = host
self.port = port
self.dir = dir
self.rlist = []
self.wlist = []
self.xlist = []
self.create_socket()
self.bind()
    # Create the socket
def create_socket(self):
self.sockfd = socket()
self.sockfd.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    # Bind the address
def bind(self):
self.sockfd.bind(self.addrss)
    # Start the service
def server_forver(self):
self.sockfd.listen(5)
        print("Listening on port %d" % self.port)
self.rlist.append(self.sockfd)
while True:
rs, ws, xs = select(self.rlist, self.wlist, self.xlist)
self.do_rlist(rs)
    # Handle a request
def handle(self, connfd):
request = connfd.recv(1024)
if not request:
connfd.close()
self.rlist.remove(connfd)
return
        # Extract the request line
request_line = request.splitlines()[0]
info = request_line.decode().split(" ")[1]
print(connfd.getpeername(), ":", info)
if info == "/" or info[-5:] == ".html":
self.get_html(connfd, info)
else:
self.get_data(connfd,info)
def get_data(self,connfd,info):
response = "HTTP/1.1 200 ok\r\n"
response += "\r\n"
response += "<h1>Waiting for the HTTPSEVER 3.0<h1>"
connfd.send(response.encode())
def get_html(self,connfd,info):
if info == "/":
html_name = self.dir + "/index.html"
else:
html_name = self.dir + info
try:
obj = open(html_name)
except Exception:
response = "HTTP/1.1 404 not found\r\n"
response += "Content_Type:text/html\r\n"
response += "\r\n"
response += "<h1>sorry.....<h1>"
else:
response = "HTTP/1.1 200 OK\r\n"
response += "Content_Type:text/html\r\n"
response += "\r\n"
response += obj.read()
finally:
connfd.send(response.encode())
    # Handle events from the monitored rlist
def do_rlist(self, rs):
for r in rs:
if r is self.sockfd:
connfd, addr = self.sockfd.accept()
print("Connect from ", addr)
self.rlist.append(connfd)
else:
self.handle(r)
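# A minimal sketch of point 4 in the module docstring: users can override
# get_data() to change how non-HTML requests are answered (HTTPSever and the
# method signature are as defined above; the response body is illustrative):
class CustomHTTPSever(HTTPSever):
    def get_data(self, connfd, info):
        response = "HTTP/1.1 200 ok\r\n"
        response += "\r\n"
        response += "<h1>custom handler for %s</h1>" % info
        connfd.send(response.encode())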
if __name__ == "__main__":
    # Quickly stand up an HTTP service with the HTTPSever class to serve your own pages
# HOST = "0.0.0.0"
# PORT = 22222
# ADDR = (HOST, PORT)
# DIR = "./static"
HOST = "172.40.74.151"
PORT = 8888
    DIR = "./hfklswn"
    # Instantiate the object
httpfd = HTTPSever(HOST, PORT, DIR)
    # Start the HTTP service
httpfd.server_forver()
| [
"[email protected]"
] | |
9f225b969a872422e058e823eb3bdbf8bb5b7223 | 1d605dbc4b6ff943ac3fffd2f610b698534bcdd2 | /trainShallowClassifier_tttt_highlevel.py | bdf0ae16ab6d26a275edadd551fff3285699bfcd | [] | no_license | emilbols/EFT4Tops | fec75b9b4b97f2e1c7611694445e07c1c23038ab | 4ce00b4c0d2d75af56c677709e83de0e41bce6d7 | refs/heads/master | 2020-04-10T16:27:03.309960 | 2019-04-11T12:50:09 | 2019-04-11T12:50:09 | 161,145,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,819 | py | from ROOT import TFile, TTree, TChain, TCanvas, TH1D, TLegend, gROOT, gStyle
import sys
import ROOT
import os
import time
from argparse import ArgumentParser
from array import array
from math import *
import numpy as np
from collections import Counter
import pandas as pd
from scipy.interpolate import InterpolatedUnivariateSpline
import tensorflow as tf
from keras.utils import conv_utils
import root_numpy as rootnp
import matplotlib.pyplot as plt
from keras import initializers
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Dropout, Input, Convolution1D, Concatenate, Flatten
from keras.utils import np_utils
from keras.callbacks import EarlyStopping, ModelCheckpoint, LearningRateScheduler
from keras.optimizers import SGD,Adam
from keras.regularizers import l1, l2
from keras.regularizers import l1, l2
from keras.utils import to_categorical
from keras.layers.normalization import BatchNormalization
#from keras.utils.visualize_util import plot
from numpy.lib.recfunctions import stack_arrays
from sklearn.preprocessing import StandardScaler
from keras.models import load_model
from sklearn.metrics import roc_curve,roc_auc_score
from sklearn.model_selection import train_test_split
import pickle
from rootpy.plotting import Hist
from rootpy.plotting import Hist2D
from sklearn.neural_network import MLPClassifier
from keras import backend as K
from keras.engine.topology import Layer
class SortLayer(Layer):
    def __init__(self, output_dim, kernel_initializer='glorot_uniform', **kwargs):
        # output_dim is taken as an argument so compute_output_shape below is well defined
        self.output_dim = output_dim
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_size = conv_utils.normalize_tuple(1, 1, 'kernel_size')
        super(SortLayer, self).__init__(**kwargs)
    def build(self, input_shape):
        # Create a trainable weight variable for this layer.
        channel_axis = 1
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, 1)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel')
        super(SortLayer, self).build(input_shape)  # Be sure to call this at the end
    def call(self, x):
        # score each position with a 1x1 convolution, then reorder the sequence by score
        values = K.conv1d(x, self.kernel, strides = 1, padding = "valid", data_format = None, dilation_rate = 1)
        order = tf.contrib.framework.argsort(values, direction='ASCENDING')
        print order.shape
        x = x[order]
        return x
    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
def draw_roc(df, df2, label, color, draw_unc=False, ls='-', draw_auc=True, flavour = False):
newx = np.logspace(-3, 0, 100)
tprs = pd.DataFrame()
scores = []
if flavour:
cs = ( (df['isC'] == 0) & (df['isCC'] == 0) & (df['isGCC'] == 0) )
else:
cs = ( (df['isUD'] == 0) & (df['isS'] == 0) & (df['isG'] == 0) )
df = df[cs]
df2 = df2[cs]
tmp_fpr, tmp_tpr, _ = roc_curve(np.clip(df['isB']+df['isBB']+df['isLeptonicB_C']+df['isLeptonicB']+df['isGBB'],0,1), df2['prob_isBB']+df2['prob_isB'])
scores.append(
roc_auc_score(np.clip(df['isB']+df['isBB']+df['isLeptonicB_C']+df['isLeptonicB']+df['isGBB'],0,1), df2['prob_isB']+df2['prob_isBB'])
)
coords = pd.DataFrame()
coords['fpr'] = tmp_fpr
coords['tpr'] = tmp_tpr
clean = coords.drop_duplicates(subset=['fpr'])
spline = InterpolatedUnivariateSpline(clean.fpr, clean.tpr,k=1)
tprs = spline(newx)
scores = np.array(scores)
auc = ' AUC: %.3f +/- %.3f' % (scores.mean(), scores.std()) if draw_auc else ''
plt.plot(tprs, newx, label=label + auc, c=color, ls=ls)
def makeROC(fpr, tpr, thresholds,AUC,outfile,signal_label, background_label):
c = TCanvas("c","c",700,600)
ROOT.gPad.SetMargin(0.15,0.07,0.15,0.05)
ROOT.gPad.SetLogy(0)
ROOT.gPad.SetGrid(1,1)
ROOT.gStyle.SetGridColor(17)
roc = ROOT.TGraph(len(fpr),tpr,fpr)
roc.SetLineColor(2)
roc.SetLineWidth(2)
roc.SetTitle(";Signal efficiency (%s); Background efficiency (%s)"%(signal_label, background_label))
roc.GetXaxis().SetTitleOffset(1.4)
roc.GetXaxis().SetTitleSize(0.045)
roc.GetYaxis().SetTitleOffset(1.4)
roc.GetYaxis().SetTitleSize(0.045)
roc.GetXaxis().SetRangeUser(0,1)
roc.GetYaxis().SetRangeUser(0.000,1)
roc.Draw("AL")
ROOT.gStyle.SetTextFont(42)
t = ROOT.TPaveText(0.2,0.84,0.4,0.94,"NBNDC")
t.SetTextAlign(11)
t.SetFillStyle(0)
t.SetBorderSize(0)
t.AddText('AUC = %.3f'%AUC)
t.Draw('same')
c.SaveAs(outfile)
def makeDiscr(discr_dict,outfile,xtitle="discriminator"):
c = ROOT.TCanvas("c","c",800,500)
ROOT.gStyle.SetOptStat(0)
ROOT.gPad.SetMargin(0.15,0.1,0.2,0.1)
#ROOT.gPad.SetLogy(1)
#ROOT.gPad.SetGrid(1,1)
ROOT.gStyle.SetGridColor(17)
l = TLegend(0.17,0.75,0.88,0.88)
l.SetTextSize(0.055)
l.SetBorderSize(0)
l.SetFillStyle(0)
l.SetNColumns(2)
colors = [2,4,8,ROOT.kCyan+2]
counter = 0
for leg,discr in discr_dict.iteritems():
a = Hist(30, 0, 1)
#fill_hist_with_ndarray(a, discr)
a.fill_array(discr)
a.SetLineColor(colors[counter])
a.SetLineWidth(2)
a.GetXaxis().SetTitle(xtitle)
a.GetXaxis().SetLabelSize(0.05)
a.GetXaxis().SetTitleSize(0.05)
a.GetXaxis().SetTitleOffset(1.45)
a.GetYaxis().SetTitle("a.u.")
a.GetYaxis().SetTickSize(0)
a.GetYaxis().SetLabelSize(0)
a.GetYaxis().SetTitleSize(0.06)
a.GetYaxis().SetTitleOffset(0.9)
a.Scale(1./a.Integral())
#a.GetYaxis().SetRangeUser(0.00001,100)
a.GetYaxis().SetRangeUser(0,0.2)
if counter == 0: a.draw("hist")
else: a.draw("same hist")
l.AddEntry(a,leg,"l")
counter += 1
l.Draw("same")
c.SaveAs(outfile)
def drawTrainingCurve(input,output):
hist = pickle.load(open(input,"rb"))
tr_acc = hist["acc"]
tr_loss = hist["loss"]
val_acc = hist["val_acc"]
val_loss = hist["val_loss"]
epochs = range(len(tr_acc))
plt.figure(1)
plt.subplot(211)
plt.plot(epochs, tr_acc,label="training")
plt.plot(epochs, val_acc, label="validation")
plt.legend(loc='best')
plt.grid(True)
#plt.xlabel("number of epochs")
plt.ylabel("accuracy")
plt.subplot(212)
plt.plot(epochs, tr_loss, label="training")
plt.plot(epochs, val_loss, label="validation")
plt.legend(loc='best')
plt.grid(True)
plt.xlabel("number of epochs")
plt.ylabel("loss")
plt.savefig(output)
gROOT.SetBatch(1)
OutputDir = 'Model_Shallow_highlevel_LO'
if not os.path.isdir(OutputDir):
    os.makedirs(OutputDir)
Y = np.load('LO_highlevel_train/truth.npy')
X_flat = np.load('LO_highlevel_train/features_flat.npy')
print Y.shape
SM = (Y == 0)
left = ((Y == 1) | (Y == 2))
leftright = ((Y == 3) | (Y == 4) )
right = (Y == 5)
Y[left] = 1
Y[leftright] = 2
Y[right] = 3
cut = len(Y[SM])/2
Y = Y[cut:]
SM = (Y == 0)
left = ((Y == 1))
right = ((Y == 2))
X_flat = X_flat[cut:]
print len(Y)
print len(Y[left])
print len(Y[SM])
print len(Y[right])
labels = Y
Y = to_categorical(labels, num_classes=4)
X_flat_train, X_flat_test, Y_train, Y_test, y_train, y_test = train_test_split(X_flat, Y, labels, test_size=0.2,random_state = 930607)
adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
nclasses = 4
dropoutRate = 0.1
Inputs = Input(shape=(22,))
x = BatchNormalization(momentum=0.6,name='globalvars_input_batchnorm') (Inputs)
x = Dense(50,activation='relu',kernel_initializer='lecun_uniform',name='dense_0')(x)
x = Dropout(dropoutRate)(x)
pred=Dense(nclasses, activation='softmax',kernel_initializer='lecun_uniform',name='ID_pred')(x)
model = Model(inputs=Inputs,outputs=pred)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
print model.summary()
X_train = X_flat_train
X_test = X_flat_test
train_history = model.fit(X_train, Y_train,
batch_size=512, epochs=200,
validation_data=(X_test, Y_test),
callbacks = [ModelCheckpoint(OutputDir + "/model_checkpoint_save.hdf5")],
shuffle=True,verbose=1)
pickle.dump(train_history.history,open(OutputDir + "/loss_and_acc.pkl",'wb'))
drawTrainingCurve(OutputDir+"/loss_and_acc.pkl",OutputDir+"/training_curve.pdf")
discr_dict = model.predict(X_test)
SM_discr = [(1-discr_dict[jdx,0]) for jdx in range(0,len(discr_dict[:,0])) if y_test[jdx] == 0]
EFT_discr = [(1-discr_dict[jdx,0]) for jdx in range(0,len(discr_dict[:,0])) if y_test[jdx] ==1 or y_test[jdx] == 2 or y_test[jdx] == 3]
fpr, tpr, thres = roc_curve(np.concatenate((np.zeros(len(SM_discr)),np.ones(len(EFT_discr)))),np.concatenate((SM_discr,EFT_discr)))
AUC = 1-roc_auc_score(np.concatenate((np.zeros(len(SM_discr)),np.ones(len(EFT_discr)))),np.concatenate((SM_discr,EFT_discr)))
makeROC(fpr, tpr, thres,AUC,OutputDir+"/roc_SMvsEFT.pdf","EFT","SM")
makeDiscr({"EFT":EFT_discr,"SM":SM_discr},OutputDir+"/discr_SMvsEFT.pdf","discriminator P(t_{L}) + P(t_{R})")
tL_discr = [discr_dict[jdx,1]/(1-discr_dict[jdx,0]) for jdx in range(0,len(discr_dict[:,0])) if y_test[jdx] == 1]
tLR_discr = [discr_dict[jdx,1]/(1-discr_dict[jdx,0]) for jdx in range(0,len(discr_dict[:,0])) if y_test[jdx] == 2]
tR_discr = [discr_dict[jdx,1]/(1-discr_dict[jdx,0]) for jdx in range(0,len(discr_dict[:,0])) if y_test[jdx] == 3]
fpr, tpr, thres = roc_curve(np.concatenate((np.zeros(len(tR_discr)),np.ones(len(tL_discr)))),np.concatenate((tR_discr,tL_discr)))
AUC = 1-roc_auc_score(np.concatenate((np.zeros(len(tR_discr)),np.ones(len(tL_discr)))),np.concatenate((tR_discr,tL_discr)))
makeROC(fpr, tpr, thres,AUC,OutputDir+"/roc_tLvstR.pdf","t_{L}","t_{R}")
makeDiscr({"tL":tL_discr,"tR":tR_discr},OutputDir+"/discr_tLvstR.pdf","discriminator #frac{P(t_{L})}{P(t_{L}) + P(t_{R})}")
| [
"[email protected]"
] | |
8015fef8dfd115d1d50b8421196c5d64d05910a8 | 1e88ef7359fc4a6bb4c8d0886971086e14124f15 | /models/CaptionModel.py | 19eb207e6466770b198a0f484cc6e30c9fc8e6be | [] | no_license | sunyuxi/RobustChangeCaptioning | 2e95e6b2e36adce0e2603be0003d28b3431a323d | c3ea1206a34cae8879a2accffc11c15b8fce0181 | refs/heads/master | 2023-08-17T16:02:22.527198 | 2021-08-19T20:55:44 | 2021-08-19T20:55:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,359 | py | # This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
            #beam_seq : tensor containing the beams
            #beam_seq_logprobs: tensor containing the beam logprobs
            #beam_logprobs_sum: tensor containing joint logprobs
            #OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':local_unaug_logprob})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
                #we'll need these as references when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
cfg = kwargs['cfg']
gpu_ids = cfg.gpu_id
device = torch.device("cuda:%d" % gpu_ids[0])
beam_size = cfg.model.speaker.get('beam_size', 10)
group_size = cfg.model.speaker.get('group_size', 1)
diversity_lambda = cfg.model.speaker.get('diversity_lambda', 0.5)
decoding_constraint = cfg.model.speaker.get('decoding_constraint', 0)
max_ppl = cfg.model.speaker.get('max_ppl', 0)
bdash = beam_size // group_size # beam per group
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size)
done_beams_table = [[] for _ in range(group_size)]
state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm].data.float()
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(device), float('-inf'))
# suppress UNK tokens in the decoding (here <UNK> has an index of 1)
logprobsf[:, 1] = logprobsf[:, 1] - 1000
# diversity is added here
# the function directly modifies the logprobsf values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == 0 or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
if max_ppl:
final_beam['p'] = final_beam['p'] / (t-divm+1)
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm]
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it.to(device), *(args[divm] + [state_table[divm]]))
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = reduce(lambda a,b:a+b, done_beams_table)
return done_beams
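# Usage sketch (hedged, not from the original file): a subclass is expected to
# provide self.seq_length and a get_logprobs_state() method; beam_search is
# then typically driven from a sampling method, roughly:
#
#   done_beams = model.beam_search(init_state, init_logprobs, *extra_args, cfg=cfg)
#   best_seq = done_beams[0]['seq']   # beams come back sorted by joint logprob
#
# where cfg exposes gpu_id and model.speaker.{beam_size, group_size,
# diversity_lambda, ...}, matching the attribute reads above.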
| [
"[email protected]"
] | |
8021537043bd81114de1c88e600fa679c9929fbe | db9dd14e4f5acc3f8ab1e2d6abc296489a896a23 | /factor_catalog.py | 84e055cc211f31175e83a306e32f02b5f901ebfd | [] | no_license | IVRL/GANLocalEditing | 78696cbe052b1060bd3a5ccda3556d53ff0ddf9e | 4c87c1fb332113f38fc4e5ff7424b9655ca0e187 | refs/heads/master | 2021-04-24T12:42:04.789011 | 2020-05-02T17:43:17 | 2020-05-02T20:39:56 | 250,119,837 | 155 | 18 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | '''
To download pickled instances for FFHQ and LSUN-Bedrooms, visit: https://drive.google.com/open?id=1GYzEzOCaI8FUS6JHdt6g9UfNTmpO08Tt
'''
import torch
import ptutils
from spherical_kmeans import MiniBatchSphericalKMeans
def one_hot(a, n):
import numpy as np
b = np.zeros((a.size, n))
b[np.arange(a.size), a] = 1
return b
class FactorCatalog:
def __init__(self, k, random_state=0, factorization=None, **kwargs):
if factorization is None:
factorization = MiniBatchSphericalKMeans
self._factorization = factorization(n_clusters=k, random_state=random_state, **kwargs)
self.annotations = {}
def _preprocess(self, X):
X_flat = ptutils.partial_flat(X)
return X_flat
def _postprocess(self, labels, X, raw):
heatmaps = torch.from_numpy(one_hot(labels, self._factorization.cluster_centers_.shape[0])).float()
heatmaps = ptutils.partial_unflat(heatmaps, N=X.shape[0], H=X.shape[-1])
if raw:
heatmaps = ptutils.MultiResolutionStore(heatmaps, 'nearest')
return heatmaps
else:
heatmaps = ptutils.MultiResolutionStore(torch.cat([(heatmaps[:, v].sum(1, keepdim=True)) for v in
self.annotations.values()], 1), 'nearest')
labels = list(self.annotations.keys())
return heatmaps, labels
def fit_predict(self, X, raw=False):
self._factorization.fit(self._preprocess(X))
labels = self._factorization.labels_
return self._postprocess(labels, X, raw)
def predict(self, X, raw=False):
labels = self._factorization.predict(self._preprocess(X))
return self._postprocess(labels, X, raw)
def __repr__(self):
header = '{} catalog:'.format(type(self._factorization))
return '{}\n\t{}'.format(header, self.annotations)
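# Usage sketch (hedged): X is assumed to be an activation tensor that
# ptutils.partial_flat can flatten, e.g. shape (N, C, H, W); k and the
# annotation labels below are illustrative only:
#
#   catalog = FactorCatalog(k=8)
#   raw_heatmaps = catalog.fit_predict(X, raw=True)
#   catalog.annotations = {'background': [0, 3], 'object': [1, 2, 4, 5, 6, 7]}
#   heatmaps, labels = catalog.predict(X)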
| [
"[email protected]"
] | |
d81d76e9d8b22c664357e05b002bbb03f28bb514 | bdbd35f1d2ac6a303fbf68b54b4c9c7d5c5f2568 | /static_frame/test/unit/test_frame_iter.py | 15ed042b08bb9864afe3e6f3b2baae453318789e | [
"MIT"
] | permissive | leemit/static-frame | 3d6818c67e71a701ec93f439d3b16c40813e1540 | 2191ff2e05947851ef929fbaf49a81f75920483f | refs/heads/master | 2023-03-28T06:19:06.231726 | 2021-03-26T20:45:40 | 2021-03-26T20:45:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,260 | py | import unittest
import typing as tp
import numpy as np
import frame_fixtures as ff
import static_frame as sf
# from static_frame import Index
from static_frame import IndexHierarchy
# from static_frame import IndexHierarchyGO
# from static_frame import IndexYearMonth
# from static_frame import IndexYearGO
# from static_frame import IndexYear
from static_frame import IndexDate
# from static_frame import IndexDateGO
from static_frame import Series
from static_frame import Frame
from static_frame import FrameGO
from static_frame import TypeBlocks
# from static_frame import mloc
# from static_frame import ILoc
from static_frame import HLoc
# from static_frame import DisplayConfig
# from static_frame import IndexAutoFactory
from static_frame.test.test_case import TestCase
# from static_frame.test.test_case import skip_win
# from static_frame.test.test_case import skip_linux_no_display
# from static_frame.test.test_case import skip_pylt37
# from static_frame.test.test_case import temp_file
# from static_frame.core.exception import ErrorInitFrame
# from static_frame.core.exception import ErrorInitIndex
from static_frame.core.exception import AxisInvalid
nan = np.nan
class TestUnit(TestCase):
#---------------------------------------------------------------------------
def test_frame_iter_a(self) -> None:
records = (
(1, 2, 'a', False, True),
(30, 50, 'b', True, False))
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('x','y'))
self.assertEqual((f1.keys() == f1.columns).all(), True)
self.assertEqual([x for x in f1.columns], ['p', 'q', 'r', 's', 't'])
self.assertEqual([x for x in f1], ['p', 'q', 'r', 's', 't'])
def test_frame_iter_array_a(self) -> None:
records = (
(1, 2, 'a', False, True),
(30, 50, 'b', True, False))
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('x','y'))
self.assertEqual(
next(iter(f1.iter_array(axis=0))).tolist(),
[1, 30])
self.assertEqual(
next(iter(f1.iter_array(axis=1))).tolist(),
[1, 2, 'a', False, True])
def test_frame_iter_array_b(self) -> None:
arrays = list(np.random.rand(1000) for _ in range(100))
f1 = Frame.from_items(
zip(range(100), arrays)
)
# iter columns
post = f1.iter_array(axis=0).apply_pool(np.sum, max_workers=4, use_threads=True)
self.assertEqual(post.shape, (100,))
self.assertAlmostEqual(f1.sum().sum(), post.sum())
post = f1.iter_array(axis=0).apply_pool(np.sum, max_workers=4, use_threads=False)
self.assertEqual(post.shape, (100,))
self.assertAlmostEqual(f1.sum().sum(), post.sum())
def test_frame_iter_array_c(self) -> None:
arrays = []
for _ in range(8):
arrays.append(list(range(8)))
f1 = Frame.from_items(
zip(range(8), arrays)
)
func = {x: chr(x+65) for x in range(8)}
# iter columns
post = f1.iter_element().apply_pool(func, max_workers=4, use_threads=True)
self.assertEqual(post.to_pairs(0),
((0, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (1, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (2, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (3, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (4, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (5, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (6, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))), (7, ((0, 'A'), (1, 'B'), (2, 'C'), (3, 'D'), (4, 'E'), (5, 'F'), (6, 'G'), (7, 'H'))))
)
def test_frame_iter_array_d(self) -> None:
arrays = []
for _ in range(8):
arrays.append(list(range(8)))
f1 = Frame.from_items(
zip(range(8), arrays)
)
        # when called with a pool, values are given to the func as a single argument, which for an element iteration is a tuple of (coord, value)
func = lambda arg: arg[0][1]
# iter columns
post = f1.iter_element_items().apply_pool(func, max_workers=4, use_threads=True)
self.assertEqual(post.to_pairs(0),
((0, ((0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0))), (1, ((0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1))), (2, ((0, 2), (1, 2), (2, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2))), (3, ((0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3))), (4, ((0, 4), (1, 4), (2, 4), (3, 4), (4, 4), (5, 4), (6, 4), (7, 4))), (5, ((0, 5), (1, 5), (2, 5), (3, 5), (4, 5), (5, 5), (6, 5), (7, 5))), (6, ((0, 6), (1, 6), (2, 6), (3, 6), (4, 6), (5, 6), (6, 6), (7, 6))), (7, ((0, 7), (1, 7), (2, 7), (3, 7), (4, 7), (5, 7), (6, 7), (7, 7))))
)
def test_frame_iter_array_e(self) -> None:
f = sf.Frame.from_dict(
dict(diameter=(12756, 6792, 142984),
mass=(5.97, 0.642, 1898)),
index=('Earth', 'Mars', 'Jupiter'),
dtypes=dict(diameter=np.int64))
post = f.iter_array(axis=0).apply(np.sum)
self.assertTrue(post.dtype == float)
def test_frame_iter_array_f(self) -> None:
f = sf.Frame(np.arange(12).reshape(3,4),
index=IndexDate.from_date_range('2020-01-01', '2020-01-03'))
post = f.iter_array(axis=0).apply(np.sum, name='foo')
self.assertEqual(post.name, 'foo')
self.assertEqual(
f.iter_array(axis=0).apply(np.sum).to_pairs(),
((0, 12), (1, 15), (2, 18), (3, 21))
)
self.assertEqual(
f.iter_array(axis=1).apply(np.sum).to_pairs(),
((np.datetime64('2020-01-01'), 6), (np.datetime64('2020-01-02'), 22), (np.datetime64('2020-01-03'), 38))
)
def test_frame_iter_array_g(self) -> None:
f = sf.FrameGO(index=IndexDate.from_date_range('2020-01-01', '2020-01-03'))
post = list(f.iter_array(axis=0))
self.assertEqual(post, [])
post = list(f.iter_array(axis=1))
self.assertEqual([x.tolist() for x in post], [[], [], []])
#---------------------------------------------------------------------------
def test_frame_iter_tuple_a(self) -> None:
post = tuple(sf.Frame.from_elements(range(5)).iter_tuple(axis=0, constructor=tuple))
self.assertEqual(post, ((0, 1, 2, 3, 4),))
def test_frame_iter_tuple_b(self) -> None:
post = tuple(sf.Frame.from_elements(range(3), index=tuple('abc')).iter_tuple(axis=0))
self.assertEqual(post, ((0, 1, 2),))
self.assertEqual(tuple(post[0]._asdict().items()),
(('a', 0), ('b', 1), ('c', 2))
)
def test_frame_iter_tuple_c(self) -> None:
with self.assertRaises(AxisInvalid):
post = tuple(sf.Frame.from_elements(range(5)).iter_tuple(axis=2))
def test_frame_iter_tuple_d(self) -> None:
f = sf.FrameGO(index=IndexDate.from_date_range('2020-01-01', '2020-01-03'))
post = list(f.iter_tuple(constructor=tuple, axis=0))
self.assertEqual(post, [])
post = list(f.iter_tuple(axis=1))
self.assertEqual([len(x) for x in post], [0, 0, 0])
def test_frame_iter_tuple_e(self) -> None:
records = (
(1, 2, 'a', False, True),
(30, 50, 'b', True, False))
f1 = FrameGO.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('x','y'))
class Record(tp.NamedTuple):
x: object
y: object
post1 = list(f1.iter_tuple(constructor=Record))
self.assertTrue(all(isinstance(x, Record) for x in post1))
post2 = list(f1.iter_tuple(constructor=tuple))
self.assertEqual(post2,
[(1, 30), (2, 50), ('a', 'b'), (False, True), (True, False)])
#---------------------------------------------------------------------------
def test_frame_iter_series_a(self) -> None:
f1 = ff.parse('f(Fg)|s(2,8)|i(I,str)|c(Ig,str)|v(int)')
post1 = tuple(f1.iter_series(axis=0))
self.assertEqual(len(post1), 8)
self.assertEqual(post1[0].to_pairs(),
(('zZbu', -88017), ('ztsv', 92867)))
post2 = tuple(f1.iter_series(axis=1))
self.assertEqual(len(post2), 2)
self.assertEqual(post2[0].to_pairs(),
(('zZbu', -88017), ('ztsv', 162197), ('zUvW', -3648), ('zkuW', 129017), ('zmVj', 58768), ('z2Oo', 84967), ('z5l6', 146284), ('zCE3', 137759)))
#---------------------------------------------------------------------------
def test_frame_iter_tuple_items_a(self) -> None:
records = (
(1, 2, 'a', False, True),
(30, 50, 'b', True, False))
f1 = FrameGO.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('x','y'))
post1 = list(f1.iter_tuple_items(constructor=list))
self.assertEqual(post1, [('p', [1, 30]), ('q', [2, 50]), ('r', ['a', 'b']), ('s', [False, True]), ('t', [True, False])])
#---------------------------------------------------------------------------
def test_frame_iter_element_a(self) -> None:
# reindex both axis
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
self.assertEqual(
[x for x in f1.iter_element()],
[2, 2, 'a', False, False, 30, 34, 'b', True, False, 2, 95, 'c', False, False, 30, 73, 'd', True, True])
self.assertEqual(list(f1.iter_element(axis=1)),
[2, 30, 2, 30, 2, 34, 95, 73, 'a', 'b', 'c', 'd', False, True, False, True, False, False, False, True])
self.assertEqual([x for x in f1.iter_element_items()],
[(('w', 'p'), 2), (('w', 'q'), 2), (('w', 'r'), 'a'), (('w', 's'), False), (('w', 't'), False), (('x', 'p'), 30), (('x', 'q'), 34), (('x', 'r'), 'b'), (('x', 's'), True), (('x', 't'), False), (('y', 'p'), 2), (('y', 'q'), 95), (('y', 'r'), 'c'), (('y', 's'), False), (('y', 't'), False), (('z', 'p'), 30), (('z', 'q'), 73), (('z', 'r'), 'd'), (('z', 's'), True), (('z', 't'), True)])
post1 = f1.iter_element().apply(lambda x: '_' + str(x) + '_')
self.assertEqual(post1.to_pairs(0),
(('p', (('w', '_2_'), ('x', '_30_'), ('y', '_2_'), ('z', '_30_'))), ('q', (('w', '_2_'), ('x', '_34_'), ('y', '_95_'), ('z', '_73_'))), ('r', (('w', '_a_'), ('x', '_b_'), ('y', '_c_'), ('z', '_d_'))), ('s', (('w', '_False_'), ('x', '_True_'), ('y', '_False_'), ('z', '_True_'))), ('t', (('w', '_False_'), ('x', '_False_'), ('y', '_False_'), ('z', '_True_')))))
post2 = f1.iter_element(axis=1).apply(lambda x: '_' + str(x) + '_')
self.assertEqual(post2.to_pairs(0),
(('p', (('w', '_2_'), ('x', '_30_'), ('y', '_2_'), ('z', '_30_'))), ('q', (('w', '_2_'), ('x', '_34_'), ('y', '_95_'), ('z', '_73_'))), ('r', (('w', '_a_'), ('x', '_b_'), ('y', '_c_'), ('z', '_d_'))), ('s', (('w', '_False_'), ('x', '_True_'), ('y', '_False_'), ('z', '_True_'))), ('t', (('w', '_False_'), ('x', '_False_'), ('y', '_False_'), ('z', '_True_')))))
def test_frame_iter_element_b(self) -> None:
# reindex both axis
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
# support working with mappings
post = f1.iter_element().map_any({2: 200, False: 200})
self.assertEqual(post.to_pairs(0),
(('p', (('w', 200), ('x', 30), ('y', 200), ('z', 30))), ('q', (('w', 200), ('x', 34), ('y', 95), ('z', 73))), ('r', (('w', 'a'), ('x', 'b'), ('y', 'c'), ('z', 'd'))), ('s', (('w', 200), ('x', True), ('y', 200), ('z', True))), ('t', (('w', 200), ('x', 200), ('y', 200), ('z', True))))
)
def test_frame_iter_element_c(self) -> None:
a2 = np.array([
[None, None],
[None, 1],
[None, 5]
], dtype=object)
a1 = np.array([True, False, True])
a3 = np.array([['a'], ['b'], ['c']])
tb1 = TypeBlocks.from_blocks((a3, a1, a2))
f1 = Frame(tb1,
index=self.get_letters(None, tb1.shape[0]),
columns=IndexHierarchy.from_product(('i', 'ii'), ('a', 'b'))
)
values = list(f1.iter_element())
self.assertEqual(values,
['a', True, None, None, 'b', False, None, 1, 'c', True, None, 5]
)
f2 = f1.iter_element().apply(lambda x: str(x).lower().replace('e', ''))
self.assertEqual(f1.columns.__class__, f2.columns.__class__,)
self.assertEqual(f2.to_pairs(0),
((('i', 'a'), (('a', 'a'), ('b', 'b'), ('c', 'c'))), (('i', 'b'), (('a', 'tru'), ('b', 'fals'), ('c', 'tru'))), (('ii', 'a'), (('a', 'non'), ('b', 'non'), ('c', 'non'))), (('ii', 'b'), (('a', 'non'), ('b', '1'), ('c', '5'))))
)
def test_frame_iter_element_d(self) -> None:
f1 = sf.Frame.from_elements(['I', 'II', 'III'], columns=('A',))
f2 = sf.Frame.from_elements([67, 28, 99], columns=('B',), index=('I', 'II', 'IV'))
post = f1['A'].iter_element().map_any(f2['B'])
# if we do not match the mapping, we keep the value.
self.assertEqual(post.to_pairs(),
((0, 67), (1, 28), (2, 'III')))
def test_frame_iter_element_e(self) -> None:
f1 = Frame.from_records(np.arange(9).reshape(3, 3))
self.assertEqual(list(f1.iter_element(axis=1)),
[0, 3, 6, 1, 4, 7, 2, 5, 8])
mapping = {x: x*3 for x in range(9)}
f2 = f1.iter_element(axis=1).map_all(mapping)
self.assertEqual([d.kind for d in f2.dtypes.values],
['i', 'i', 'i'])
#---------------------------------------------------------------------------
def test_frame_iter_group_a(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index,name='foo')
f = f.set_index_hierarchy(('p', 'q'), drop=True)
with self.assertRaises(AxisInvalid):
_ = f.iter_group('s', axis=-1).apply(lambda x: x.shape)
post = f.iter_group('s').apply(lambda x: x.shape)
self.assertEqual(post.to_pairs(),
((False, (2, 3)), (True, (2, 3)))
)
def test_frame_iter_group_b(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index, name='foo')
post = f.iter_group(['p', 'q']).apply(len)
self.assertEqual(post.to_pairs(),
((('A', 1), 1), (('A', 2), 1), (('B', 1), 1), (('B', 2), 1))
)
def test_frame_iter_group_c(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index, name='foo')
with self.assertRaises(TypeError):
next(iter(f.iter_group(foo='x')))
with self.assertRaises(TypeError):
next(iter(f.iter_group(3, 5)))
self.assertEqual(next(iter(f.iter_group('q'))).to_pairs(0),
(('p', (('z', 'A'), ('w', 'B'))), ('q', (('z', 1), ('w', 1))), ('r', (('z', 'a'), ('w', 'c'))), ('s', (('z', False), ('w', False))), ('t', (('z', False), ('w', False))))
)
def test_frame_iter_group_d(self) -> None:
f = sf.Frame.from_element(1, columns=[1,2,3], index=['a'])
empty = f.reindex([])
self.assertEqual(list(empty.iter_element()), [])
self.assertEqual(list(empty.iter_group(key=1)), [])
def test_frame_iter_group_e(self) -> None:
f = sf.Frame.from_element(None, columns=[1,2,3], index=['a'])
empty = f.reindex([])
self.assertEqual(list(empty.iter_element()), [])
self.assertEqual(list(empty.iter_group(key=1)), [])
def test_frame_iter_group_f(self) -> None:
f = sf.Frame(np.arange(3).reshape(1,3), columns=tuple('abc'))
f = f.drop.loc[0]
post1 = tuple(f.iter_group(['b','c']))
self.assertEqual(post1, ())
post2 = tuple(f.iter_group('a'))
self.assertEqual(post2, ())
#---------------------------------------------------------------------------
def test_frame_iter_group_items_a(self) -> None:
# testing a hierarchical index and columns, selecting column with a tuple
records = (
('a', 999999, 0.1),
('a', 201810, 0.1),
('b', 999999, 0.4),
('b', 201810, 0.4))
f1 = Frame.from_records(records, columns=list('abc'))
f1 = f1.set_index_hierarchy(['a', 'b'], drop=False)
f1 = f1.relabel_level_add(columns='i')
groups = list(f1.iter_group_items(('i', 'a'), axis=0))
self.assertEqual(groups[0][0], 'a')
self.assertEqual(groups[0][1].to_pairs(0),
((('i', 'a'), ((('a', 999999), 'a'), (('a', 201810), 'a'))), (('i', 'b'), ((('a', 999999), 999999), (('a', 201810), 201810))), (('i', 'c'), ((('a', 999999), 0.1), (('a', 201810), 0.1)))))
self.assertEqual(groups[1][0], 'b')
self.assertEqual(groups[1][1].to_pairs(0),
((('i', 'a'), ((('b', 999999), 'b'), (('b', 201810), 'b'))), (('i', 'b'), ((('b', 999999), 999999), (('b', 201810), 201810))), (('i', 'c'), ((('b', 999999), 0.4), (('b', 201810), 0.4)))))
def test_frame_iter_group_items_b(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index,name='foo')
f = f.set_index_hierarchy(('p', 'q'), drop=True)
post = f.iter_group_items('s').apply(
lambda k, x: f'{k}: {len(x)}')
self.assertEqual(post.to_pairs(),
((False, 'False: 2'), (True, 'True: 2'))
)
def test_frame_iter_group_items_c(self) -> None:
# Test optimized sorting approach. Data must have a non-object dtype and key must be single
data = np.array([[0, 1, 1, 3],
[3, 3, 2, 3],
[5, 5, 1, 3],
[7, 2, 2, 4]])
frame = sf.Frame(data, columns=tuple('abcd'), index=tuple('wxyz'))
# Column
groups = list(frame.iter_group_items('c', axis=0))
expected_pairs = [
(('a', (('w', 0), ('y', 5))),
('b', (('w', 1), ('y', 5))),
('c', (('w', 1), ('y', 1))),
('d', (('w', 3), ('y', 3)))),
(('a', (('x', 3), ('z', 7))),
('b', (('x', 3), ('z', 2))),
('c', (('x', 2), ('z', 2))),
('d', (('x', 3), ('z', 4))))]
self.assertEqual([1, 2], [group[0] for group in groups])
self.assertEqual(expected_pairs, [group[1].to_pairs(axis=0) for group in groups])
# Index
groups = list(frame.iter_group_items('w', axis=1))
expected_pairs = [
(('a', (('w', 0), ('x', 3), ('y', 5), ('z', 7))),), #type: ignore
(('b', (('w', 1), ('x', 3), ('y', 5), ('z', 2))), #type: ignore
('c', (('w', 1), ('x', 2), ('y', 1), ('z', 2)))),
(('d', (('w', 3), ('x', 3), ('y', 3), ('z', 4))),)] #type: ignore
self.assertEqual([0, 1, 3], [group[0] for group in groups])
self.assertEqual(expected_pairs, [group[1].to_pairs(axis=0) for group in groups])
def test_frame_iter_group_items_d(self) -> None:
# Test iterating with multiple key selection
data = np.array([[0, 1, 1, 3],
[3, 3, 2, 3],
[5, 5, 1, 3],
[7, 2, 2, 4]])
frame = sf.Frame(data, columns=tuple('abcd'), index=tuple('wxyz'))
# Column
groups = list(frame.iter_group_items(['c', 'd'], axis=0))
expected_pairs = [
(('a', (('w', 0), ('y', 5))),
('b', (('w', 1), ('y', 5))),
('c', (('w', 1), ('y', 1))),
('d', (('w', 3), ('y', 3)))),
(('a', (('x', 3),)),
('b', (('x', 3),)),
('c', (('x', 2),)),
('d', (('x', 3),))),
(('a', (('z', 7),)),
('b', (('z', 2),)),
('c', (('z', 2),)),
('d', (('z', 4),)))]
self.assertEqual([(1, 3), (2, 3), (2, 4)], [group[0] for group in groups])
self.assertEqual(expected_pairs, [group[1].to_pairs(axis=0) for group in groups])
# Index
groups = list(frame.iter_group_items(['x', 'y'], axis=1))
expected_pairs = [
(('c', (('w', 1), ('x', 2), ('y', 1), ('z', 2))),), #type: ignore
(('d', (('w', 3), ('x', 3), ('y', 3), ('z', 4))),), #type: ignore
(('a', (('w', 0), ('x', 3), ('y', 5), ('z', 7))), #type: ignore
('b', (('w', 1), ('x', 3), ('y', 5), ('z', 2)))),
]
self.assertEqual([(2, 1), (3, 3), (3, 5)], [group[0] for group in groups])
self.assertEqual(expected_pairs, [group[1].to_pairs(axis=0) for group in groups])
def test_frame_iter_group_items_e(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index,name='foo')
# using an array to select
self.assertEqual(
tuple(k for k, v in f.iter_group_items(f.columns == 's')),
((False,), (True,))
)
self.assertEqual(
tuple(k for k, v in f.iter_group_items(f.columns.isin(('p', 't')))),
(('A', False), ('B', False), ('B', True))
)
self.assertEqual(
tuple(k for k, v in f.iter_group_items(['s', 't'])),
((False, False), (True, False), (True, True))
)
self.assertEqual(
tuple(k for k, v in f.iter_group_items(slice('s','t'))),
((False, False), (True, False), (True, True))
)
def test_frame_iter_group_items_f(self) -> None:
objs = [object() for _ in range(2)]
data = [[1, 2, objs[0]], [3, 4, objs[0]], [5, 6, objs[1]]]
f = sf.Frame.from_records(data, columns=tuple('abc'))
post1 = {k: v for k, v in f.iter_group_items('c')}
post2 = {k[0]: v for k, v in f.iter_group_items(['c'])} # as a list, this gets a multiple key
self.assertEqual(len(post1), 2)
self.assertEqual(len(post1), len(post2))
obj_a = objs[0]
obj_b = objs[1]
self.assertEqual(post1[obj_a].shape, (2, 3))
self.assertEqual(post1[obj_a].shape, post2[obj_a].shape)
self.assertEqual(post1[obj_a].to_pairs(0),
(('a', ((0, 1), (1, 3))), ('b', ((0, 2), (1, 4))), ('c', ((0, obj_a), (1, obj_a)))))
self.assertEqual(post2[obj_a].to_pairs(0),
(('a', ((0, 1), (1, 3))), ('b', ((0, 2), (1, 4))), ('c', ((0, obj_a), (1, obj_a)))))
self.assertEqual(post1[obj_b].shape, (1, 3))
self.assertEqual(post1[obj_b].shape, post2[obj_b].shape)
self.assertEqual(post1[obj_b].to_pairs(0),
(('a', ((2, 5),)), ('b', ((2, 6),)), ('c', ((2, obj_b),))))
self.assertEqual(post2[obj_b].to_pairs(0),
(('a', ((2, 5),)), ('b', ((2, 6),)), ('c', ((2, obj_b),))))
#---------------------------------------------------------------------------
def test_frame_iter_group_index_a(self) -> None:
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('x', 'y', 'z'))
with self.assertRaises(TypeError):
f1.iter_group_labels(3, 4)
with self.assertRaises(TypeError):
f1.iter_group_labels(foo=4)
post = tuple(f1.iter_group_labels(0, axis=0))
self.assertEqual(len(post), 3)
self.assertEqual(
f1.iter_group_labels(0, axis=0).apply(lambda x: x[['p', 'q']].values.sum()).to_pairs(),
(('x', 4), ('y', 64), ('z', 97))
)
def test_frame_iter_group_index_b(self) -> None:
records = (
(2, 2, 'a', 'q', False, False),
(30, 34, 'b', 'c', True, False),
(2, 95, 'c', 'd', False, False),
)
f1 = Frame.from_records(records,
columns=IndexHierarchy.from_product((1, 2, 3), ('a', 'b')),
index=('x', 'y', 'z'))
# with axis 1, we are grouping based on columns while maintain the index
post_tuple = tuple(f1.iter_group_labels(1, axis=1))
self.assertEqual(len(post_tuple), 2)
post = f1[HLoc[f1.columns[0]]]
self.assertEqual(post.__class__, Series)
self.assertEqual(post.to_pairs(),
(('x', 2), ('y', 30), ('z', 2))
)
post = f1.loc[:, HLoc[f1.columns[0]]]
self.assertEqual(post.__class__, Series)
self.assertEqual(post.to_pairs(),
(('x', 2), ('y', 30), ('z', 2))
)
self.assertEqual(
f1.iter_group_labels(1, axis=1).apply(lambda x: x.iloc[:, 0].sum()).to_pairs(),
(('a', 34), ('b', 131))
)
def test_frame_iter_group_index_c(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = (('A', 1, 'a', False, False),
('A', 2, 'b', True, False),
('B', 1, 'c', False, False),
('B', 2, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index,name='foo')
f = f.set_index_hierarchy(('p', 'q'), drop=True)
with self.assertRaises(AxisInvalid):
_ = f.iter_group_labels_items(0, axis=-1).apply(lambda k, x: f'{k}:{x.size}')
post = f.iter_group_labels_items(0).apply(lambda k, x: f'{k}:{x.size}')
self.assertEqual(post.to_pairs(),
(('A', 'A:6'), ('B', 'B:6'))
)
#---------------------------------------------------------------------------
def test_frame_reversed(self) -> None:
columns = tuple('pqrst')
index = tuple('zxwy')
records = ((2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True))
f = Frame.from_records(
records, columns=columns, index=index,name='foo')
self.assertTrue(tuple(reversed(f)) == tuple(reversed(columns)))
#---------------------------------------------------------------------------
def test_frame_axis_window_items_a(self) -> None:
base = np.array([1, 2, 3, 4])
records = (base * n for n in range(1, 21))
f1 = Frame.from_records(records,
columns=list('ABCD'),
index=self.get_letters(20))
post0 = tuple(f1._axis_window_items(size=2, axis=0))
self.assertEqual(len(post0), 19)
self.assertEqual(post0[0][0], 'b')
self.assertEqual(post0[0][1].__class__, Frame)
self.assertEqual(post0[0][1].shape, (2, 4))
self.assertEqual(post0[-1][0], 't')
self.assertEqual(post0[-1][1].__class__, Frame)
self.assertEqual(post0[-1][1].shape, (2, 4))
post1 = tuple(f1._axis_window_items(size=2, axis=1))
self.assertEqual(len(post1), 3)
self.assertEqual(post1[0][0], 'B')
self.assertEqual(post1[0][1].__class__, Frame)
self.assertEqual(post1[0][1].shape, (20, 2))
self.assertEqual(post1[-1][0], 'D')
self.assertEqual(post1[-1][1].__class__, Frame)
self.assertEqual(post1[-1][1].shape, (20, 2))
def test_frame_axis_window_items_b(self) -> None:
base = np.array([1, 2, 3, 4])
records = (base * n for n in range(1, 21))
f1 = Frame.from_records(records,
columns=list('ABCD'),
index=self.get_letters(20))
post0 = tuple(f1._axis_window_items(size=2, axis=0, as_array=True))
self.assertEqual(len(post0), 19)
self.assertEqual(post0[0][0], 'b')
self.assertEqual(post0[0][1].__class__, np.ndarray)
self.assertEqual(post0[0][1].shape, (2, 4))
self.assertEqual(post0[-1][0], 't')
self.assertEqual(post0[-1][1].__class__, np.ndarray)
self.assertEqual(post0[-1][1].shape, (2, 4))
post1 = tuple(f1._axis_window_items(size=2, axis=1, as_array=True))
self.assertEqual(len(post1), 3)
self.assertEqual(post1[0][0], 'B')
self.assertEqual(post1[0][1].__class__, np.ndarray)
self.assertEqual(post1[0][1].shape, (20, 2))
self.assertEqual(post1[-1][0], 'D')
self.assertEqual(post1[-1][1].__class__, np.ndarray)
self.assertEqual(post1[-1][1].shape, (20, 2))
def test_frame_iter_window_a(self) -> None:
base = np.array([1, 2, 3, 4])
records = (base * n for n in range(1, 21))
f1 = Frame.from_records(records,
columns=list('ABCD'),
index=self.get_letters(20))
self.assertEqual(
f1.iter_window(size=3).apply(lambda f: f['B'].sum()).to_pairs(),
(('c', 12), ('d', 18), ('e', 24), ('f', 30), ('g', 36), ('h', 42), ('i', 48), ('j', 54), ('k', 60), ('l', 66), ('m', 72), ('n', 78), ('o', 84), ('p', 90), ('q', 96), ('r', 102), ('s', 108), ('t', 114))
)
post = list(f1.iter_window(size=3))
self.assertEqual(len(post), 18)
self.assertTrue(all(f.shape == (3, 4) for f in post))
#---------------------------------------------------------------------------
def test_frame_axis_interface_a(self) -> None:
# reindex both axis
records = (
(1, 2, 'a', False, True),
(30, 34, 'b', True, False),
(54, 95, 'c', False, False),
(65, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
self.assertEqual(f1.to_pairs(1),
(('w', (('p', 1), ('q', 2), ('r', 'a'), ('s', False), ('t', True))), ('x', (('p', 30), ('q', 34), ('r', 'b'), ('s', True), ('t', False))), ('y', (('p', 54), ('q', 95), ('r', 'c'), ('s', False), ('t', False))), ('z', (('p', 65), ('q', 73), ('r', 'd'), ('s', True), ('t', True)))))
for x in f1.iter_tuple(axis=0):
self.assertTrue(len(x), 4)
for x in f1.iter_tuple(axis=1):
self.assertTrue(len(x), 5)
f2 = f1[['p', 'q']]
s1 = f2.iter_array(axis=0).apply(np.sum)
self.assertEqual(list(s1.items()), [('p', 150), ('q', 204)])
s2 = f2.iter_array(axis=1).apply(np.sum)
self.assertEqual(list(s2.items()),
[('w', 3), ('x', 64), ('y', 149), ('z', 138)])
def sum_if(idx: tp.Hashable, vals: tp.Iterable[int]) -> tp.Optional[int]:
if idx in ('x', 'z'):
return tp.cast(int, np.sum(vals))
return None
s3 = f2.iter_array_items(axis=1).apply(sum_if)
self.assertEqual(list(s3.items()),
[('w', None), ('x', 64), ('y', None), ('z', 138)])
#---------------------------------------------------------------------------
def test_frame_group_a(self) -> None:
# reindex both axis
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
with self.assertRaises(AxisInvalid):
post = tuple(f1._axis_group_iloc_items(4, axis=-1))
post = tuple(f1._axis_group_iloc_items(4, axis=0)) # row iter, group by column 4
group1, group_frame_1 = post[0]
group2, group_frame_2 = post[1]
self.assertEqual(group1, False)
self.assertEqual(group2, True)
self.assertEqual(group_frame_1.to_pairs(0),
(('p', (('w', 2), ('x', 30), ('y', 2))), ('q', (('w', 2), ('x', 34), ('y', 95))), ('r', (('w', 'a'), ('x', 'b'), ('y', 'c'))), ('s', (('w', False), ('x', True), ('y', False))), ('t', (('w', False), ('x', False), ('y', False)))))
self.assertEqual(group_frame_2.to_pairs(0),
(('p', (('z', 30),)), ('q', (('z', 73),)), ('r', (('z', 'd'),)), ('s', (('z', True),)), ('t', (('z', True),))))
def test_frame_group_b(self) -> None:
# reindex both axis
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
# column iter, group by row 0
post = list(f1._axis_group_iloc_items(0, axis=1))
self.assertEqual(post[0][0], 2)
self.assertEqual(post[0][1].to_pairs(0),
(('p', (('w', 2), ('x', 30), ('y', 2), ('z', 30))), ('q', (('w', 2), ('x', 34), ('y', 95), ('z', 73)))))
self.assertEqual(post[1][0], False)
self.assertEqual(post[1][1].to_pairs(0),
(('s', (('w', False), ('x', True), ('y', False), ('z', True))), ('t', (('w', False), ('x', False), ('y', False), ('z', True)))))
self.assertEqual(post[2][0], 'a')
self.assertEqual(post[2][1].to_pairs(0),
(('r', (('w', 'a'), ('x', 'b'), ('y', 'c'), ('z', 'd'))),))
def test_frame_axis_interface_b(self) -> None:
# reindex both axis
records = (
(2, 2, 'a', False, False),
(30, 34, 'b', True, False),
(2, 95, 'c', False, False),
(30, 73, 'd', True, True),
)
f1 = Frame.from_records(records,
columns=('p', 'q', 'r', 's', 't'),
index=('w', 'x', 'y', 'z'))
post = list(f1.iter_group_items('s', axis=0))
self.assertEqual(post[0][1].to_pairs(0),
(('p', (('w', 2), ('y', 2))), ('q', (('w', 2), ('y', 95))), ('r', (('w', 'a'), ('y', 'c'))), ('s', (('w', False), ('y', False))), ('t', (('w', False), ('y', False)))))
self.assertEqual(post[1][1].to_pairs(0),
(('p', (('x', 30), ('z', 30))), ('q', (('x', 34), ('z', 73))), ('r', (('x', 'b'), ('z', 'd'))), ('s', (('x', True), ('z', True))), ('t', (('x', False), ('z', True)))))
s1 = f1.iter_group('p', axis=0).apply(lambda f: f['q'].values.sum())
self.assertEqual(list(s1.items()), [(2, 97), (30, 107)])
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
33136feebbdbb37d3c3f294a2ecda89a59ed0b98 | ef55b064f4352e2f97431851d5535d6b173fb504 | /build-gui.py | 3f63548df82b17cc7d1b61a8ad113d3398e7053a | [
"MIT"
] | permissive | cheuer/ALttPDoorRandomizer | 8e56e3594a2a180953d0b44a83b668321e4af167 | 44d7e6c15cca8dc613e8fe9cdca07eaa3c5f44a3 | refs/heads/DoorDev | 2021-03-02T22:59:24.950136 | 2020-03-26T23:04:54 | 2020-03-26T23:04:54 | 245,912,753 | 0 | 0 | MIT | 2020-03-09T05:22:22 | 2020-03-09T00:45:46 | null | UTF-8 | Python | false | false | 543 | py | import subprocess
import os
import shutil
DEST_DIRECTORY = '.'
if os.path.isdir("upx"):
upx_string = "--upx-dir=upx"
else:
upx_string = ""
if os.path.isdir("build"):
shutil.rmtree("build")
subprocess.run(" ".join(["pyinstaller Gui.spec ",
upx_string,
"-y ",
"--onefile ",
f"--distpath {DEST_DIRECTORY} ",
]),
shell=True)
| [
"[email protected]"
] | |
ede12f3384950d410a2e5b5c0bb5ba2b28076ac9 | 6c67e2ae195521910fd3d8180fc5a70b9f60db81 | /controllers/utils/rtsq_library/rtsq_level.py | bbe95532b2799521638fa5f25075270c273de949 | [
"MIT"
] | permissive | zeroday0619/Real-Time-Delivery-Query-API | be8b7f0cd74e6c8651fc034064f51e6ec20bac17 | fc2f973c205fe453f77ae27dcd99ce3c2e84528d | refs/heads/master | 2020-09-08T01:43:08.857874 | 2019-11-17T22:32:44 | 2019-11-17T22:32:44 | 220,975,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | def level(resp):
"""
    Args:
        resp: dict carrying the delivery status under the ``level`` key (int)
    Returns:
        dict with the numeric code and a status string
        (level 1: preparing shipment, 2: pickup complete, 3: in transit,
         4: arrived at branch, 5: out for delivery, 6: delivered)
"""
if resp['level'] == 1:
return {
"code": 1,
"level": "배송 준비중"
}
elif resp['level'] == 2:
return {
"code": 2,
"level": "집화 완료"
}
elif resp['level'] == 3:
return {
"code": 3,
"level": "배송중"
}
elif resp['level'] == 4:
return {
"code": 4,
"level": "지점 도착"
}
elif resp['level'] == 5:
return {
"code": 5,
"level": "배송 출발"
}
elif resp['level'] == 6:
return {
"code": 6,
"level": "배송 완료"
}
else:
return {
"code": 0,
"level": "Internal System Error"
}
| [
"[email protected]"
] | |
f7d9aea052dd03a9baf3a059a9a907746703c781 | a4d4e2c99b1ee5e8045bfbf55949ea5ae34ae371 | /Jobb/Jobbtider.spec | fab167111d3db9b7dc2170e30bf9f2712feb6021 | [] | no_license | NBerlin/LearningPython | 87ee01633a69d719ce79df0177b3740305569621 | 8d59f9dee34beb712160a13b19c6a882e9b8755d | refs/heads/master | 2022-11-05T03:49:44.159119 | 2019-05-09T17:55:04 | 2019-05-09T17:55:04 | 124,292,605 | 0 | 0 | null | 2022-10-26T17:06:30 | 2018-03-07T20:47:59 | Python | UTF-8 | Python | false | false | 888 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['Jobbtider.py'],
pathex=['C:\\Users\\Nicki\\Documents\\Programmering\\LearnPython\\Jobb'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
exclude_binaries=True,
name='Jobbtider',
debug=False,
strip=False,
upx=True,
console=True )
coll = COLLECT(exe,
a.binaries,
a.zipfiles,
a.datas,
strip=False,
upx=True,
name='Jobbtider')
| [
"[email protected]"
] | |
2532b165031ee7384c512a86e01653fe3015a922 | a0c53168a4bdcfb0aa917d6d2c602f0999443a10 | /projex/docgen/document.py | 7ee7ccaa34818e7e66f325153fd66981982afda1 | [] | no_license | kanooshka/DPS_PIPELINE | 8067154c59ca5c8c9c09740969bb6e8537021903 | df2fcdecda5bce98e4235ffddde1e99f334562cc | refs/heads/master | 2021-05-24T04:32:03.457648 | 2018-09-07T13:25:11 | 2018-09-07T13:25:11 | 29,938,064 | 3 | 2 | null | 2020-07-23T23:06:37 | 2015-01-27T22:26:01 | Python | UTF-8 | Python | false | false | 41,392 | py | #!/usr/bin/python
""" Defines the document class that is used with the docgen system. """
# define authorship information
__authors__ = ['Eric Hulser']
__author__ = ','.join(__authors__)
__credits__ = []
__copyright__ = 'Copyright (c) 2011, Projex Software'
__license__ = 'LGPL'
# maintenance information
__maintainer__ = 'Projex Software'
__email__ = '[email protected]'
#------------------------------------------------------------------------------
import inspect
import logging
import new
import os
import re
import xml.sax.saxutils
from projex import text
from projex import wikitext
from projex.docgen import templates
from projex.docgen import commands
logger = logging.getLogger(__name__)
DATA_TYPE_ORDER = [
'module',
'class',
'variable',
'member',
'property',
'enum',
'function',
'method',
'signal',
'slot',
'abstract method',
'class method',
'static method',
'deprecated method',
'built-in',
]
DATA_PRIVACY_ORDER = [
'public',
'imported public',
'protected',
'imported protected',
'private',
'imported private',
'built-in',
'imported built-in',
]
DATA_ORDER = []
for privacy in DATA_PRIVACY_ORDER:
for typ in DATA_TYPE_ORDER:
DATA_ORDER.append('%s %s' % (privacy, typ))
class Attribute(tuple):
""" Used to map tuple returns to support different python versions. """
def __init__( self, member_tuple ):
super(Attribute, self).__init__(member_tuple)
self.name = member_tuple[0]
self.kind = member_tuple[1]
self.defining_class = member_tuple[2]
self.object = member_tuple[3]
if ( hasattr(self.object, 'func_type') ):
self.kind = self.object.func_type
#------------------------------------------------------------------------------
class DocumentData(object):
""" Struct to hold data about a document object. """
name = None
value = None
dataType = None
privacy = None
def section( self ):
"""
Returns the section type for this data by joining the privacy and \
type information.
:return <str>
"""
return (self.privacy + ' ' + self.dataType)
@staticmethod
def create( name,
value,
kind = 'data',
defaultVarType = 'variable',
defaultFuncType ='function' ):
"""
Creates a new document data instance.
:return <DocumentData>
"""
# look for private members
results = re.match('^(_\w+)__.+', name)
if ( results ):
name = name.replace(results.group(1), '')
# determine the privacy level for this data
privacy = 'public'
if ( name.startswith('__') and name.endswith('__') ):
privacy = 'built-in'
elif ( name.startswith('__') ):
privacy = 'private'
elif ( name.startswith('_') ):
privacy = 'protected'
docdata = DocumentData()
docdata.name = name
docdata.value = value
# look for specific kinds of methods
if ( kind == 'method' ):
type_name = type(value).__name__
if ( type_name == 'pyqtSignal' ):
kind = 'signal'
elif ( type_name == 'pyqtSlot' ):
kind = 'slot'
elif ( type_name == 'pyqtProperty' ):
kind = 'property'
elif ( hasattr(value, 'func_type') ):
kind = getattr(value, 'func_type')
if ( kind != 'data' ):
docdata.dataType = kind
else:
docdata.dataType = commands.defaultValueType( value,
defaultVarType,
defaultFuncType )
docdata.privacy = privacy
return docdata
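    # Illustrative classification by DocumentData.create (assumed sample
    # names, not from the original module):
    #   DocumentData.create('title',    '').privacy   == 'public'
    #   DocumentData.create('_cache',   {}).privacy   == 'protected'
    #   DocumentData.create('__token',  '').privacy   == 'private'
    #   DocumentData.create('__init__', None).privacy == 'built-in'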
#------------------------------------------------------------------------------
class Document(object):
"""
Defines the class that collects all documentation for a python
object.
"""
cache = {}
aliases = {}
reverseAliases = {}
def __init__( self ):
self._object = None
self._parent = None
self._objectName = ''
self._html = ''
self._allMembersHtml = ''
self._title = ''
self._data = {}
self._sourceHtml = {}
self._children = []
# protected methods
def _bases( self, cls, recursive = False ):
"""
        Looks up the bases for the given obj instance.
:param obj | <object>
:param recursive | <bool>
:return [<cls>, ..]
"""
if ( not inspect.isclass( cls ) ):
return []
output = list(cls.__bases__[:])
if ( not recursive ):
return output
for basecls in output:
output += self._bases(basecls, recursive = recursive)
return list(set(output))
def _collectMembers( self, obj ):
if ( not inspect.isclass( obj ) ):
return []
try:
members = inspect.classify_class_attrs(self._object)
except AttributeError:
members = []
# support python25-
if ( members and type(members[0]) == tuple ):
members = [ Attribute(member) for member in members ]
return members
def _generateAllMemberSummary( self, member ):
"""
Generates the member summary documentation.
:param member <Attribute>
:return <str>
"""
try:
obj = getattr(member.defining_class, member.name)
except AttributeError:
return ''
key = member.name
cls = member.defining_class
if ( 'method' in member.kind ):
docname = cls.__module__ + '-' + cls.__name__
doc = Document.cache.get(docname)
if ( doc ):
opts = (doc.url(relativeTo = self), key, key)
href = '<a href="%s#%s">%s</a>' % opts
else:
href = key
kind = member.kind
if ( hasattr(obj, 'func_type') ):
kind = obj.func_type
templ = '%s::%s%s'
if ( 'static' in kind ):
templ += ' [static]'
elif ( 'class' in kind ):
templ += ' [class]'
elif ( 'abstract' in kind ):
templ += ' [abstract]'
elif ( 'deprecated' in kind ):
templ += ' [deprecated]'
return templ % (cls.__name__, href, self._generateArgs(obj))
else:
opts = (cls.__name__, key, type(member.object).__name__)
return '%s::%s : %s' % opts
def _generateAllMembersDocs(self):
"""
Generates the all members documentation for this document.
:return <str>
"""
if ( not inspect.isclass(self._object) ):
return ''
members = self._collectMembers(self._object)
member_docs = []
members.sort( lambda x, y: cmp( x.name, y.name ) )
for member in members:
if ( member.name.startswith('__') and member.name.endswith('__') ):
continue
member_doc = self._generateAllMemberSummary(member)
if ( member_doc ):
member_docs.append('<li>%s</li>' % member_doc)
environ = commands.ENVIRON.copy()
environ['members_left'] = '\n'.join( member_docs[:len(member_docs)/2])
environ['members_right'] = '\n'.join( member_docs[len(member_docs)/2:])
environ['title'] = self.title()
environ['base_url'] = self.baseurl()
environ['static_url'] = environ['base_url'] + '/_static'
environ['navigation'] %= environ
return templates.template('allmembers.html') % environ
def _generateArgs(self, obj):
"""
Generates the argument information for the inputed object.
:param obj | <variant>
:return <str>
"""
try:
return inspect.formatargspec( *inspect.getargspec( obj ) )
except TypeError:
try:
return self._generateArgs( obj.im_func )
except AttributeError:
pass
if ( isinstance( obj, new.instancemethod ) and
hasattr( obj.im_func, 'func_args' ) ):
return obj.im_func.func_args
return '(*args, **kwds) [unknown]'
def _generateHtml( self ):
"""
Generates the HTML documentation for this document.
:return <str>
"""
if ( self.isNull() or self._html ):
return self._html
# generate module docs
if ( inspect.ismodule( self._object ) ):
return self._generateModuleDocs()
# generate class docs
elif ( inspect.isclass( self._object ) ):
return self._generateClassDocs()
# not sure what this is
return ''
def _generateClassDocs( self ):
"""
Generates class documentation for this object.
"""
html = []
self.parseData()
# determine the inheritance
bases = []
for base in self._bases( self._object ):
doc = commands.findDocument(base)
if ( doc ):
opt = {}
opt['text'] = base.__name__
opt['url'] = doc.url( relativeTo = self )
bases.append( templates.template('link_standard.html') % opt )
else:
bases.append( base.__name__ )
if ( len(bases) > 1 ):
basestxt = ', '.join(bases[:-1])
inherits = 'Inherits %s and %s.' % (basestxt, bases[-1])
elif (len(bases) == 1):
inherits = 'Inherits %s.' % bases[0]
else:
inherits = ''
# determine the subclasses
subclasses = []
for subcls in self._subclasses( self._object ):
doc = commands.findDocument(subcls)
if ( doc ):
opt = {}
opt['text'] = subcls.__name__
opt['url'] = doc.url( relativeTo = self )
subclasses.append( templates.template('link_standard.html') % opt )
else:
subclasses.append( subcls.__name__ )
if ( len(subclasses) > 1 ):
subs = ', '.join(subclasses[:-1])
inherited_by = 'Inherited by %s and %s.' % (subs, subclasses[-1])
elif ( len(subclasses) == 1 ):
inherited_by = 'Inherited by %s.' % (subclasses[0])
else:
inherited_by = ''
allmembers = self.objectName().split('.')[-1] + '-allmembers.html'
# generate the module environ
environ = commands.ENVIRON.copy()
environ['title'] = self.title()
environ['allmembers'] = './' + allmembers
environ['breadcrumbs'] = self.breadcrumbs()
environ['url'] = self.url()
environ['doctype'] = 'Class'
environ['inherits'] = inherits
environ['inherited_by'] = inherited_by
modname = self._object.__module__
moddoc = Document.cache.get(modname)
if ( moddoc ):
modurl = moddoc.url(relativeTo = self)
environ['module'] = '<a href="%s">%s</a>' % (modurl, modname)
else:
environ['module'] = modname
html.append( templates.template('header_class.html') % environ )
# generate the summary report
gdata = self.groupedData()
keys = [key for key in gdata.keys() if key in DATA_ORDER]
keys.sort(lambda x, y: cmp(DATA_ORDER.index(x), DATA_ORDER.index(y)))
for key in keys:
html.append( self._generateSummary( key, gdata[key] ) )
# generate the main documentation
maindocs = self._generateObjectDocs( self._object )
if ( maindocs ):
environ = commands.ENVIRON.copy()
environ['type'] = 'Class'
environ['contents'] = maindocs
html.append( templates.template('docs_main.html') % environ )
# generate the member documentation
funcs = self.data().values()
html.append( self._generateMemberDocs( 'Member Documentation',
funcs))
# generate the document environ
return '\n'.join(html)
def _generateMemberDocs( self, title, data ):
"""
Generates the member documentation for the inputed set of data.
:param title | <str>
:param data | [ <DocumentData>, .. ]
"""
if ( not data ):
return ''
bases = []
subclasses = []
# generate the html
html = []
data.sort(lambda x, y: cmp(x.name, y.name))
for entry in data:
# generate function information
if ( 'function' in entry.dataType or 'method' in entry.dataType ):
                # look up base methods for reimplementation
reimpliments = []
for base in bases:
if ( entry.name in base.__dict__ ):
doc = commands.findDocument(base)
if ( doc ):
opt = {}
opt['text'] = base.__name__
opt['url'] = doc.url( relativeTo = self )
opt['url'] += '#' + entry.name
href = templates.template('link_standard.html') % opt
reimpliments.append( href )
else:
reimpliments.append( entry.name )
reimpliment_doc = ''
if ( reimpliments ):
urls = ','.join(reimpliments)
                    reimpliment_doc = 'Reimplements from %s.' % urls
                # look up subclasses for reimplementation
reimplimented = []
for subcls in subclasses:
if ( entry.name in subcls.__dict__ ):
doc = commands.findDocument(subcls)
if ( doc ):
opt = {}
opt['text'] = subcls.__name__
opt['url'] = doc.url( relativeTo = self )
opt['url'] += '#' + entry.name
href = templates.template('link_standard.html') % opt
reimplimented.append( href )
else:
reimplimented.append( entry.name )
reimplimented_doc = ''
if ( reimplimented ):
urls = ','.join(reimplimented)
                    reimplimented_doc = 'Reimplemented by %s.' % urls
func_split = entry.dataType.split(' ')
desc = ''
if ( len(func_split) > 1 ):
desc = '[%s]' % func_split[0]
# add the function to the documentation
environ = commands.ENVIRON.copy()
environ['type'] = entry.dataType
environ['name'] = entry.name
environ['args'] = self._generateArgs( entry.value )
environ['desc'] = desc
environ['contents'] = self._generateObjectDocs(entry.value)
environ['reimpliments'] = reimpliment_doc
environ['reimplimented'] = reimplimented_doc
html.append( templates.template('docs_function.html') % environ )
elif ( entry.dataType == 'enum' ):
environ = commands.ENVIRON.copy()
environ['name'] = entry.name
value_contents = []
values = entry.value.values()
values.sort()
for value in values:
value_opts = {}
value_opts['key'] = entry.value[value]
value_opts['value'] = value
value_templ = templates.template('docs_enum_value.html')
value_item = value_templ % value_opts
value_contents.append( value_item )
environ['contents'] = '\n'.join(value_contents)
html.append( templates.template('docs_enum.html') % environ )
environ = {}
environ['title'] = title
environ['contents'] = '\n'.join( html )
return templates.template('docs_members.html') % environ
def _generateModuleDocs( self ):
"""
Generates module documentation for this object.
"""
html = []
# generate the module environ
environ = commands.ENVIRON.copy()
environ['title'] = self.title()
environ['base_url'] = self.baseurl()
environ['static_url'] = environ['base_url'] + '/_static'
environ['breadcrumbs'] = self.breadcrumbs()
environ['url'] = self.url()
environ['doctype'] = 'Module'
if ( '__init__' in self._object.__file__ ):
environ['doctype'] = 'Package'
url_split = environ['url'].split('/')
sources_url = './%s-source.html' % url_split[-1].split('.')[0]
environ['sources'] = sources_url
environ['navigation'] %= environ
html.append( templates.template('header_module.html') % environ )
# generate the summary report
gdata = self.groupedData()
for key in sorted( gdata.keys(), key = lambda x: DATA_ORDER.index(x)):
value = gdata[key]
html.append( self._generateSummary( key, gdata[key] ) )
# generate the main documentation
maindocs = self._generateObjectDocs( self._object )
if ( maindocs ):
environ = commands.ENVIRON.copy()
environ['type'] = 'Module'
environ['contents'] = maindocs
html.append( templates.template('docs_main.html') % environ )
# generate the member documentation
html.append( self._generateMemberDocs('Module Function Documentation',
self.data().values()))
return '\n'.join(html)
def _generateObjectDocs( self, obj ):
"""
        Generates documentation based on the given object's docstring and
member variable information.
:param obj | <str>
:return <str> html
"""
# get the documentation
try:
docs = inspect.getdoc(obj)
except AttributeError:
pass
if ( docs == None ):
try:
docs = inspect.getcomments(obj)
except AttributeError:
docs = ''
return wikitext.render(docs,
commands.url_handler,
options=commands.RENDER_OPTIONS)
def _generateSourceDocs( self ):
"""
Return the documentation containing the source code.
:return <str>
"""
if ( not inspect.ismodule(self._object) ):
return ''
# load the code file
codefilename = os.path.splitext( self._object.__file__ )[0]
codefilename += '.py'
codefile = open(codefilename, 'r')
code = codefile.read()
codefile.close()
environ = commands.ENVIRON.copy()
environ['code'] = xml.sax.saxutils.escape(code)
environ['title'] = self.title()
environ['base_url'] = self.baseurl()
environ['static_url'] = environ['base_url'] + '/_static'
environ['breadcrumbs'] = self.breadcrumbs(includeSelf = True)
environ['navigation'] %= environ
return templates.template('source.html') % environ
def _generateSummary( self, section, values, columns = 1 ):
"""
        Generates summary information for the given section and value
data.
:param section | <str>
:param values | [ <DocumentData>, .. ]
:param columns | <int>
:return <str>
"""
# strip out built-in variables
newvalues = []
for value in values:
if ( not (value.privacy == 'built-in' and
value.dataType == 'variable' )):
newvalues.append(value)
values = newvalues
if ( not values ):
return ''
# split the data into columns
values.sort( lambda x, y: cmp( x.name.lower(), y.name.lower() ) )
url = self.url()
coldata = []
if ( columns > 1 ):
pass
else:
coldata = [values]
html = []
processed = []
for colitem in coldata:
for data in colitem:
data_environ = {}
data_environ['url'] = url
data_environ['name'] = data.name
data_environ['type'] = data.dataType
processed.append( data.name )
if ( 'function' in data.dataType or
'method' in data.dataType ):
data_environ['args'] = self._generateArgs( data.value )
templ = templates.template('summary_function.html')
html.append( templ % data_environ )
elif ( data.dataType == 'enum' ):
templ = templates.template('summary_enum.html')
html.append( templ % data_environ )
elif ( 'variable' in data.dataType or
'member' in data.dataType ):
try:
value = getattr(self._object, data.name)
except AttributeError:
value = None
data_environ['value_type'] = type(value).__name__
templ = templates.template('summary_variable.html')
html.append( templ % data_environ )
else:
datadoc = commands.findDocument(data.value)
if ( datadoc ):
opts = {}
opts['text'] = data.name
opts['url'] = datadoc.url( relativeTo = self )
contents = templates.template('link_standard.html') % opts
else:
contents = data.name
data_environ['contents'] = contents
templ = templates.template('summary_item.html')
html.append( templ % data_environ )
# update the bases environ
members = self._collectMembers(self._object)
inherited_members = {}
for member in members:
mem_name = member.name
mem_kind = member.kind
mem_cls = member.defining_class
mem_value = member.object
if ( hasattr(member.object, 'func_type') ):
mem_kind = member.object.func_type
if ( mem_cls == self._object ):
continue
data = DocumentData.create( mem_name,
mem_value,
mem_kind,
'member',
'method' )
if ( section != data.section() ):
continue
inherited_members.setdefault( mem_cls, 0 )
inherited_members[mem_cls] += 1
inherit_summaries = []
templ = templates.template('summary_inherit.html')
bases = self._bases( self._object, True )
inherits = inherited_members.keys()
inherits.sort( lambda x, y: cmp( bases.index(x), bases.index(y) ) )
for inherited in inherits:
count = inherited_members[inherited]
doc = commands.findDocument( inherited )
if ( not doc ):
continue
opt = {}
opt['count'] = count
opt['base'] = inherited.__name__
opt['url'] = doc.url( relativeTo = self )
opt['type'] = section
inherit_summaries.append( templ % opt )
# generate the summary information
words = [word.capitalize() for word in text.words(section)]
words[-1] = text.pluralize(words[-1])
summary_environ = {}
summary_environ['contents'] = '\n'.join(html)
summary_environ['section'] = ' '.join(words)
summary_environ['inherits'] = '\n'.join(inherit_summaries)
return templates.template('summary.html') % summary_environ
def _subclasses( self, obj ):
"""
Looks up all the classes that inherit from this object.
:param obj | <object>
:return [<cls>, ..]
"""
output = []
for doc in Document.cache.values():
doc_obj = doc.object()
if ( inspect.isclass( doc_obj ) and
obj in doc_obj.__bases__ ):
output.append( doc_obj )
return output
#------------------------------------------------------------------------------
# public methods
def addChild( self, child ):
"""
        Adds the given document as a child of this document.
:param child | <Document>
"""
child._parent = self
self._children.append(child)
def allMembersHtml( self ):
"""
Returns the documentation for all the members linked to this document.
This method only applies to class objects.
:return <str>
"""
if ( not inspect.isclass( self._object ) ):
return ''
if ( not self._allMembersHtml ):
self._allMembersHtml = self._generateAllMembersDocs()
return self._allMembersHtml
def baseurl( self ):
"""
Returns the relative url to get back to the root of the documentation
api.
:return <str>
"""
baseurl = self.url()
count = len(baseurl.split('/'))
return ('../' * count).strip('/')
def breadcrumbs(self,
relativeTo = None,
first = True,
includeSelf = False):
"""
Creates a link to all of the previous modules for this item.
:param relativeTo | <Document> | Relative to another document.
first | <bool>
includeSelf | <bool> | Create a link to this doc.
:return <str>
"""
basecrumbs = ''
if ( not relativeTo ):
relativeTo = self
basecrumbs = self.title().split('.')[-1]
if ( includeSelf ):
opts = {
'url': './' + os.path.split(self.url())[1],
'text': self.title().split('.')[-1]
}
basecrumbs = templates.template('link_breadcrumbs.html') % opts
if ( inspect.isclass( self._object ) ):
doc = Document.cache.get( self._object.__module__ )
elif ( inspect.ismodule( self._object ) ):
parent_mod = '.'.join( self._object.__name__.split('.')[:-1] )
doc = Document.cache.get( parent_mod )
else:
doc = None
if ( doc ):
opts = {}
opts['url'] = doc.url(relativeTo)
opts['text' ] = doc.title().split('.')[-1]
link = templates.template('link_breadcrumbs.html') % opts
subcrumbs = doc.breadcrumbs(relativeTo, first = False)
else:
subcrumbs = ''
link = ''
parts = []
if ( first ):
# add the home url
baseurl = self.baseurl()
home_url = '%s/index.html' % baseurl
home_opts = { 'text': 'Home', 'url': home_url }
home_part = templates.template('link_breadcrumbs.html') % home_opts
parts.append(home_part)
# add the api url
api_url = '%s/api/index.html' % baseurl
api_opts = { 'text': 'API', 'url': api_url }
api_part = templates.template('link_breadcrumbs.html') % api_opts
parts.append(api_part)
if ( subcrumbs ):
parts.append( subcrumbs )
if ( link ):
parts.append( link )
if ( basecrumbs ):
parts.append( basecrumbs )
return ''.join( parts )
def children( self ):
"""
Returns the child documents for this instance.
:return [ <Document>, .. ]
"""
return self._children
def data( self ):
"""
Returns the data that has been loaded for this document.
:return <dict>
"""
return self._data
def export( self, basepath, page = None ):
"""
Exports the html files for this document and its children to the
given basepath.
:param basepath | <str>
:param page | <str> || None
:return <bool> success
"""
# make sure the base path exists
if ( not os.path.exists( basepath ) ):
return False
basepath = os.path.normpath(basepath)
url = self.url()
filename = os.path.join(basepath, url)
docpath = os.path.dirname(filename)
# add the doc path
if ( not os.path.exists(docpath) ):
os.makedirs(docpath)
if ( not page ):
page = templates.template('page.html')
# setup the default environ
commands.url_handler.setRootUrl(self.baseurl())
doc_environ = commands.ENVIRON.copy()
doc_environ['title'] = self.title()
doc_environ['base_url'] = self.baseurl()
doc_environ['static_url'] = doc_environ['base_url'] + '/_static'
doc_environ['contents'] = self.html()
doc_environ['breadcrumbs'] = self.breadcrumbs(includeSelf = True)
doc_environ['navigation'] %= doc_environ
# generate the main html file
exportfile = open(filename, 'w')
exportfile.write( page % doc_environ )
exportfile.close()
# generate the all members html file
allmember_html = self.allMembersHtml()
if ( allmember_html ):
fpath, fname = os.path.split(filename)
fname = fname.split('.')[0] + '-allmembers.html'
afilesource = os.path.join(fpath, fname)
doc_environ['contents'] = allmember_html
# create the crumbs
crumbs = self.breadcrumbs(includeSelf = True)
opts = {'url': '#', 'text': 'All Members'}
crumbs += templates.template('link_breadcrumbs.html') % opts
doc_environ['breadcrumbs'] = crumbs
# save the all members file
membersfile = open(afilesource, 'w')
membersfile.write( page % doc_environ )
membersfile.close()
# generate the source code file
source_html = self.sourceHtml()
if ( source_html ):
fpath, fname = os.path.split(filename)
fname = fname.split('.')[0] + '-source.html'
sfilesource = os.path.join(fpath, fname)
doc_environ['contents'] = source_html
# create the crumbs
crumbs = self.breadcrumbs(includeSelf = True)
opts = {'url': '#', 'text': 'Source Code'}
crumbs += templates.template('link_breadcrumbs.html') % opts
doc_environ['breadcrumbs'] = crumbs
# save the source file
sourcefile = open(sfilesource, 'w')
sourcefile.write( page % doc_environ )
sourcefile.close()
# generate the children
for child in self.children():
child.export(basepath, page)
def findData( self, dtype ):
"""
        Looks up the stored data objects matching the given data type.
        :param dtype | <str>
:return <str>
"""
self.parseData()
output = []
for data in self._data.values():
if ( data.dataType == dtype or
(data.privacy + ' ' + data.dataType) == dtype ):
output.append(data)
return output
def groupedData( self ):
"""
Groups the data together based on their data types and returns it.
:return { <str> grp: [ <DocumentData>, .. ], .. }
"""
output = {}
values = self._data.values()
values.sort( lambda x, y: cmp(x.name, y.name) )
for data in values:
dtype = '%s %s' % (data.privacy, data.dataType)
output.setdefault(dtype, [])
output[dtype].append(data)
return output
def html( self ):
"""
Returns the generated html for this document.
:return <str>
"""
if ( not self._html ):
self._html = self._generateHtml()
return self._html
def isNull( self ):
"""
Returns whether or not this document has any data associated with it.
:return <bool>
"""
return self._object == None
def object( self ):
"""
Returns the object that this document represents.
:return <object> || None
"""
return self._object
def objectName( self ):
"""
Returns the object name that this object will represent. This will
be similar to a URL, should be unique per document.
:return <str>
"""
return self._objectName
def parent( self ):
"""
Returns the parent document of this instance.
:return <Document> || None
"""
return self._parent
def parseData( self ):
"""
Parses out all the information that is part of this item's object.
This is the method that does the bulk of the processing for the
documents.
:return <bool> success
"""
if ( self.isNull() or self._data ):
return False
class_attrs = []
obj = self.object()
# parse out class information
cls_kind_map = {}
if ( inspect.isclass( obj ) ):
contents = self._collectMembers(obj)
for const in contents:
if ( const[2] == obj ):
class_attrs.append( const[0] )
cls_kind_map[const.name] = const.kind
# try to load all the items
try:
members = dict(inspect.getmembers(obj))
except AttributeError:
members = {}
for key in dir(obj):
if ( not key in members ):
try:
members[key] = getattr(obj, key)
except AttributeError:
pass
modname = ''
if ( inspect.ismodule(obj) ):
modname = obj.__name__
for name, value in members.items():
# ignore inherited items
if ( class_attrs and not name in class_attrs ):
continue
varType = 'variable'
funcType = 'function'
kind = 'data'
if ( inspect.isclass( self._object ) ):
varType = 'member'
funcType = 'static method'
kind = cls_kind_map.get(name, 'data')
docdata = DocumentData.create( name,
value,
kind,
varType,
funcType )
if ( modname and hasattr(value, '__module__') and
modname != getattr(value, '__module__') ):
docdata.privacy = 'imported ' + docdata.privacy
self._data[name] = docdata
def setObject( self, obj ):
"""
        Sets the object instance for this document to the given object. This
will be either a module, package, class, or enum instance. This will
clear the html information and title data.
:param obj | <variant>
"""
self._object = obj
self._html = ''
self._allMembersHtml = ''
self._title = str(obj.__name__)
if ( inspect.isclass( obj ) ):
self.setObjectName( '%s-%s' % (obj.__module__, obj.__name__) )
else:
self.setObjectName( obj.__name__ )
def setObjectName( self, objectName ):
"""
Sets the object name for this document to the given name.
:param objectName | <str>
"""
self._objectName = objectName
def setTitle( self, title ):
"""
        Sets the title string for this document to the given string.
:param title | <str>
"""
self._title = title
def sourceHtml( self ):
"""
Returns the source file html for this document. This method only
applies to module documents.
:return <str>
"""
if ( not inspect.ismodule(self._object) ):
return ''
if ( not self._sourceHtml ):
self._sourceHtml = self._generateSourceDocs()
return self._sourceHtml
def title( self ):
"""
Returns the title string for this document.
:return <str>
"""
return self._title
def url( self, relativeTo = None ):
"""
Returns the path to this document's html file. If the optional
relativeTo keyword is specified, then the generated url will be made
in relation to the local path for the current document.
:param relativeTo <Document> || None
:return <str>
"""
modname = self.objectName()
if ( inspect.ismodule( self._object ) ):
if ( '__init__' in self._object.__file__ ):
modname += '.__init__'
if ( not relativeTo ):
return modname.replace('.','/') + '.html'
relmodule = relativeTo.objectName()
relobject = relativeTo.object()
if ( inspect.ismodule( relobject ) ):
if ( '__init__' in relobject.__file__ ):
relmodule += '.__init__'
relpath = relmodule.split('.')
mypath = modname.split('.')
go_up = '/..' * (len(relpath)-1)
go_down = '/'.join([ part for part in mypath if part ])
return (go_up + '/' + go_down + '.html').strip('/') | [
"[email protected]"
] | |
c8435b9d400c6a96e9ff08e7bc2c5cf72e3f2732 | 0625bb1df6c031f46e7dfd51f060092c9fa3416f | /water_battle_game.py | 5b1edbb25df3b0e9fb81b6f2d0b8b6cd1f56395c | [] | no_license | Sviatoslav-Lobanov/Python_learning | 393ad9e2f1ba79d8ad204aba51906c88f7d18043 | 87278f0b3b8e3d468cd5d8d22e9a966b8c1821c9 | refs/heads/main | 2023-05-04T07:57:08.746663 | 2021-05-19T22:46:04 | 2021-05-19T22:46:04 | 356,970,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,758 | py | from random import randint
# Internal game logic — the ships, the game board and all the logic tied to them.
# External game logic — the user interface, the AI, and the game controller that counts destroyed ships.
# It makes sense to start with the exception classes the program will use. For example, when a player tries to shoot at a cell outside the board, the internal logic should raise a BoardOutException, which the external logic then catches, reporting the error to the user.
class BoardException(Exception):
pass
class BoardOutException(BoardException):
def __str__(self):
return "Вы пытаетесь выстрелить за доску!"
class BoardUsedException(BoardException):
def __str__(self):
return "Вы уже стреляли в эту клетку"
class BoardWrongShipException(BoardException):
def __str__(self):
return "Корабль вышел за границы поля"
pass
# Next we implement the Dot class — a point on the board. Each point is described by:
#
# An x-axis coordinate.
# A y-axis coordinate.
# The program constantly passes around information about points on the board, so it makes sense to give them a dedicated data type.
# It is very convenient to implement __eq__ on this class so that points can be compared for equality.
# Then, to check whether a point is in a list, it is enough to use the in operator, just as with numbers.
class Dot:
def __init__(self,x,y):
self.x=x
self.y=y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self):
return f"Dot({self.x},{self.y})"
# Next comes the Ship class — a ship on the game board, described by:
#
# Its length.
# The point where the ship's bow is placed.
# The ship's orientation (vertical/horizontal).
# Its number of lives (how many of the ship's points are not hit yet).
# And it has the method:
#
# dots, which returns the list of all of the ship's points.
class Ship:
def __init__(self, bow, long, orientation):
self.bow = bow
self.long = long
self.orientation = orientation
self.lives = long
@property
def dots(self):
ship_dots = []
for i in range(self.long):
cur_x = self.bow.x
cur_y = self.bow.y
if self.orientation == 0:
cur_x += i
elif self.orientation == 1:
cur_y += i
ship_dots.append(Dot(cur_x, cur_y))
return ship_dots
def shooten(self, shot):
return shot in self.dots
# The most important class in the internal logic is Board — the game board. A board is described by:
#
# A two-dimensional list storing the state of every cell.
# The list of the board's ships.
# A bool parameter hid — whether the ships on the board should be hidden (when printing the enemy board) or not (for your own board).
# The number of live ships on the board.
class Board:
def __init__(self, hid=False, size=6):
self.size = size
self.hid = hid
self.count = 0
self.field = [["O"] * size for _ in range(size)]
self.busy = []
self.ships = []
    # And it has the following methods:
    #
    # add_ship, which places a ship on the board (raising an exception when the placement fails).
def add_ship(self, ship):
for d in ship.dots:
if self.out(d) or d in self.busy:
raise BoardWrongShipException()
for d in ship.dots:
self.field[d.x][d.y] = "■"
self.busy.append(d)
self.ships.append(ship)
self.contour(ship)
    # contour, which outlines a ship's perimeter. Useful both during the game itself and when placing ships (it marks the neighbouring points
    # where, by the rules, another ship cannot be).
def contour(self, ship, verb=False):
near = [
(-1, -1), (-1, 0), (-1, 1),
(0, -1), (0, 0), (0, 1),
(1, -1), (1, 0), (1, 1)
]
for d in ship.dots:
for dx, dy in near:
cur = Dot(d.x + dx, d.y + dy)
if not (self.out(cur)) and cur not in self.busy:
if verb:
self.field[cur.x][cur.y] = "."
self.busy.append(cur)
    # A method that prints the board to the console, depending on the hid parameter.
def __str__(self):
res = ""
res += " | 1 | 2 | 3 | 4 | 5 | 6 |"
for i, row in enumerate(self.field):
res += f"\n{i + 1} | " + " | ".join(row) + " |"
if self.hid:
res = res.replace("■", "O")
return res
    # out, which for a point (a Dot instance) returns True if it lies outside the board and False otherwise.
def out(self, d):
return not ((0 <= d.x < self.size) and (0 <= d.y < self.size))
    # shot, which fires at the board (raising exceptions on attempts to shoot outside the board or at an already used point).
def shot(self, d):
if self.out(d):
raise BoardOutException()
if d in self.busy:
raise BoardUsedException()
self.busy.append(d)
for ship in self.ships:
if d in ship.dots:
ship.lives -= 1
self.field[d.x][d.y] = "X"
if ship.lives == 0:
self.count += 1
self.contour(ship, verb=True)
print("Корабль уничтожен!")
return False
else:
print("Корабль ранен!")
return True
self.field[d.x][d.y] = "."
print("Мимо!")
return False
def begin(self):
self.busy = []
class All_board():
def __init__(self, board_1=None, board_2=None):
self.board_1 = board_1
self.board_2 = board_2
def __str__(self):
res = ""
res2 = ""
res += " Доска пользователя Доска компьютера "
res += f"\n | 1 | 2 | 3 | 4 | 5 | 6 | ... | 1 | 2 | 3 | 4 | 5 | 6 |"
for i, row in enumerate(self.board_1.field):
for j, row2 in enumerate(self.board_2.field):
if i == j:
res2 = " | ".join(row2).replace("■", "O")
res += f"\n{i + 1} | " + " | ".join(row) + " | " +"..."+ f"{i + 1} | " + res2 + " | "
return res
# Now for the external logic: the Player class — a participant in the game (both the AI and the user). It is the parent class for the AI and user classes.
# A player is described by:
# Their own board (a Board instance)
# The enemy board.
# And it has the following methods:
#
# ask — a method that "asks" the player which cell to shoot at.
# While we are writing the class shared by the AI and the user, we cannot define this method yet.
# We leave it unimplemented, signalling that subclasses must implement it.
# move — a method that makes one move in the game.
# Here we call ask, shoot at the enemy board (Board.shot), catch the exceptions, and if any occur, retry the move.
# The method must return True if this player gets another turn (for example, after hitting a ship).
class Player:
def __init__(self, board, enemy):
self.board = board
self.enemy = enemy
self.last_shoot = None
def ask(self):
raise NotImplementedError()
def move(self,shoot_near):
while True:
try:
target = self.ask(shoot_near)
repeat = self.enemy.shot(target)
self.last_shoot = target
# if repeat: print ("после попадания вторая попытка",last_shoot)
return repeat
except BoardException as e:
print(e)
# Now it only remains to derive the AI and User classes from Player and override ask in them.
# For the AI it picks a random point; for User the method asks for the point's coordinates in the console.
class AI(Player):
def ask(self, shoot_near):
if self.last_shoot is not None:
print("Последний выстрел компьютера ",self.last_shoot.x+1,self.last_shoot.y+1)
        # Try shooting next to the last hit
if shoot_near:
while True:
try:
print("стреляю рядом 1")
d = Dot(self.last_shoot.x, self.last_shoot.y + 1)
break
except BoardException as e:
print(e)
try:
print("стреляю рядом 2")
d = Dot(self.last_shoot.x, self.last_shoot.y - 1)
break
except BoardException as e:
print(e)
try:
print("стреляю рядом 3")
d = Dot(self.last_shoot.x + 1, self.last_shoot.y)
break
except BoardException as e:
print(e)
try:
print("стреляю рядом 4")
d = Dot(self.last_shoot.x - 1, self.last_shoot.y)
break
except BoardException as e:
print(e)
else:
d = Dot(randint(0, 5), randint(0, 5))
print(f"Ход компьютера: {d.x + 1} {d.y + 1}")
return d
class User(Player):
def ask(self,shoot_near):
if self.last_shoot is not None:
print("Последний выстрел игрока ", self.last_shoot.x+1,self.last_shoot.y+1)
while True:
cords = input("Ваш ход: ").split()
if len(cords) != 2:
print(" Введите 2 координаты! ")
continue
x, y = cords
if not (x.isdigit()) or not (y.isdigit()):
print(" Введите числа! ")
continue
x, y = int(x), int(y)
return Dot(x - 1, y - 1)
# Then we create our main class — Game. The game is described by:
#
# The human player, a User instance.
# The user's board.
# The computer player, an AI instance.
# The computer's board.
# And it has the methods:
#
# random_board — generates a random board. We simply keep trying to place ships at random cells of an initially empty board (looping until a placement attempt succeeds). It is better to place the long ships first and the short ones after. If many (a few thousand) attempts to place a ship have failed, the board is a bad one and no more ships will fit on it; in that case we start generating a new board.
# greet — greets the user in the console and explains the input format.
# loop — the game loop itself. We simply call the players' move methods in turn and check how many live ships remain on each board to detect a win.
# start — launches the game: first greet, then loop.
class Game:
def __init__(self, size=6):
self.size = size
choice = None
pl = None
        while choice is None:  # let the player choose how the ships are placed
            choice = int(input("0 - random ship placement, 1 - place ships manually: "))
if choice == 0:
pl = self.random_board()
break
elif choice == 1:
pl = self.self_board()
break
else:
choice = None
print("Неверно выбрано значение")
co = self.random_board()
co.hid = True
self.ai = AI(co, pl)
self.us = User(pl, co)
self.all = All_board(self.us.board, self.ai.board)
def random_board(self):
board = None
while board is None:
board = self.random_place()
return board
def random_place(self):
lens = [3, 2, 2, 1, 1, 1, 1]
board = Board(size=self.size)
attempts = 0
for l in lens:
while True:
attempts += 1
if attempts > 2000:
return None
ship = Ship(Dot(randint(0, self.size), randint(0, self.size)), l, randint(0, 1))
try:
board.add_ship(ship)
break
except BoardWrongShipException:
pass
board.begin()
return board
    # Let the player place the ships manually
def self_board(self):
lens = [3, 2, 2, 1, 1, 1, 1]
board = Board(size=self.size)
print("--------------------")
print("-Установите корабли-")
print(" формат ввода: x y z")
print(" x - номер строки ")
print(" y - номер столбца ")
print(" z - направление корабля (1-горизонтально, 0-вертикально)")
for l in lens:
while True:
print("-" * 20)
print("Доска пользователя:")
print(board)
bows = input(f"Введите координаты и направление для корабля длинной {l}: ").split()
if len(bows) != 3:
print(" Введите 3 значения! координтаы носа и направление ")
continue
x, y, z = bows
if not (x.isdigit()) or not (y.isdigit()) or not (z.isdigit()):
print(" Введите числа! ")
continue
x, y, z = int(x), int(y), int(z)
ship = Ship(Dot(x-1, y-1), l, z)
try:
board.add_ship(ship)
break
except BoardWrongShipException:
pass
board.begin()
return board
def greet(self):
print("-------------------")
print(" Приветсвуем вас ")
print(" в игре ")
print(" морской бой ")
print("-------------------")
print(" формат ввода: x y ")
print(" x - номер строки ")
print(" y - номер столбца ")
def loop(self):
num = 0
shoot_near = False
while True:
print("-" * 20)
# print("Доска пользователя:")
# print(self.us.board)
# print("-" * 20)
# print("Доска компьютера:")
# print(self.ai.board)
print(self.all)
if num % 2 == 0:
print("-" * 20)
print("Ходит пользователь!")
repeat = self.us.move(shoot_near)
else:
print("-" * 20)
print("Ходит компьютер!")
repeat = self.ai.move(shoot_near)
if repeat:
num -= 1
shoot_near = True
else:
shoot_near = False
if self.ai.board.count == 7:
print("-" * 20)
print("Пользователь выиграл!")
break
if self.us.board.count == 7:
print("-" * 20)
print("Компьютер выиграл!")
break
num += 1
def start(self):
self.greet()
self.loop()
# All that remains is to create a Game instance and call its start method.
g = Game()
g.start() | [
"[email protected]"
] | |
bc11e7f5741884920b6652765910f0a404da24b5 | 05d11b9cda35371669195e7c07e476dfb95ccaef | /triple_net_tensorboard_random_multiGpus/multi_gpu_demo.py | 16c2d8afe76dc5e507503e631d42c7c1f7fcbd33 | [] | no_license | Continue7777/DSSM- | d32a105c033f4a8074d67c3fee56543d65622669 | af018562123cb3c81fde9b27becf0bc042bafd79 | refs/heads/master | 2021-04-15T09:33:14.255692 | 2018-04-14T16:05:19 | 2018-04-14T16:05:19 | 126,166,329 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,507 | py | # -*- coding: utf-8 -*-
from datetime import datetime
import os
import time
import tensorflow as tf
import mnist_inference
# Configuration used when training the network; similar to the settings defined in section 5.5.
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.001
LEARNING_RATE_DECAY = 0.99
REGULARAZTION_RATE = 0.0001
TRAINING_STEPS = 1000
MOVING_AVERAGE_DECAY = 0.99
N_GPU = 4
# Paths for log and model output.
MODEL_SAVE_PATH = "/path/to/logs_and_models/"
MODEL_NAME = "model.ckpt"
# Path to the training data. Because each GPU must be fed its own training
# batch, providing data through placeholders would require preparing several
# copies by hand. To simplify data loading we instead use the input-queue
# approach introduced in chapter 7 and read from TFRecord files, so the path
# below points to the MNIST training data converted to TFRecord format. How
# to convert MNIST to TFRecords is covered in detail in chapter 7.
# Build the input queue that yields training data; see chapter 7 for details.
def get_input():
filename_queue = tf.train.string_input_producer([DATA_PATH])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
    # Define the parsing schema.
features = tf.parse_single_example(
serialized_example,
features={
'image_raw': tf.FixedLenFeature([], tf.string),
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
    # Parse the image and the label.
decoded_image = tf.decode_raw(features['image_raw'], tf.uint8)
reshaped_image = tf.reshape(decoded_image, [784])
retyped_image = tf.cast(reshaped_image, tf.float32)
label = tf.cast(features['label'], tf.int32)
    # Build the input queue and return shuffled batches.
min_after_dequeue = 10000
capacity = min_after_dequeue + 3 * BATCH_SIZE
return tf.train.shuffle_batch(
[retyped_image, label],
batch_size=BATCH_SIZE,
capacity=capacity,
min_after_dequeue=min_after_dequeue)
# Loss function: given a training batch, a regularization rule and a name
# scope, compute the total loss inside that scope. The scope is needed because
# the regularization losses computed on different GPUs are all added to a
# collection named 'losses'; without the scope we would pick up every GPU's terms.
def get_loss(x, y_, regularizer, scope):
    # Reuse the function defined in section 5.5 to compute the forward pass.
y = mnist_inference.inference(x, regularizer)
    # Cross-entropy loss.
cross_entropy = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_))
    # Regularization loss computed on the current GPU.
regularization_loss = tf.add_n(tf.get_collection('losses', scope))
    # Final total loss.
loss = cross_entropy + regularization_loss
return loss
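# Note on the scope argument (illustrative): every tower adds its
# regularization terms to the 'losses' collection, so
# tf.get_collection('losses', scope) with scope 'GPU_0' (the name_scope
# created in main() below) returns only the terms produced on that GPU
# rather than the terms from all towers.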
# Average each variable's gradient across all GPUs.
def average_gradients(tower_grads):
average_grads = []
    # Iterate over every variable together with the gradients computed for it on the different GPUs.
for grad_and_vars in zip(*tower_grads):
        # Average this variable's gradient over all GPUs.
grads = []
for g, _ in grad_and_vars:
expanded_g = tf.expand_dims(g, 0)
grads.append(expanded_g)
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
        # Pair the variable with its averaged gradient.
average_grads.append(grad_and_var)
    # Return the averaged gradients; they will be used to update the variables.
return average_grads
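# Illustrative structure of tower_grads (assumed, mirroring the output of
# compute_gradients): with two GPUs and variables v1, v2,
#   tower_grads = [[(g0_v1, v1), (g0_v2, v2)],   # tower 0
#                  [(g1_v1, v1), (g1_v2, v2)]]   # tower 1
# zip(*tower_grads) then yields ((g0_v1, v1), (g1_v1, v1)), ... so each loop
# iteration above averages one variable's gradients across all towers.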
# Main training procedure.
def main(argv=None):
    # Keep the simple ops on the CPU; only the network training runs on the GPUs.
with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Fetch a training batch.
x, y_ = get_input()
regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
        # Define the step counter and the exponentially decaying learning rate.
global_step = tf.get_variable(
'global_step', [], initializer=tf.constant_initializer(0),
trainable=False)
learning_rate = tf.train.exponential_decay(
LEARNING_RATE_BASE, global_step, 60000 / BATCH_SIZE,
            LEARNING_RATE_DECAY)
        # Define the optimizer.
opt = tf.train.GradientDescentOptimizer(learning_rate)
tower_grads = []
        # Run the network's optimization on the different GPUs.
for i in range(N_GPU):
            # Pin this optimization step to one GPU.
with tf.device('/gpu:%d' % i):
with tf.name_scope('GPU_%d' % i) as scope:
cur_loss = get_loss(x, y_, regularizer, scope)
                    # After the variables have been declared once, set the reuse
                    # flag to True so that the different GPUs update the same set
                    # of parameters. Note that tf.name_scope does not affect the
                    # tf.get_variable namespace.
tf.get_variable_scope().reuse_variables()
                    # Compute the gradients of all variables on the current GPU.
grads = opt.compute_gradients(cur_loss)
tower_grads.append(grads)
        # Average the gradients across GPUs and write them to the TensorBoard log.
grads = average_gradients(tower_grads)
for grad, var in grads:
if grad is not None:
tf.histogram_summary(
'gradients_on_average/%s' % var.op.name, grad)
        # Update the parameters with the averaged gradients.
        apply_gradient_op = opt.apply_gradients(
            grads, global_step=global_step)
for var in tf.trainable_variables():
tf.histogram_summary(var.op.name, var)
        # Maintain moving averages of the variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(
tf.trainable_variables())
        # Each training step updates the variable values and their moving averages.
train_op = tf.group(apply_gradient_op, variables_averages_op)
saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
init = tf.initialize_all_variables()
        # The training session.
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=True)) as sess:
            # Initialize all variables and start the queue runners.
init.run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.train.SummaryWriter(
MODEL_SAVE_PATH, sess.graph)
for step in range(TRAINING_STEPS):
                # Run one training step and time it.
start_time = time.time()
_, loss_value = sess.run([train_op, cur_loss])
duration = time.time() - start_time
                # Periodically report progress and measure the training speed.
if step != 0 and step % 10 == 0:
                    # Number of training examples consumed in this step: each
                    # run of the train op feeds one batch to every GPU, so the
                    # total is batch size × number of GPUs.
                    # num_examples_per_step examples were processed in `duration`
                    # seconds, so the throughput is
                    # num_examples_per_step / duration examples per second.
examples_per_sec = num_examples_per_step / duration
                    # Every GPU processes one batch per step, so the time spent
                    # on a single batch is duration / number of GPUs.
sec_per_batch = duration / N_GPU
                    # Print the training status.
                    format_str = ('step %d, loss = %.2f (%.1f examples/'
                                  'sec; %.3f sec/batch)')
print(format_str % (step, loss_value,
examples_per_sec, sec_per_batch))
                    # Log summaries so the run can be visualized in TensorBoard.
summary = sess.run(summary_op)
summary_writer.add_summary(summary, step)
                # Periodically save the current model.
if step % 1000 == 0 or (step + 1) == TRAINING_STEPS:
checkpoint_path = os.path.join(
                        MODEL_SAVE_PATH, MODEL_NAME)
saver.save(sess, checkpoint_path, global_step=step)
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
tf.app.run() | [
"[email protected]"
] | |
130a1da7648c1cb9b3d0bdc2b94793d83b2e1729 | 999a7707806f941d334170e9909a268d102929b2 | /yelpCNN.py | 3057ac376eecfe679a7625817028c878379593e2 | [] | no_license | wanaaaa/yelpCNN1D | 7e089ab4ca60e3cf478a6d5b0a5a3b3e80253ba4 | 2f1f1ad9b8101d7a52f2f3c4d01d92e3f197b19b | refs/heads/main | 2023-02-12T20:54:31.046391 | 2021-01-10T18:12:19 | 2021-01-10T18:12:19 | 328,447,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | # https://chriskhanhtran.github.io/posts/cnn-sentence-classification/
from functionClass import *
from gensim.models import Word2Vec
import torch
import torch.optim as optim
device = 'cuda'
rateReviewTrainList, rateReviewTestList, maxListCount = dataRead()
xyDataLoader = DataLoaderFun(rateReviewTrainList, maxListCount, batchSize=2500)
textCNNmodel = trainFun(xyDataLoader, maxListCount, epochs=20)
# textCNNmodel = TextCnn(maxListCount).cuda(device=device)
textCNNmodel = TextCnn(maxListCount).cpu()
textCNNmodel.load_state_dict(torch.load('traindTextCNNmodel.model'))
textCNNmodel.eval()
# ================================================
# ================================================
# ================================================
xyTestDataLoader = DataLoaderFun(rateReviewTestList, maxListCount, batchSize=1)
for epoch in range(1):
# print("num of epochs->", epoch)
for step, batch in enumerate(xyTestDataLoader):
x_test, y_test = tuple(t.to('cpu') for t in batch)
y_pridict = textCNNmodel(x_test)
print("y_pridict->", y_pridict, 'y_test->', y_test)
# break
torch.cuda.empty_cache() | [
"[email protected]"
] | |
5dbe47764578bd0bad972363605507b01fd8cdfa | 12cdef3d9de846ac1c430f606bf862ecda6e2345 | /attractions/__init__.py | 4be87a5da7791f1c059468e21ff1aacb5221f3c6 | [] | no_license | kirksudduth/petting_zoo | 45865109dbc9c40fb54fd92cd7fac7b3809cbcd0 | ce9fb52ca0aff0cb640a2041b3996156f8bb8ca1 | refs/heads/master | 2022-11-20T19:22:15.611061 | 2020-07-21T20:21:55 | 2020-07-21T20:21:55 | 279,920,616 | 0 | 0 | null | 2020-07-21T20:21:56 | 2020-07-15T16:30:02 | Python | UTF-8 | Python | false | false | 285 | py | from .attraction import Attraction
from .petting_zoo import Petting_zoo
from .snake_pit import Snake_pit
from .wetlands import Wetlands
from .attractions_instances import creature_culdesac
from .attractions_instances import no_feet_knoll
from .attractions_instances import swimmy_jazz
| [
"[email protected]"
] | |
ac0bc0f07ccc5cf690d123d9225d15656bbe59e7 | 4c7aac98eff82b6dc82334755096df5ad00237e6 | /Python/menu.py | 66e3ba4c5b15a961c7e3ea0fd84e0ebe95f018a3 | [] | no_license | HolbertonSchoolTun/HackDay_mastermind | 05fe07993f322384a1c2c644c7ad80441161ef8e | 92c5bbb0d01bae8dfaae3015195db6f33942c5a5 | refs/heads/master | 2022-12-24T04:42:43.966128 | 2020-09-19T02:35:39 | 2020-09-19T02:35:39 | 296,698,599 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #!/usr/bin/python3
"""
"""
import pygame
import pygame_menu
from main import start_game
class Game():
pygame.init()
surface = pygame.display.set_mode((450, 600))
def set_difficulty(value, difficulty):
if value == 1:
return(1)
else:
return (2)
def start_the_game():
# Do the job here !
start_game()
def Play_Mode(mode, value):
pass
pygame.display.set_caption("Mastermind")
menu = pygame_menu.Menu(600, 450, 'MasterMind',
theme=pygame_menu.themes.THEME_DARK)
menu.add_selector('Difficulty : ', [('Hard', 1), ('Easy', 2)], onchange=set_difficulty)
menu.add_selector('Play Mode : ', [('Single Player', 1), ('Two Players', 2)], onchange=Play_Mode)
menu.add_button('Play', start_the_game)
menu.add_button('Quit', pygame_menu.events.EXIT)
menu.mainloop(surface) | [
"achrefbs"
] | achrefbs |
530f4767b7bb69cd945bd97def72737f1ad66039 | 7da328d5365788bec00b62e3c3de8b5133fba092 | /impala/tests/test_impala.py | 8c58516171a9ff74ed847675759c70ca285b5840 | [
"Apache-2.0"
] | permissive | attilajeges/impyla | f7520677e426f42e60ecf9199d8dacd38eae1b99 | 35297fd573bd8d8984f89eec91f12dbb1837549a | refs/heads/master | 2023-07-15T17:15:48.683389 | 2020-10-01T23:10:16 | 2020-10-01T23:10:16 | 260,346,345 | 0 | 0 | Apache-2.0 | 2020-05-01T00:18:06 | 2020-05-01T00:18:06 | null | UTF-8 | Python | false | false | 2,025 | py | # Copyright 2019 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from pytest import yield_fixture
BIGGER_TABLE_NUM_ROWS = 100
@yield_fixture(scope='module')
def bigger_table(cur):
table_name = 'tmp_bigger_table'
ddl = """CREATE TABLE {0} (s string)
STORED AS PARQUET""".format(table_name)
cur.execute(ddl)
dml = """INSERT INTO {0}
VALUES {1}""".format(table_name,
",".join(["('row{0}')".format(i) for i in xrange(BIGGER_TABLE_NUM_ROWS)]))
# Disable codegen and expr rewrites so query runs faster.
cur.execute("set disable_codegen=1")
cur.execute("set enable_expr_rewrites=0")
cur.execute(dml)
try:
yield table_name
finally:
cur.execute("DROP TABLE {0}".format(table_name))
def test_has_more_rows(cur, bigger_table):
"""Test that impyla correctly handles empty row batches returned with the
hasMoreRows flag."""
# Set the fetch timeout very low and add sleeps so that Impala will return
# empty batches. Run on a single node with a single thread to make as predictable
# as possible.
cur.execute("set fetch_rows_timeout_ms=1")
cur.execute("set num_nodes=1")
cur.execute("set mt_dop=1")
cur.execute("""select *
from {0}
where s != cast(sleep(2) as string)""".format(bigger_table))
expected_rows = [("row{0}".format(i),) for i in xrange(BIGGER_TABLE_NUM_ROWS)]
assert sorted(cur.fetchall()) == sorted(expected_rows)
| [
"[email protected]"
] | |
2be33a204326b77eed20224274574b433213be6a | 73501b9e3623c3a9338306dbe52d1d89700f3d91 | /upload_this_on_arduino/pyduino.py | 2e4bf4eb623b2c987b4a395798e2605767cf5739 | [] | no_license | rouanro/PS | 72af2d8f5f3d1c628b8ad599c244235781b04c61 | a474d5ac9d23d50388c1811ddf256efa408b33d6 | refs/heads/master | 2020-03-18T21:57:12.402332 | 2018-05-29T15:19:15 | 2018-05-29T15:19:15 | 135,315,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,357 | py | """
A library to interface Arduino through serial connection
"""
import serial
import smtplib
from email.message import EmailMessage
class Arduino():
"""
Models an Arduino connection
"""
def __init__(self, serial_port='/dev/ttyACM0', baud_rate=9600,
read_timeout=5):
"""
Initializes the serial connection to the Arduino board
"""
self.conn = serial.Serial(serial_port, baud_rate)
self.conn.timeout = read_timeout # Timeout for readline()
def set_pin_mode(self, pin_number, mode):
"""
Performs a pinMode() operation on pin_number
Internally sends b'M{mode}{pin_number} where mode could be:
- I for INPUT
- O for OUTPUT
- P for INPUT_PULLUP MO13
"""
        command = (''.join(('M', mode, str(pin_number)))).encode()
        # Send the mode command; previously this write was commented out,
        # which left the method a no-op despite its docstring.
        self.conn.write(command)
def digital_read(self, pin_number):
"""
Performs a digital read on pin_number and returns the value (1 or 0)
Internally sends b'RD{pin_number}' over the serial connection
"""
command = (''.join(('RD', str(pin_number)))).encode()
        self.conn.write(command)  # request the read; without this, readline() blocks
line_received = self.conn.readline().decode().strip()
header, value = line_received.split(':') # e.g. D13:1
if header == ('D'+ str(pin_number)):
# If header matches
return int(value)
def digital_write(self, pin_number, digital_value):
"""
Writes the digital_value on pin_number
Internally sends b'WD{pin_number}:{digital_value}' over the serial
connection
"""
command = (''.join(('WD', str(pin_number), ':',
str(digital_value)))).encode()
        self.conn.write(command)
def analog_read(self, pin_number):
"""
Performs an analog read on pin_number and returns the value (0 to 1023)
Internally sends b'RA{pin_number}' over the serial connection
"""
command = (''.join(('RA', str(pin_number)))).encode()
self.conn.write(command)
print(command)
line_received = self.conn.readline().decode().strip()
#header, value = line_received.split(':') # e.g. A4:1
if line_received[0:2] == ("A0"):
value = line_received[3:]
# If header matches
return int(value)
if line_received[0:2] == ("A4"):
value = line_received[3:]
return value
def analog_write(self, pin_number, analog_value):
"""
Writes the analog value (0 to 255) on pin_number
Internally sends b'WA{pin_number}:{analog_value}' over the serial
connection
"""
command = (''.join(('WA', str(pin_number), ':',
str(analog_value)))).encode()
        self.conn.write(command)
def send_message(self, message):
command = message.encode()
self.conn.write(command)
def send_email(self, user, pwd, recipient, subject, body):
FROM = user
TO = recipient if isinstance(recipient, list) else [recipient]
SUBJECT = subject
TEXT = body
# Prepare actual message
message = """From: %s\nTo: %s\nSubject: %s\n\n%s
""" % (FROM, ", ".join(TO), SUBJECT, TEXT)
try:
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login(user, pwd)
server.sendmail(FROM, TO, message)
server.close()
print('successfully sent the mail')
        except Exception as exc:
            print("failed to send mail:", exc)
def close(self):
"""
To ensure we are properly closing our connection to the
Arduino device.
"""
self.conn.close()
print ('Connection to Arduino closed')
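# A minimal usage sketch. The serial port name and the pin numbers below are
# assumptions for illustration only -- adjust them to match the actual board.
if __name__ == '__main__':
    board = Arduino(serial_port='/dev/ttyACM0', baud_rate=9600)
    board.set_pin_mode(13, 'O') # configure pin 13 as OUTPUT
    board.digital_write(13, 1) # drive the pin high
    reading = board.analog_read(0) # raw value (0 to 1023) from pin A0
    print('A0 reads:', reading)
    board.close()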
| [
"[email protected]"
] | |
f031555a692495a482d208cf6100105e71ac4dbc | 79b38e6dad187bed26039f77611cc3feb7d75c1a | /issegm1/solve_ST.py | 70e0b3b0d45f420e221a8fc3e8d48bb954d43064 | [] | no_license | engrjavediqbal/MLSL | aa362c04a47b2bc921331bbb47dd4fe15bdb4bbe | 94ac81096fd6ba2c85352807dc93f6a6b6cc472d | refs/heads/master | 2023-08-04T11:22:13.335469 | 2023-07-25T13:55:41 | 2023-07-25T13:55:41 | 209,766,533 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,503 | py | from __future__ import print_function
import logging
import copy
from datetime import datetime
import argparse
import cPickle
import os
import os.path as osp
import re
import sys
import math
import time
from functools import partial
from PIL import Image
from multiprocessing import Pool
from sklearn.metrics import log_loss
import numpy as np
import mxnet as mx
import scipy.io
from util1 import mxutil
from util1 import transformer as ts
from util1 import util
from util1.lr_scheduler import FixedScheduler, LinearScheduler, PolyScheduler
from data1 import FileIter, make_divisible
#from data_src import FileIter, make_divisible, parse_split_file
def parse_split_file_tgt(dataset_tgt, split_tgt, data_root=''):
split_filename = 'issegm1/data_list/{}/{}.lst'.format(dataset_tgt, split_tgt)
image_list = []
label_gt_list = []
image_data_list = []
with open(split_filename) as f:
for item in f.readlines():
fields = item.strip().split('\t')
image_list.append(os.path.join(data_root, fields[0]))
image_data_list.append(fields[0])
label_gt_list.append(os.path.join(data_root, fields[1]))
return image_list, label_gt_list,image_data_list
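# Each line of the .lst split file above is expected to hold two tab-separated
# fields: <image path> TAB <ground-truth label path>. An illustrative line
# (these file names are made up, not from the real dataset):
#   frankfurt/im_000000.png<TAB>frankfurt/gt_000000_labelIds.png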
def parse_model_label(args):
assert args.model is not None
fields = [_.strip() for _ in osp.basename(args.model).split('_')]
# parse fields
i = 0
num_fields = len(fields)
# database
dataset = fields[i] if args.dataset is None else args.dataset
dataset_tgt = args.dataset_tgt
i += 1
######################## network structure
assert fields[i].startswith('rn')
net_type = re.compile('rn[a-z]*').findall(fields[i])[0]
net_name = fields[i][len(net_type):].strip('-')
i += 1
# number of classes
assert fields[i].startswith('cls')
classes = int(fields[i][len('cls'):])
i += 1
######################## feature resolution
#feat_stride = 32
feat_stride = 8
if i < num_fields and fields[i].startswith('s'):
feat_stride = int(fields[i][len('s'):])
i += 1
# learning rate
lr_params = {
'type': 'fixed',
'base': 0.1,
'args': None,
}
if args.base_lr is not None:
lr_params['base'] = args.base_lr
if args.lr_type in ('linear',):
lr_params['type'] = args.lr_type
elif args.lr_type in ('poly',):
lr_params['type'] = args.lr_type
elif args.lr_type == 'step':
lr_params['args'] = {'step': [int(_) for _ in args.lr_steps.split(',')],
'factor': 0.1}
model_specs = {
# model
'lr_params': lr_params,
'net_type': net_type,
'net_name': net_name,
'classes': classes,
'feat_stride': feat_stride,
# data
'dataset': dataset,
'dataset_tgt': dataset_tgt
}
return model_specs
def parse_args():
parser = argparse.ArgumentParser(description='Tune FCRNs from ResNets.')
parser.add_argument('--dataset', default=None,
help='The source dataset to use, e.g. cityscapes, voc.')
parser.add_argument('--dataset-tgt', dest='dataset_tgt', default=None,
help='The target dataset to use, e.g. cityscapes, GM.')
parser.add_argument('--split', dest='split', default='train',
help='The split to use, e.g. train, trainval.')
parser.add_argument('--split-tgt', dest='split_tgt', default='val',
help='The split to use in target domain e.g. train, trainval.')
parser.add_argument('--data-root', dest='data_root',
help='The root data dir. for source domain',
default=None, type=str)
parser.add_argument('--data-root-tgt', dest='data_root_tgt',
help='The root data dir. for target domain',
default=None, type=str)
parser.add_argument('--output', default=None,
help='The output dir.')
parser.add_argument('--model', default=None,
help='The unique label of this model.')
parser.add_argument('--batch-images', dest='batch_images',
help='The number of images per batch.',
default=None, type=int)
parser.add_argument('--crop-size', dest='crop_size',
help='The size of network input during training.',
default=None, type=int)
parser.add_argument('--origin-size', dest='origin_size',
help='The size of images to crop from in source domain',
default=2048, type=int)
parser.add_argument('--origin-size-tgt', dest='origin_size_tgt',
help='The size of images to crop from in target domain',
default=2048, type=int)
parser.add_argument('--scale-rate-range', dest='scale_rate_range',
help='The range of rescaling',
default='0.7,1.3', type=str)
parser.add_argument('--weights', default=None,
help='The path of a pretrained model.')
parser.add_argument('--gpus', default='0',
help='The devices to use, e.g. 0,1,2,3')
#
parser.add_argument('--lr-type', dest='lr_type',
help='The learning rate scheduler, e.g., fixed(default)/step/linear',
default=None, type=str)
parser.add_argument('--base-lr', dest='base_lr',
help='The lr to start from.',
default=None, type=float)
parser.add_argument('--lr-steps', dest='lr_steps',
help='The steps when to reduce lr.',
default=None, type=str)
parser.add_argument('--weight-decay', dest='weight_decay',
help='The weight decay in sgd.',
default=0.0005, type=float)
#
parser.add_argument('--from-epoch', dest='from_epoch',
help='The epoch to start from.',
default=None, type=int)
parser.add_argument('--stop-epoch', dest='stop_epoch',
help='The index of epoch to stop.',
default=None, type=int)
parser.add_argument('--to-epoch', dest='to_epoch',
help='The number of epochs to run.',
default=None, type=int)
# how many rounds to generate pseudo labels
parser.add_argument('--idx-round', dest='idx_round',
help='The current number of rounds to generate pseudo labels',
default=0, type=int)
# initial portion of selected pseudo labels in target domain
parser.add_argument('--init-tgt-port', dest='init_tgt_port',
help='The initial portion of pixels selected in target dataset, both by global and class-wise threshold',
default=0.3, type=float)
parser.add_argument('--init-src-port', dest='init_src_port',
help='The initial portion of images selected in source dataset',
default=0.3, type=float)
parser.add_argument('--seed-int', dest='seed_int',
help='The random seed',
default=0, type=int)
parser.add_argument('--mine-port', dest='mine_port',
help='The portion of data being mined',
default=0.5, type=float)
#
parser.add_argument('--mine-id-number', dest='mine_id_number',
help='Thresholding value for deciding mine id',
default=3, type=int)
parser.add_argument('--mine-thresh', dest='mine_thresh',
help='The threshold to determine the mine id',
default=0.001, type=float)
parser.add_argument('--mine-id-address', dest='mine_id_address',
help='The address of mine id',
default=None, type=str)
#
parser.add_argument('--phase',
help='Phase of this call, e.g., train/val.',
default='train', type=str)
parser.add_argument('--with-prior', dest='with_prior',
help='with prior',
default='True', type=str)
# for testing
parser.add_argument('--test-scales', dest='test_scales',
help='Lengths of the longer side to resize an image into, e.g., 224,256.',
default=None, type=str)
parser.add_argument('--test-flipping', dest='test_flipping',
help='If average predictions of original and flipped images.',
default=False, action='store_true')
parser.add_argument('--test-steps', dest='test_steps',
help='The number of steps to take, for predictions at a higher resolution.',
default=1, type=int)
#
parser.add_argument('--kvstore', dest='kvstore',
help='The type of kvstore, e.g., local/device.',
default='local', type=str)
parser.add_argument('--prefetch-threads', dest='prefetch_threads',
help='The number of threads to fetch data.',
default=1, type=int)
parser.add_argument('--prefetcher', dest='prefetcher',
help='The type of prefetercher, e.g., process/thread.',
default='thread', type=str)
parser.add_argument('--cache-images', dest='cache_images',
help='If cache images, e.g., 0/1',
default=None, type=int)
parser.add_argument('--log-file', dest='log_file',
default=None, type=str)
parser.add_argument('--check-start', dest='check_start',
help='The first epoch to snapshot.',
default=1, type=int)
parser.add_argument('--check-step', dest='check_step',
help='The steps between adjacent snapshots.',
default=4, type=int)
parser.add_argument('--debug',
help='True means logging debug info.',
default=False, action='store_true')
parser.add_argument('--backward-do-mirror', dest='backward_do_mirror',
help='True means less gpu memory usage.',
default=False, action='store_true')
parser.add_argument('--no-cudnn', dest='no_mxnet_cudnn_autotune_default',
help='True means deploy cudnn.',
default=False, action='store_true')
parser.add_argument('--kc-policy', dest='kc_policy',
help='The kc determination policy, currently only "global" and "cb" (class-balanced)',
default='cb', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.debug:
os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'
if args.backward_do_mirror:
os.environ['MXNET_BACKWARD_DO_MIRROR'] = '1'
if args.no_mxnet_cudnn_autotune_default:
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
if args.output is None:
if args.phase == 'val':
args.output = osp.dirname(args.weights)
else:
args.output = 'output'
if args.weights is not None:
if args.model is None:
assert '_ep-' in args.weights
parts = osp.basename(args.weights).split('_ep-')
args.model = '_'.join(parts[:-1])
if args.phase == 'train':
if args.from_epoch is None:
assert '_ep-' in args.weights
parts = os.path.basename(args.weights).split('_ep-')
assert len(parts) == 2
from_model = parts[0]
if from_model == args.model:
parts = os.path.splitext(os.path.basename(args.weights))[0].split('-')
args.from_epoch = int(parts[-1])
if args.model is None:
raise NotImplementedError('Missing argument: args.model')
if args.from_epoch is None:
args.from_epoch = 0
if args.log_file is None:
if args.phase == 'train':
args.log_file = '{}.log'.format(args.model)
elif args.phase == 'val':
suffix = ''
if args.split_tgt != 'val':
suffix = '_{}'.format(args.split_tgt)
args.log_file = '{}{}.log'.format(osp.splitext(osp.basename(args.weights))[0], suffix)
else:
raise NotImplementedError('Unknown phase: {}'.format(args.phase))
model_specs = parse_model_label(args)
if args.data_root is None:
args.data_root = osp.join('data', model_specs['dataset'])
return args, model_specs
def get_dataset_specs_tgt(args, model_specs):
dataset = args.dataset
dataset_tgt = args.dataset_tgt
meta = {}
mine_id = None
mine_id_priority = None
mine_port = args.mine_port
mine_th = args.mine_thresh
cmap_path = 'data/shared/cmap.pkl'
cache_images = args.phase == 'train'
mx_workspace = 1650
sys.path.insert(0, 'data/cityscapesscripts/helpers')
if args.phase == 'train':
mine_id = np.load(args.mine_id_address + '/mine_id.npy')
mine_id_priority = np.load(args.mine_id_address + '/mine_id_priority.npy')
mine_th = np.zeros(len(mine_id)) # trainId starts from 0
if dataset == 'gta' and dataset_tgt == 'cityscapes':
from labels import id2label, trainId2label
#
label_2_id_tgt = 255 * np.ones((256,))
for l in id2label:
if l in (-1, 255):
continue
label_2_id_tgt[l] = id2label[l].trainId
id_2_label_tgt = np.array([trainId2label[_].id for _ in trainId2label if _ not in (-1, 255)])
valid_labels_tgt = sorted(set(id_2_label_tgt.ravel()))
id_2_label_src = id_2_label_tgt
label_2_id_src = label_2_id_tgt
valid_labels_src = valid_labels_tgt
#
cmap = np.zeros((256, 3), dtype=np.uint8)
for i in id2label.keys():
cmap[i] = id2label[i].color
#
ident_size = True
#
#max_shape_src = np.array((1052, 1914))
max_shape_src = np.array((1024, 2048))
max_shape_tgt = np.array((1024, 2048))
#
if args.split in ('train+', 'trainval+'):
cache_images = False
#
if args.phase in ('val',):
mx_workspace = 8192
elif dataset == 'synthia' and dataset_tgt == 'cityscapes':
from labels_cityscapes_synthia import id2label as id2label_tgt
from labels_cityscapes_synthia import trainId2label as trainId2label_tgt
from labels_synthia import id2label as id2label_src
label_2_id_src = 255 * np.ones((256,))
for l in id2label_src:
if l in (-1, 255):
continue
label_2_id_src[l] = id2label_src[l].trainId
label_2_id_tgt = 255 * np.ones((256,))
for l in id2label_tgt:
if l in (-1, 255):
continue
label_2_id_tgt[l] = id2label_tgt[l].trainId
id_2_label_tgt = np.array([trainId2label_tgt[_].id for _ in trainId2label_tgt if _ not in (-1, 255)])
valid_labels_tgt = sorted(set(id_2_label_tgt.ravel()))
id_2_label_src = None
valid_labels_src = None
#
cmap = np.zeros((256, 3), dtype=np.uint8)
for i in id2label_tgt.keys():
cmap[i] = id2label_tgt[i].color
#
ident_size = True
#
max_shape_src = np.array((760, 1280))
max_shape_tgt = np.array((1024, 2048))
#
if args.split in ('train+', 'trainval+'):
cache_images = False
#
if args.phase in ('val',):
mx_workspace = 8192
else:
        raise NotImplementedError('Unknown dataset: {}'.format(args.dataset))
if cmap is None and cmap_path is not None:
if osp.isfile(cmap_path):
with open(cmap_path) as f:
cmap = cPickle.load(f)
meta['gpus'] = args.gpus
meta['mine_port'] = mine_port
meta['mine_id'] = mine_id
meta['mine_id_priority'] = mine_id_priority
meta['mine_th'] = mine_th
meta['label_2_id_tgt'] = label_2_id_tgt
meta['id_2_label_tgt'] = id_2_label_tgt
meta['valid_labels_tgt'] = valid_labels_tgt
meta['label_2_id_src'] = label_2_id_src
meta['id_2_label_src'] = id_2_label_src
meta['valid_labels_src'] = valid_labels_src
meta['cmap'] = cmap
meta['ident_size'] = ident_size
meta['max_shape_src'] = meta.get('max_shape_src', max_shape_src)
meta['max_shape_tgt'] = meta.get('max_shape_tgt', max_shape_tgt)
meta['cache_images'] = args.cache_images if args.cache_images is not None else cache_images
meta['mx_workspace'] = mx_workspace
return meta
class Multi_Accuracy(mx.metric.EvalMetric):
"""Calculate accuracies of multi label"""
def __init__(self, num=None):
self.num = num
super(Multi_Accuracy, self).__init__('multi-accuracy')
def reset(self):
"""Resets the internal evaluation result to initial state."""
self.num_inst = 0 if self.num is None else [0] * self.num
self.sum_metric = 0.0 if self.num is None else [0.0] * self.num
def update(self, labels, preds):
mx.metric.check_label_shapes(labels, preds)
if self.num is not None:
assert len(labels) == self.num
        for i in range(len(labels)):
            pred_label = preds[i].asnumpy().astype('float')
            label = labels[i].asnumpy().astype('int32')
            mx.metric.check_label_shapes(label, pred_label)
            if self.num is None:
                outEval = _eval_func(label, pred_label)
                self.sum_metric = outEval[0]
                self.num_inst = outEval[1]
            else:
                if i == 0:
                    outEval = _eval_func(label, pred_label)
                    self.sum_metric[i] = outEval[0]
                    self.num_inst[i] = outEval[1]
                else:
                    # score the second (sigmoid) task with the cross-entropy helper below
                    self.sum_metric[i] = cross_entropy(label.flatten(), pred_label.flatten())
                    self.num_inst[i] = len(pred_label.flat)
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num is None:
return super(Multi_Accuracy, self).get()
else:
return zip(*(('%s-task%d'%(self.name, i), float('nan') if self.num_inst[i] == 0
else self.sum_metric[i] / self.num_inst[i])
for i in range(self.num)))
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
if self.num is None:
return super(Multi_Accuracy, self).get_name_value()
name, value = self.get()
return list(zip(name, value))
def _eval_func(label, pred):
# global sxloss
gt_label = label.ravel()
valid_flag = gt_label != 255
labels = gt_label[valid_flag].astype(int)
n,c,h,w = pred.shape
valid_inds = np.where(valid_flag)[0]
probmap = np.rollaxis(pred.astype(np.float32),1).reshape((c, -1))
valid_probmap = probmap[labels, valid_inds]
log_valid_probmap = -np.log(valid_probmap+1e-32)
sum_metric = log_valid_probmap.sum()
num_inst = valid_flag.sum()
return (sum_metric, num_inst + (num_inst == 0))
def cross_entropy(targets, predictions):
N = predictions.shape[0]
lo = np.log(predictions+ 1e-6)
#print predictions,lo
ce = -np.sum(targets*lo)/N
return ce
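# Quick numeric sanity check for cross_entropy (values are illustrative):
#   cross_entropy(np.array([1., 0.]), np.array([0.9, 0.1]))
#   = -(1*log(0.9 + 1e-6) + 0*log(0.1 + 1e-6)) / 2 ~= 0.0527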
def _get_scalemeanstd():
if model_specs['net_type'] in ('rna',):
return (1.0 / 255,
np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3)),
np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3)))
return None, None, None
def _get_transformer_image():
scale, mean_, std_ = _get_scalemeanstd()
transformers = []
if scale > 0:
transformers.append(ts.ColorScale(np.single(scale)))
transformers.append(ts.ColorNormalize(mean_, std_))
return transformers
def _get_module(args, margs, dargs, net=None):
if net is None:
# the following lines show how to create symbols for our networks
if model_specs['net_type'] == 'rna':
from util1.symbol.symbol import cfg as symcfg
symcfg['lr_type'] = 'alex'
symcfg['workspace'] = dargs.mx_workspace
symcfg['bn_use_global_stats'] = True
if model_specs['net_name'] == 'a1':
from util1.symbol.resnet_v2 import fcrna_model_a1, fcrna_model_a1_1
#net = fcrna_model_a1(margs.classes, margs.feat_stride, bootstrapping=False)
net = fcrna_model_a1_1(margs.classes, margs.feat_stride, bootstrapping=False)
if net is None:
raise NotImplementedError('Unknown network: {}'.format(vars(margs)))
contexts = [mx.gpu(int(_)) for _ in args.gpus.split(',')]
#mod = mx.mod.Module(net, context=contexts)
mod = mx.mod.Module(net, context=contexts, label_names=['softmax_label', 'sigmoid_label'])
return mod
def _make_dirs(path):
if not osp.isdir(path):
os.makedirs(path)
def facc(label, pred):
pred = pred.argmax(1).ravel()
label = label.ravel()
return (pred == label).mean()
def fentropy(label, pred):
pred_source = pred[:, 1, :, :].ravel()
label = label.ravel()
return -(label * np.log(pred_source + 1e-12) + (1. - label) * np.log(1. - pred_source + 1e-12)).mean()
def _interp_preds_as_impl(num_classes, im_size, pred_stride, imh, imw, pred):
imh0, imw0 = im_size
pred = pred.astype(np.single, copy=False)
input_h, input_w = pred.shape[0] * pred_stride, pred.shape[1] * pred_stride
assert pred_stride >= 1.
this_interp_pred = np.array(Image.fromarray(pred).resize((input_w, input_h), Image.CUBIC))
if imh0 == imh:
interp_pred = this_interp_pred[:imh, :imw]
else:
interp_method = util.get_interp_method(imh, imw, imh0, imw0)
interp_pred = np.array(Image.fromarray(this_interp_pred[:imh, :imw]).resize((imw0, imh0), interp_method))
return interp_pred
def interp_preds_as(im_size, net_preds, pred_stride, imh, imw, threads=4):
num_classes = net_preds.shape[0]
worker = partial(_interp_preds_as_impl, num_classes, im_size, pred_stride, imh, imw)
if threads == 1:
ret = [worker(_) for _ in net_preds]
else:
pool = Pool(threads)
ret = pool.map(worker, net_preds)
pool.close()
return np.array(ret)
class ScoreUpdater(object):
def __init__(self, valid_labels, c_num, x_num, logger=None, label=None, info=None):
self._valid_labels = valid_labels
self._confs = np.zeros((c_num, c_num, x_num))
self._pixels = np.zeros((c_num, x_num))
self._logger = logger
self._label = label
self._info = info
@property
def info(self):
return self._info
def reset(self):
self._start = time.time()
self._computed = np.zeros((self._pixels.shape[1],))
self._confs[:] = 0
self._pixels[:] = 0
@staticmethod
def calc_updates(valid_labels, pred_label, label):
num_classes = len(valid_labels)
pred_flags = [set(np.where((pred_label == _).ravel())[0]) for _ in valid_labels]
class_flags = [set(np.where((label == _).ravel())[0]) for _ in valid_labels]
conf = [len(class_flags[j].intersection(pred_flags[k])) for j in xrange(num_classes) for k in
xrange(num_classes)]
pixel = [len(class_flags[j]) for j in xrange(num_classes)]
return np.single(conf).reshape((num_classes, num_classes)), np.single(pixel)
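    # Illustrative example (hypothetical 2-class case): with valid_labels=[0, 1],
    # pred_label=np.array([0, 1, 1]) and label=np.array([0, 0, 1]), calc_updates
    # returns conf=[[1, 1], [0, 1]] (rows: ground truth, cols: prediction) and
    # pixel=[2, 1], the per-class ground-truth pixel counts.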
def do_updates(self, conf, pixel, i, computed=True):
if computed:
self._computed[i] = 1
self._confs[:, :, i] = conf
self._pixels[:, i] = pixel
def update(self, pred_label, label, i, computed=True):
conf, pixel = ScoreUpdater.calc_updates(self._valid_labels, pred_label, label)
self.do_updates(conf, pixel, i, computed)
self.scores(i)
def scores(self, i=None, logger=None):
confs = self._confs
pixels = self._pixels
num_classes = pixels.shape[0]
x_num = pixels.shape[1]
class_pixels = pixels.sum(1)
class_pixels += class_pixels == 0
scores = confs[xrange(num_classes), xrange(num_classes), :].sum(1)
acc = scores.sum() / pixels.sum()
cls_accs = scores / class_pixels
class_preds = confs.sum(0).sum(1)
ious = scores / (class_pixels + class_preds - scores)
logger = self._logger if logger is None else logger
if logger is not None:
if i is not None:
speed = 1. * self._computed.sum() / (time.time() - self._start)
logger.info('Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, speed))
name = '' if self._label is None else '{}, '.format(self._label)
logger.info('{}pixel acc: {:.2f}%, mean acc: {:.2f}%, mean iou: {:.2f}%'. \
format(name, acc * 100, cls_accs.mean() * 100, ious.mean() * 100))
with util.np_print_options(formatter={'float': '{:5.2f}'.format}):
logger.info('\n{}'.format(cls_accs * 100))
logger.info('\n{}'.format(ious * 100))
return acc, cls_accs, ious
def overall_scores(self, logger=None):
acc, cls_accs, ious = self.scores(None, logger)
return acc, cls_accs.mean(), ious.mean()
def _train_impl(args, model_specs, logger):
if len(args.output) > 0:
_make_dirs(args.output)
# dataiter
dataset_specs_tgt = get_dataset_specs_tgt(args, model_specs)
scale, mean_, _ = _get_scalemeanstd()
if scale > 0:
mean_ /= scale
margs = argparse.Namespace(**model_specs)
dargs = argparse.Namespace(**dataset_specs_tgt)
# number of list_lines
split_filename = 'issegm1/data_list/{}/{}.lst'.format(margs.dataset, args.split)
num_source = 0
with open(split_filename) as f:
for item in f.readlines():
num_source = num_source + 1
#
batches_per_epoch = num_source // args.batch_images
# optimizer
assert args.to_epoch is not None
if args.stop_epoch is not None:
assert args.stop_epoch > args.from_epoch and args.stop_epoch <= args.to_epoch
else:
args.stop_epoch = args.to_epoch
from_iter = args.from_epoch * batches_per_epoch
to_iter = args.to_epoch * batches_per_epoch
lr_params = model_specs['lr_params']
base_lr = lr_params['base']
if lr_params['type'] == 'fixed':
scheduler = FixedScheduler()
elif lr_params['type'] == 'step':
left_step = []
for step in lr_params['args']['step']:
if from_iter > step:
base_lr *= lr_params['args']['factor']
continue
left_step.append(step - from_iter)
model_specs['lr_params']['step'] = left_step
scheduler = mx.lr_scheduler.MultiFactorScheduler(**lr_params['args'])
elif lr_params['type'] == 'linear':
scheduler = LinearScheduler(updates=to_iter + 1, frequency=50,
stop_lr=min(base_lr / 100., 1e-6),
offset=from_iter)
elif lr_params['type'] == 'poly':
scheduler = PolyScheduler(updates=to_iter + 1, frequency=50,
stop_lr=min(base_lr / 100., 1e-8),
power=0.9,
offset=from_iter)
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2)
optimizer_params = {
'learning_rate': base_lr,
'momentum': 0.9,
'wd': args.weight_decay,
'lr_scheduler': scheduler,
'rescale_grad': 1.0 / len(args.gpus.split(',')),
}
data_src_port = args.init_src_port
data_src_num = int(num_source * data_src_port)
mod = _get_module(args, margs, dargs)
addr_weights = args.weights # first weights should be xxxx_ep-0000.params!
addr_output = args.output
# initializer
net_args = None
net_auxs = None
###
if addr_weights is not None:
net_args, net_auxs = mxutil.load_params_from_file(addr_weights)
print ('feat_stride', margs.feat_stride)
####################################### training model
to_model = osp.join(addr_output, str(args.idx_round), '{}_ep'.format(args.model))
dataiter = FileIter(dataset=margs.dataset,
split=args.split,
data_root=args.data_root,
num_sel_source=data_src_num,
num_source=num_source,
seed_int=args.seed_int,
dataset_tgt=args.dataset_tgt,
split_tgt=args.split_tgt,
data_root_tgt=args.data_root_tgt,
sampler='random',
batch_images=args.batch_images,
meta=dataset_specs_tgt,
rgb_mean=mean_,
feat_stride=margs.feat_stride,
label_stride=margs.feat_stride,
origin_size=args.origin_size,
origin_size_tgt=args.origin_size_tgt,
crop_size=args.crop_size,
scale_rate_range=[float(_) for _ in args.scale_rate_range.split(',')],
transformer=None,
transformer_image=ts.Compose(_get_transformer_image()),
prefetch_threads=args.prefetch_threads,
prefetcher_type=args.prefetcher,
)
dataiter.reset()
#ad = dataiter.next()
#label_shapes = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in label_shapes]
#print (ad)
mod.fit(
dataiter,
eval_metric=Multi_Accuracy(2),
#eval_metric=_get_metric(),
batch_end_callback=mx.callback.log_train_metric(10, auto_reset=False),
epoch_end_callback=mx.callback.do_checkpoint(to_model),
kvstore=args.kvstore,
optimizer='sgd',
optimizer_params=optimizer_params,
initializer=initializer,
arg_params=net_args,
aux_params=net_auxs,
allow_missing=args.from_epoch == 0,
begin_epoch=args.from_epoch,
num_epoch=args.stop_epoch,
)
# @profile
# MST:
def _val_impl(args, model_specs, logger):
if len(args.output) > 0:
_make_dirs(args.output)
# dataiter
dataset_specs_tgt = get_dataset_specs_tgt(args, model_specs)
scale, mean_, _ = _get_scalemeanstd()
if scale > 0:
mean_ /= scale
#print (model_specs)
margs = argparse.Namespace(**model_specs)
dargs = argparse.Namespace(**dataset_specs_tgt)
mod = _get_module(args, margs, dargs)
addr_weights = args.weights # first weights should be xxxx_ep-0000.params!
addr_output = args.output
# current round index
cround = args.idx_round
net_args = None
net_auxs = None
###
if addr_weights is not None:
net_args, net_auxs = mxutil.load_params_from_file(addr_weights)
######
save_dir = osp.join(args.output, str(cround), 'results')
save_dir_self_train = osp.join(args.output, str(cround), 'self_train')
# pseudo labels
save_dir_pseudo_labelIds = osp.join(save_dir_self_train, 'pseudo_labelIds')
save_dir_pseudo_color = osp.join(save_dir_self_train, 'pseudo_color')
# without sp
save_dir_nplabelIds = osp.join(save_dir, 'nplabelIds')
save_dir_npcolor = osp.join(save_dir, 'npcolor')
# probability map
save_dir_probmap = osp.join(args.output, 'probmap')
save_dir_stats = osp.join(args.output, 'stats')
_make_dirs(save_dir)
_make_dirs(save_dir_pseudo_labelIds)
_make_dirs(save_dir_pseudo_color)
_make_dirs(save_dir_nplabelIds)
_make_dirs(save_dir_npcolor)
_make_dirs(save_dir_probmap)
_make_dirs(save_dir_stats)
if args.with_prior == 'True':
# with sp
save_dir_splabelIds = osp.join(save_dir_self_train, 'splabelIds')
save_dir_spcolor = osp.join(save_dir_self_train, 'spcolor')
_make_dirs(save_dir_splabelIds)
_make_dirs(save_dir_spcolor)
if args.kc_policy == 'cb':
# reweighted prediction map
save_dir_rwlabelIds = osp.join(save_dir_self_train, 'rwlabelIds')
save_dir_rwcolor = osp.join(save_dir_self_train, 'rwcolor')
_make_dirs(save_dir_rwlabelIds)
_make_dirs(save_dir_rwcolor)
######
dataset_tgt = model_specs['dataset_tgt']
image_list_tgt, label_gt_list_tgt,image_tgt_list = parse_split_file_tgt(margs.dataset_tgt, args.split_tgt)
has_gt = args.split_tgt in ('train', 'val',)
crop_sizes = sorted([int(_) for _ in args.test_scales.split(',')])[::-1]
crop_size = crop_sizes[0]
assert len(crop_sizes) == 1, 'multi-scale testing not implemented'
label_stride = margs.feat_stride
x_num = len(image_list_tgt)
do_forward = True
# for all images that has the same resolution
if do_forward:
batch = None
transformers = [ts.Scale(crop_size, Image.CUBIC, False)]
transformers += _get_transformer_image()
transformer = ts.Compose(transformers)
scorer_np = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num, logger)
scorer_np.reset()
# with prior
if args.with_prior == 'True':
scorer = ScoreUpdater(dargs.valid_labels_tgt, margs.classes, x_num, logger)
scorer.reset()
done_count = 0 # for multi-scale testing
num_classes = margs.classes
init_tgt_port = float(args.init_tgt_port)
# class-wise
cls_exist_array = np.zeros([1, num_classes], dtype=int)
cls_thresh = np.zeros([num_classes]) # confidence thresholds for all classes
cls_size = np.zeros([num_classes]) # number of predictions in each class
array_pixel = 0.0
# prior
if args.with_prior == 'True':
in_path_prior = 'spatial_prior/{}/prior_array.mat'.format(args.dataset)
sprior = scipy.io.loadmat(in_path_prior)
prior_array = sprior["prior_array"].astype(np.float32)
#prior_array = np.maximum(prior_array,0)
############################ network forward
for i in xrange(x_num):
start = time.time()
############################ network forward on single image (from official ResNet-38 implementation)
sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
im_path = osp.join(args.data_root_tgt, image_list_tgt[i])
rim = np.array(Image.open(im_path).convert('RGB'), np.uint8)
if do_forward:
im = transformer(rim)
imh, imw = im.shape[:2]
# init
if batch is None:
if dargs.ident_size:
input_h = make_divisible(imh, margs.feat_stride)
input_w = make_divisible(imw, margs.feat_stride)
else:
input_h = input_w = make_divisible(crop_size, margs.feat_stride)
label_h, label_w = input_h / label_stride, input_w / label_stride
test_steps = args.test_steps
pred_stride = label_stride / test_steps
pred_h, pred_w = label_h * test_steps, label_w * test_steps
input_data = np.zeros((1, 3, input_h, input_w), np.single)
input_label = 255 * np.ones((1, label_h * label_w), np.single)
#dataiter_tgt = mx.io.NDArrayIter(input_data, input_label)
input_label2 = np.ones((1, 19), np.single)
label = {'softmax_label':input_label, 'sigmoid_label':input_label2}
dataiter_tgt = mx.io.NDArrayIter(input_data, label)
batch = dataiter_tgt.next()
mod.bind(dataiter_tgt.provide_data, dataiter_tgt.provide_label, for_training=False, force_rebind=True)
if not mod.params_initialized:
mod.init_params(arg_params=net_args, aux_params=net_auxs)
nim = np.zeros((3, imh + label_stride, imw + label_stride), np.single)
sy = sx = label_stride // 2
nim[:, sy:sy + imh, sx:sx + imw] = im.transpose(2, 0, 1)
net_preds = np.zeros((margs.classes, pred_h, pred_w), np.single)
sy = sx = pred_stride // 2 + np.arange(test_steps) * pred_stride
for ix in xrange(test_steps):
for iy in xrange(test_steps):
input_data = np.zeros((1, 3, input_h, input_w), np.single)
input_data[0, :, :imh, :imw] = nim[:, sy[iy]:sy[iy] + imh, sx[ix]:sx[ix] + imw]
batch.data[0] = mx.nd.array(input_data)
mod.forward(batch, is_train=False)
this_call_preds = mod.get_outputs()[0].asnumpy()[0]
if args.test_flipping:
batch.data[0] = mx.nd.array(input_data[:, :, :, ::-1])
mod.forward(batch, is_train=False)
# average the original and flipped image prediction
this_call_preds = 0.5 * (
this_call_preds + mod.get_outputs()[0].asnumpy()[0][:, :, ::-1])
net_preds[:, iy:iy + pred_h:test_steps, ix:ix + pred_w:test_steps] = this_call_preds
interp_preds_np = interp_preds_as(rim.shape[:2], net_preds, pred_stride, imh, imw)
########################### #save predicted labels and confidence score vectors in target domains
# no prior prediction with trainIDs
pred_label_np = interp_preds_np.argmax(0)
# no prior prediction with labelIDs
if dargs.id_2_label_tgt is not None:
pred_label_np = dargs.id_2_label_tgt[pred_label_np]
# no prior color prediction
im_to_save_np = Image.fromarray(pred_label_np.astype(np.uint8))
im_to_save_npcolor = im_to_save_np.copy()
if dargs.cmap is not None:
im_to_save_npcolor.putpalette(dargs.cmap.ravel())
# save no prior prediction with labelIDs and colors
out_path_np = osp.join(save_dir_nplabelIds, '{}.png'.format(sample_name))
out_path_npcolor = osp.join(save_dir_npcolor, '{}.png'.format(sample_name))
im_to_save_np.save(out_path_np)
im_to_save_npcolor.save(out_path_npcolor)
# with prior
if args.with_prior == 'True':
probmap = np.multiply(prior_array,interp_preds_np).astype(np.float32)
elif args.with_prior == 'False':
probmap = interp_preds_np.copy().astype(np.float32)
pred_label = probmap.argmax(0)
probmap_max = np.amax(probmap, axis=0)
############################ save confidence scores of target domain as class-wise vectors
for idx_cls in np.arange(0, num_classes):
idx_temp = pred_label == idx_cls
sname = 'array_cls' + str(idx_cls)
if not (sname in locals()):
exec ("%s = np.float32(0)" % sname)
if idx_temp.any():
cls_exist_array[0, idx_cls] = 1
probmap_max_cls_temp = probmap_max[idx_temp].astype(np.float32)
len_cls = probmap_max_cls_temp.size
# downsampling by rate 4
probmap_cls = probmap_max_cls_temp[0:len_cls:4]
exec ("%s = np.append(%s,probmap_cls)" % (sname, sname))
############################ save prediction
# save prediction probablity map
out_path_probmap = osp.join(save_dir_probmap, '{}.npy'.format(sample_name))
np.save(out_path_probmap, probmap.astype(np.float32))
# save predictions with spatial priors, if sp exist.
if args.with_prior == 'True':
if dargs.id_2_label_tgt is not None:
pred_label = dargs.id_2_label_tgt[pred_label]
im_to_save_sp = Image.fromarray(pred_label.astype(np.uint8))
im_to_save_spcolor = im_to_save_sp.copy()
if dargs.cmap is not None: # save color seg map
im_to_save_spcolor.putpalette(dargs.cmap.ravel())
out_path_sp = osp.join(save_dir_splabelIds, '{}.png'.format(sample_name))
out_path_spcolor = osp.join(save_dir_spcolor, '{}.png'.format(sample_name))
im_to_save_sp.save(out_path_sp)
im_to_save_spcolor.save(out_path_spcolor)
# log information
done_count += 1
if not has_gt:
logger.info(
'Done {}/{} with speed: {:.2f}/s'.format(i + 1, x_num, 1. * done_count / (time.time() - start)))
continue
if args.split_tgt in ('train', 'val'):
# evaluate with ground truth
label_path = osp.join(args.data_root_tgt, label_gt_list_tgt[i])
label = np.array(Image.open(label_path), np.uint8)
if args.with_prior == 'True':
scorer.update(pred_label, label, i)
scorer_np.update(pred_label_np, label, i)
# save target training list
fout = 'issegm1/data_list/{}/{}_training_gpu{}.lst'.format(args.dataset_tgt,args.split_tgt,args.gpus)
fo = open(fout, "w")
for idx_image in range(x_num):
sample_name = osp.splitext(osp.basename(image_list_tgt[idx_image]))[0]
fo.write(image_tgt_list[idx_image] + '\t' + osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name)) + '\n')
fo.close()
############################ kc generation
start_sort = time.time()
# threshold for each class
if args.kc_policy == 'global':
for idx_cls in np.arange(0,num_classes):
tname = 'array_cls' + str(idx_cls)
exec ("array_pixel = np.append(array_pixel,%s)" % tname) # reverse=False for ascending losses and reverse=True for descending confidence
array_pixel = sorted(array_pixel, reverse = True)
len_cls = len(array_pixel)
len_thresh = int(math.floor(len_cls * init_tgt_port))
cls_size[:] = len_cls
cls_thresh[:] = array_pixel[len_thresh-1].copy()
array_pixel = 0.0
if args.kc_policy == 'cb':
for idx_cls in np.arange(0, num_classes):
tname = 'array_cls' + str(idx_cls)
if cls_exist_array[0, idx_cls] == 1:
exec("%s = sorted(%s,reverse=True)" % (tname, tname)) # reverse=False for ascending losses and reverse=True for descending confidence
exec("len_cls = len(%s)" % tname)
cls_size[idx_cls] = len_cls
len_thresh = int(math.floor(len_cls * init_tgt_port))
if len_thresh != 0:
exec("cls_thresh[idx_cls] = %s[len_thresh-1].copy()" % tname)
exec("%s = %d" % (tname, 0.0))
# threshold for mine_id with priority
mine_id_priority = np.nonzero(cls_size / np.sum(cls_size) < args.mine_thresh)[0]
# chosen mine_id
mine_id_all = np.argsort(cls_size / np.sum(cls_size))
mine_id = mine_id_all[:args.mine_id_number]
print(mine_id)
np.save(save_dir_stats + '/mine_id.npy', mine_id)
np.save(save_dir_stats + '/mine_id_priority.npy', mine_id_priority)
np.save(save_dir_stats + '/cls_thresh.npy', cls_thresh)
np.save(save_dir_stats + '/cls_size.npy', cls_size)
logger.info('Kc determination done in %.2f s.', time.time() - start_sort)
############################ pseudo-label generation
for i in xrange(x_num):
sample_name = osp.splitext(osp.basename(image_list_tgt[i]))[0]
sample_pseudo_label_name = osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name))
sample_pseudocolor_label_name = osp.join(save_dir_pseudo_color, '{}.png'.format(sample_name))
out_path_probmap = osp.join(save_dir_probmap, '{}.npy'.format(sample_name))
probmap = np.load(out_path_probmap)
rw_probmap = np.zeros(probmap.shape, np.single)
cls_thresh[cls_thresh == 0] = 1.0 # cls_thresh = 0 means there is no prediction in this class
############# pseudo-label assignment
for idx_cls in np.arange(0, num_classes):
cls_thresh_temp = cls_thresh[idx_cls]
cls_probmap = probmap[idx_cls,:,:]
cls_rw_probmap = np.true_divide(cls_probmap,cls_thresh_temp)
rw_probmap[idx_cls,:,:] = cls_rw_probmap.copy()
rw_probmap_max = np.amax(rw_probmap, axis=0)
pseudo_label = np.argmax(rw_probmap,axis=0)
############# pseudo-label selection
idx_unconfid = rw_probmap_max < 1
idx_confid = rw_probmap_max >= 1
# pseudo-labels with labelID
pseudo_label = pseudo_label.astype(np.uint8)
pseudo_label_labelID = dargs.id_2_label_tgt[pseudo_label]
rw_pred_label = pseudo_label_labelID.copy()
# ignore label assignment, compatible with labelIDs
pseudo_label_labelID[idx_unconfid] = 0
############# save pseudo-label
im_to_save_pseudo = Image.fromarray(pseudo_label_labelID.astype(np.uint8))
im_to_save_pseudocol = im_to_save_pseudo.copy()
if dargs.cmap is not None: # save segmentation prediction with color
im_to_save_pseudocol.putpalette(dargs.cmap.ravel())
out_path_pseudo = osp.join(save_dir_pseudo_labelIds, '{}.png'.format(sample_name))
out_path_colpseudo = osp.join(save_dir_pseudo_color, '{}.png'.format(sample_name))
im_to_save_pseudo.save(out_path_pseudo)
im_to_save_pseudocol.save(out_path_colpseudo)
############# save reweighted pseudo-label in cbst
if args.kc_policy == 'cb':
im_to_save_rw = Image.fromarray(rw_pred_label.astype(np.uint8))
im_to_save_rwcolor = im_to_save_rw.copy()
if dargs.cmap is not None:
im_to_save_rwcolor.putpalette(dargs.cmap.ravel())
out_path_rw = osp.join(save_dir_rwlabelIds, '{}.png'.format(sample_name))
out_path_rwcolor = osp.join(save_dir_rwcolor, '{}.png'.format(sample_name))
# save no prior prediction with labelIDs and colors
im_to_save_rw.save(out_path_rw)
im_to_save_rwcolor.save(out_path_rwcolor)
## remove probmap folder
import shutil
shutil.rmtree(save_dir_probmap)
##
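# A toy, self-contained sketch of the class-balanced reweighting used above
# (all numbers are made up; it only illustrates the selection rule):
#   probmap = np.array([[[0.6]], [[0.4]]]) # (classes, h, w)
#   cls_thresh = np.array([0.8, 0.4]) # per-class confidence thresholds
#   rw = probmap / cls_thresh.reshape(-1, 1, 1) # -> [[[0.75]], [[1.0]]]
#   pseudo = rw.argmax(0) # class 1 wins after reweighting
#   confident = rw.max(0) >= 1 # only confident pixels keep a pseudo-label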
if __name__ == "__main__":
util.cfg['choose_interpolation_method'] = True
args, model_specs = parse_args()
if len(args.output) > 0:
_make_dirs(args.output)
logger = util.set_logger(args.output, args.log_file, args.debug)
logger.info('start with arguments %s', args)
logger.info('and model specs %s', model_specs)
if args.phase == 'train':
_train_impl(args, model_specs, logger)
elif args.phase == 'val':
_val_impl(args, model_specs, logger)
else:
raise NotImplementedError('Unknown phase: {}'.format(args.phase))
| [
"[email protected]"
] | |
7782690aa20fda3d04ab9b554b0255a2f03efd70 | 1e1a5b0e7c939ef9b2aafb3a7c86a1b78d1d014e | /GUI/viewRecord.py | c5d4ef1aab6674c4511dc52fe5d499529a1c34f9 | [] | no_license | SAR2652/MedRec | deac530534974e64d1e204620a58cde02c27804b | b0c0bdf34d67bb115f6bb7379cc6c8ade51e1117 | refs/heads/master | 2020-04-24T11:37:21.954485 | 2019-07-25T07:26:35 | 2019-07-25T07:26:35 | 171,931,435 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | import sys
from PyQt5.QtWidgets import QWidget, QListWidget, QLabel, QComboBox
from PyQt5.QtGui import QFont
from PyQt5.QtCore import QUrl
path = 'C:/MedRec'
sys.path.append(path + '/GUI/')
from autocompletecombo import Autocomplete
class ViewRecord(QWidget):
def __init__(self, parent = None):
super(ViewRecord, self).__init__(parent)
self.initViewRecordUI()
def initViewRecordUI(self):
self.setGeometry(525, 225, 1080, 720)
#initialize labels
self.patient_name_label = QLabel('Patient Name : ', self)
self.case_name_label = QLabel('Case Name : ', self)
#initialize fields
self.patient_name_entry = Autocomplete(self)
self.case_name_entry = Autocomplete(self)
#initi
| [
"[email protected]"
] | |
2549b51f9b74bd83a48077d8573f285fddd9ebc2 | 70054615f56be28373b00c9df96544ec822be683 | /res/scripts/common/offers.py | d85a601ecaff58e94484a30537cc4c8545a98445 | [] | no_license | wanyancan/WOTDecompiled | c646ad700f5ec3fb81fb4e87862639ce0bdf0000 | 9ffb09007a61d723cdb28549e15db39c34c0ea1e | refs/heads/master | 2020-04-17T23:13:15.649069 | 2013-11-15T16:37:10 | 2013-11-15T16:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,339 | py | import time
from collections import namedtuple
import BigWorld
from constants import IS_BASEAPP
from debug_utils import *
ENTITY_TYPE_ACCOUNT = 0
ENTITY_TYPE_CLAN = 1
ENTITY_TYPE_NAMES_BY_IDS = ('Account', 'Clan')
ENTITY_TYPE_IDS_BY_NAMES = {'Account': ENTITY_TYPE_ACCOUNT,
'Clan': ENTITY_TYPE_CLAN}
ENTITY_TYPE_IDS = (ENTITY_TYPE_ACCOUNT, ENTITY_TYPE_CLAN)
OFFER_SELL = 0
_OFFER_KIND_MASK = 192
SRC_WARE_GOLD = 0
SRC_WARE_CREDITS = 256
SRC_WARE_ITEMS = 512
SRC_WARE_VEHICLE = 768
SRC_WARE_TANKMAN = 1024
SRC_WARE_KINDS = (SRC_WARE_GOLD,
SRC_WARE_CREDITS,
SRC_WARE_ITEMS,
SRC_WARE_VEHICLE,
SRC_WARE_TANKMAN)
SRC_WARE_MONEY_KINDS = (SRC_WARE_GOLD, SRC_WARE_CREDITS)
_SRC_WARE_KIND_MASK = 3840
DST_WARE_GOLD = 0
DST_WARE_CREDITS = 4096
DST_WARE_KINDS = (DST_WARE_GOLD, DST_WARE_CREDITS)
_DST_WARE_KIND_MASK = 61440
def makeOfferFlags(offerKind, srcWareKind, dstWareKind, srcEntityType, dstEntityType):
return offerKind | srcWareKind | dstWareKind | srcEntityType | dstEntityType << 3
ParsedOfferFlags = namedtuple('ParsedOfferFlags', 'offerKind srcWareKind dstWareKind srcEntityType dstEntityType')
def parseOfferFlags(flags):
raw = (flags & _OFFER_KIND_MASK,
flags & _SRC_WARE_KIND_MASK,
flags & _DST_WARE_KIND_MASK,
flags & 7,
flags >> 3 & 7)
return ParsedOfferFlags._make(raw)
def parseSrcEntityTypeFromFlags(flags):
return flags & 7
def parseDstEntityTypeFromFlags(flags):
return flags >> 3 & 7
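# Round-trip sanity sketch for the flag packing above (illustrative values):
#   flags = makeOfferFlags(OFFER_SELL, SRC_WARE_ITEMS, DST_WARE_CREDITS,
#                          ENTITY_TYPE_ACCOUNT, ENTITY_TYPE_CLAN)
#   parseOfferFlags(flags) == ParsedOfferFlags(OFFER_SELL, SRC_WARE_ITEMS,
#       DST_WARE_CREDITS, ENTITY_TYPE_ACCOUNT, ENTITY_TYPE_CLAN)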
class OutOffers(object):
Offer = namedtuple('Offer', 'flags dstDBID dstName srcWares dstWares validTill fee')
def __init__(self, offersDict, outWriterGetter = None):
offersDict.setdefault('nextID', 0)
offersDict.setdefault('done', {})
offersDict.setdefault('out', {})
self.__data = offersDict
self.__outWriter = outWriterGetter if outWriterGetter is not None else _WriterGetter(offersDict['out'])
return
def __getitem__(self, offerID):
return _makeOutOffer(self.__data['out'][offerID])
def get(self, offerID):
offer = self.__data['out'].get(offerID)
if offer is not None:
return _makeOutOffer(offer)
else:
return
def getExt(self, offerID, default = None):
outExt = self.__data.get('outExt')
if outExt is None:
return default
else:
return outExt.get(offerID, default)
def items(self):
return [ (id, _makeOutOffer(data)) for id, data in self.__data['out'].iteritems() ]
def clear(self):
self.__data['out'].clear()
self.__data['done'].clear()
self.__data.pop('outExt', None)
self.__data['nextID'] += 1
return
def count(self):
return len(self.__data['out'])
def doneOffers(self):
return self.__data['done']
def timedOutOffers(self):
res = []
currTime = int(time.time())
for offerID, offer in self.__data['out'].iteritems():
if offer[5] <= currTime:
res.append(offerID)
return res
def inventorySlots(self):
vehs = []
numTmen = 0
for offer in self.__data['out'].itervalues():
srcWareKind = offer[0] & _SRC_WARE_KIND_MASK
if srcWareKind == SRC_WARE_VEHICLE:
vehs.append(offer[3][0])
elif srcWareKind == SRC_WARE_TANKMAN:
numTmen += 1
return (vehs, numTmen)
def moveToDone(self, offerID):
data = self.__data
data['done'][offerID] = self.__outWriter().pop(offerID)
outExt = data.get('outExt')
if outExt is not None:
outExt.pop(offerID, None)
data['nextID'] += 1
return len(data['done'])
def remove(self, offerID):
if self.__outWriter().pop(offerID, None) is not None:
self.__data['nextID'] += 1
outExt = self.__data.get('outExt')
if outExt is not None:
outExt.pop(offerID, None)
return
def removeDone(self, offerID):
self.__data['done'].pop(offerID, None)
return
def updateDestination(self, offerID, dstEntityType, dstEntityDBID, dstEntityName):
        assert self.__data['out'][offerID][1] == dstEntityDBID
def createOffer(self, flags, srcDBID, srcName, dstDBID, dstName, validSec, srcWares, srcFee, dstWares, dstFee, ext = None):
currTime = int(time.time())
validTill = currTime + int(validSec)
offer = (flags,
dstDBID,
dstName,
srcWares,
dstWares,
validTill,
srcFee)
data = self.__data
offerID = ((currTime & 1048575) << 12) + (data['nextID'] & 4095)
data['nextID'] += 1
        assert offerID not in data['out'] and offerID not in data['done']
self.__outWriter()[offerID] = offer
data.setdefault('outExt', {})[offerID] = ext is not None and ext
return (offerID, (offerID,
flags,
srcDBID,
srcName,
srcWares,
dstWares,
validTill,
dstFee))
class InOffers(object):
Offer = namedtuple('Offer', 'srcOfferID flags srcDBID srcName srcWares dstWares validTill fee')
def __init__(self, offersDict, inWriterGetter = None):
offersDict.setdefault('nextID', 0)
offersDict.setdefault('in', {})
self.__data = offersDict
self.__inWriter = inWriterGetter if inWriterGetter is not None else _WriterGetter(offersDict['in'])
return
def __getitem__(self, offerID):
return _makeInOffer(self.__data['in'][offerID])
def get(self, offerID):
offer = self.__data['in'].get(offerID)
if offer is not None:
return _makeInOffer(offer)
else:
return
def items(self):
return [ (id, _makeOutOffer(data)) for id, data in self.__data['in'].iteritems() ]
def clear(self):
self.__data['in'].clear()
self.__data['nextID'] += 1
def count(self):
return len(self.__data['in'])
def timedOutOffers(self):
res = []
currTime = int(time.time())
for offerID, offer in self.__data['in'].iteritems():
if offer[6] <= currTime:
res.append(offerID)
return res
def findOfferBySource(self, srcEntityType, srcEntityDBID, srcOfferID):
for inOfferID, offer in self.__data['in'].iteritems():
if offer[0] == srcOfferID and offer[2] == srcEntityDBID and parseSrcEntityTypeFromFlags(offer[1]) == srcEntityType:
return inOfferID
return None
def add(self, offer):
data = self.__data
offerID = data['nextID']
data['nextID'] += 1
self.__inWriter()[offerID] = tuple(offer)
return offerID
def remove(self, offerID):
if self.__inWriter().pop(offerID, None) is not None:
self.__data['nextID'] += 1
return
def collectOutOfferResults(outOffer):
offerFlags = parseOfferFlags(outOffer.flags)
gold = 0
credits = 0
items = None
if offerFlags.srcWareKind == SRC_WARE_GOLD:
gold -= outOffer.srcWares + outOffer.fee
elif offerFlags.srcWareKind == SRC_WARE_CREDITS:
credits -= outOffer.srcWares + outOffer.fee
else:
items = outOffer.srcWares
if offerFlags.dstWareKind == DST_WARE_GOLD:
gold += outOffer.dstWares
else:
credits += outOffer.dstWares
return (offerFlags,
gold,
credits,
items)
def collectInOfferResults(inOffer):
offerFlags = parseOfferFlags(inOffer.flags)
gold = 0
credits = 0
items = None
if offerFlags.srcWareKind == SRC_WARE_GOLD:
gold += inOffer.srcWares
elif offerFlags.srcWareKind == SRC_WARE_CREDITS:
credits += inOffer.srcWares
else:
items = inOffer.srcWares
if offerFlags.dstWareKind == DST_WARE_GOLD:
gold -= inOffer.dstWares + inOffer.fee
else:
credits -= inOffer.dstWares + inOffer.fee
return (offerFlags,
gold,
credits,
items)
_makeOutOffer = OutOffers.Offer._make
_makeInOffer = InOffers.Offer._make
class _WriterGetter(object):
def __init__(self, dict):
self.__d = dict
def __call__(self):
return self.__d
| [
"[email protected]"
] | |
5310941c8e4e3eab87b903780fb19e7edf078c70 | f5d1e8b54ddbc51a9ef1b868eee93096d9b0fbeb | /weapp/wapi/mall/__init__.py | 79511ca63e741640f660e8b960872f86ac13619a | [] | no_license | chengdg/weizoom | 97740c121724fae582b10cdbe0ce227a1f065ece | 8b2f7befe92841bcc35e0e60cac5958ef3f3af54 | refs/heads/master | 2021-01-22T20:29:30.297059 | 2017-03-30T08:39:25 | 2017-03-30T08:39:25 | 85,268,003 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # -*- coding: utf-8 -*-
import product
import promotion
| [
"[email protected]"
] | |
4d75a2fa3fbfcd227da641b06f2ce1f1a779e02e | 6a07912090214567f77e9cd941fb92f1f3137ae6 | /cs212/Unit 4/28.py | ae381957925468dc57906a2813b0cfd324dea8d0 | [] | no_license | rrampage/udacity-code | 4ab042b591fa3e9adab0183d669a8df80265ed81 | bbe968cd27da7cc453eada5b2aa29176b0121c13 | refs/heads/master | 2020-04-18T08:46:00.580903 | 2012-08-25T08:44:24 | 2012-08-25T08:44:24 | 5,352,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,983 | py | # cs212 ; Unit 4 ; 28
# -----------------
# User Instructions
#
# In this problem, you will generalize the bridge problem
# by writing a function bridge_problem3, that makes a call
# to lowest_cost_search.
def bridge_problem3(here):
    """Find the fastest (least elapsed time) path to
    the goal in the bridge problem."""
    start = (frozenset(here) | frozenset(['light']), frozenset())
    return lowest_cost_search(start, bsuccessors2, all_over, bcost)
def all_over(state):
    "The goal is reached when everyone (and the light) is on the far side."
    here, there = state
    return not here or here == frozenset(['light'])
Fail = []
def lowest_cost_search(start, successors, is_goal, action_cost):
"""Return the lowest cost path, starting from start state,
and considering successors(state) => {state:action,...},
that ends in a state for which is_goal(state) is true,
where the cost of a path is the sum of action costs,
which are given by action_cost(action)."""
explored = set() # set of states we have visited
frontier = [ [start] ] # ordered list of paths we have blazed
while frontier:
path = frontier.pop(0)
state1 = final_state(path)
if is_goal(state1):
return path
explored.add(state1)
pcost = path_cost(path)
for (state, action) in successors(state1).items():
if state not in explored:
total_cost = pcost + action_cost(action)
path2 = path + [(action, total_cost), state]
add_to_frontier(frontier, path2)
return Fail
def final_state(path): return path[-1]
def path_cost(path):
"The total cost of a path (which is stored in a tuple with the final action)."
if len(path) < 3:
return 0
else:
action, total_cost = path[-2]
return total_cost
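# A path alternates states and (action, total_cost) pairs, for example
# (values shaped like the test below):
#   [state0, ((2, 1, '->'), 2), state1, ((2, 2, '<-'), 4), state2]
# path_cost reads the running total stored with the final action (4 here).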
def add_to_frontier(frontier, path):
"Add path to frontier, replacing costlier path if there is one."
# (This could be done more efficiently.)
# Find if there is an old path to the final state of this path.
old = None
for i,p in enumerate(frontier):
if final_state(p) == final_state(path):
old = i
break
if old is not None and path_cost(frontier[old]) < path_cost(path):
return # Old path was better; do nothing
elif old is not None:
del frontier[old] # Old path was worse; delete it
## Now add the new path and re-sort
frontier.append(path)
frontier.sort(key=path_cost)
def bsuccessors2(state):
"""Return a dict of {state:action} pairs. A state is a (here, there) tuple,
where here and there are frozensets of people (indicated by their times) and/or
the light."""
here, there = state
if 'light' in here:
return dict(((here - frozenset([a, b, 'light']),
there | frozenset([a, b, 'light'])),
(a, b, '->'))
                    for a in here if a != 'light'
                    for b in here if b != 'light')
else:
return dict(((here | frozenset([a, b, 'light']),
there - frozenset([a, b, 'light'])),
(a, b, '<-'))
                    for a in there if a != 'light'
                    for b in there if b != 'light')
def bcost(action):
"Returns the cost (a number) of an action in the bridge problem."
# An action is an (a, b, arrow) tuple; a and b are times; arrow is a string
a, b, arrow = action
return max(a, b)
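# For instance, bcost((2, 1, '->')) == 2: a joint crossing costs the slower
# walker's time, whichever way the light travels.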
def test():
here = [1, 2, 5, 10]
assert bridge_problem3(here) == [
(frozenset([1, 2, 'light', 10, 5]), frozenset([])),
((2, 1, '->'), 2),
(frozenset([10, 5]), frozenset([1, 2, 'light'])),
((2, 2, '<-'), 4),
(frozenset(['light', 10, 2, 5]), frozenset([1])),
((5, 10, '->'), 14),
(frozenset([2]), frozenset([1, 10, 5, 'light'])),
((1, 1, '<-'), 15),
(frozenset([1, 2, 'light']), frozenset([10, 5])),
((2, 1, '->'), 17),
(frozenset([]), frozenset([1, 10, 2, 5, 'light']))]
return 'test passes'
print test()
| [
"[email protected]"
] | |
c37ff8cfcff227220d098069e2f3040dce7f56e8 | 9145d24e2517d7f3cea6e89158806b95919449b8 | /doc/conf.py | 37c50aca46644bd4ce262e466fa2696daa55957c | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | pombredanne/coveragepy | b6de846694156581ee0b9a3348f4cfd48719855f | 2364947d7814a065cf2c05d930eda94203b20f1c | refs/heads/master | 2021-01-22T23:43:21.800229 | 2017-03-18T11:14:13 | 2017-03-18T11:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,618 | py | # -*- coding: utf-8 -*-
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
#
# coverage.py documentation build configuration file, created by
# sphinx-quickstart on Wed May 13 22:18:33 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'sphinxcontrib.spelling',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Coverage.py'
copyright = u'2009\N{EN DASH}2017, Ned Batchelder' # CHANGEME
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '4.3.4' # CHANGEME
# The full version, including alpha/beta/rc tags.
release = '4.3.4' # CHANGEME
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme = 'default'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
#html_style = "neds.css"
#html_add_permalinks = ""
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_templates']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = '.htm'
# Output file base name for HTML help builder.
htmlhelp_basename = 'coveragepydoc'
# -- Spelling ---
spelling_word_list_filename = 'dict.txt'
spelling_show_suggestions = False
# When auto-doc'ing a class, write the class' docstring and the __init__ docstring
# into the class docs.
autoclass_content = "class"
prerelease = bool(max(release).isalpha())
def setup(app):
app.add_stylesheet('coverage.css')
app.add_config_value('prerelease', False, 'env')
app.info("** Prerelease = %r" % prerelease)
| [
"[email protected]"
] | |
78c29f84ffce566ea51e7c3404a5822445abcd29 | 230159b8ec7f83369cd5fb04623f901908aaf73d | /src/hackerrank/algo/implementation/kangaroo.py | 33151a6d503e1f4f7182f49c698990759b49d8dd | [] | no_license | nikhilkuria/algo | e006c50c880df0fae882db9bb92d1a08eff36a97 | 1981d6101f345f6ea0bd0da002c6e4e45f6f4523 | refs/heads/master | 2021-01-17T20:16:16.612384 | 2018-06-27T07:36:56 | 2018-06-27T07:36:56 | 60,084,240 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the kangaroo function below.
def kangaroo(x1, v1, x2, v2):
kangaroo_one_pos = x1
kangaroo_two_pos = x2
while True:
if kangaroo_one_pos == kangaroo_two_pos:
return "YES"
if kangaroo_one_pos > kangaroo_two_pos and v1 >= v2:
break
if kangaroo_two_pos > kangaroo_one_pos and v2 >= v1:
break
kangaroo_one_pos = kangaroo_one_pos + v1
kangaroo_two_pos = kangaroo_two_pos + v2
return "NO"
print(kangaroo(0,2,5,3))
| [
"[email protected]"
] | |
c45e6ce9c846d77c6611d7c5fa1d641c22336a01 | 4b8c81f54cc52e096ad9ae751f00e88254aab0ca | /20-01-21 while홀.py | 631fadc6b7eb53e75d2df8df8fc563a8e1db0e4e | [] | no_license | dlatpdbs/python | 50305cfcc92bb6c9bae409ec31ebd9e4aa868075 | 2f740941fe1ef172d40cb10a63c1ed19c5925e68 | refs/heads/main | 2022-12-27T15:24:31.243739 | 2020-10-14T05:26:32 | 2020-10-14T05:26:32 | 301,933,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py |
q=1
while q <=100:
print(q)
q=q+2
| [
"[email protected]"
] | |
744b2b5f9edcfd6d59f3a65ebfda69a83917795e | 8c4ef53ec6c7df2eeeb633a53d1d931558596366 | /propertyestimator/properties/solvation.py | 846f77dd90fa87534dec104a50d994e4dbc33f4f | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | MSchauperl/propertyestimator | ff7bf2d3b6bc441141258483ec991f8806b09469 | 9a67cb61498024c511f9bbe55536ac8e1a3c93be | refs/heads/master | 2020-09-08T07:04:39.660322 | 2019-11-08T21:15:23 | 2019-11-08T21:15:23 | 221,055,340 | 0 | 0 | NOASSERTION | 2019-11-14T21:47:11 | 2019-11-11T19:34:28 | null | UTF-8 | Python | false | false | 8,120 | py | """
A collection of physical property definitions relating to
solvation free energies.
"""
from propertyestimator import unit
from propertyestimator.properties import PhysicalProperty
from propertyestimator.properties.plugins import register_estimable_property
from propertyestimator.protocols import coordinates, forcefield, miscellaneous, yank, simulation, groups
from propertyestimator.substances import Substance
from propertyestimator.thermodynamics import Ensemble
from propertyestimator.workflow import WorkflowOptions
from propertyestimator.workflow.schemas import WorkflowSchema
from propertyestimator.workflow.utils import ProtocolPath
@register_estimable_property()
class SolvationFreeEnergy(PhysicalProperty):
"""A class representation of a solvation free energy property."""
@staticmethod
def get_default_workflow_schema(calculation_layer, options=None):
if calculation_layer == 'SimulationLayer':
# Currently reweighting is not supported.
return SolvationFreeEnergy.get_default_simulation_workflow_schema(options)
return None
@staticmethod
def get_default_simulation_workflow_schema(options=None):
"""Returns the default workflow to use when estimating this property
from direct simulations.
Parameters
----------
options: WorkflowOptions
The default options to use when setting up the estimation workflow.
Returns
-------
WorkflowSchema
The schema to follow when estimating this property.
"""
# Setup the fully solvated systems.
build_full_coordinates = coordinates.BuildCoordinatesPackmol('build_solvated_coordinates')
build_full_coordinates.substance = ProtocolPath('substance', 'global')
build_full_coordinates.max_molecules = 2000
assign_full_parameters = forcefield.BuildSmirnoffSystem(f'assign_solvated_parameters')
assign_full_parameters.force_field_path = ProtocolPath('force_field_path', 'global')
assign_full_parameters.substance = ProtocolPath('substance', 'global')
assign_full_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path',
build_full_coordinates.id)
# Perform a quick minimisation of the full system to give
# YANK a better starting point for its minimisation.
energy_minimisation = simulation.RunEnergyMinimisation('energy_minimisation')
energy_minimisation.system_path = ProtocolPath('system_path', assign_full_parameters.id)
energy_minimisation.input_coordinate_file = ProtocolPath('coordinate_file_path',
build_full_coordinates.id)
equilibration_simulation = simulation.RunOpenMMSimulation('equilibration_simulation')
equilibration_simulation.ensemble = Ensemble.NPT
equilibration_simulation.steps_per_iteration = 100000
equilibration_simulation.output_frequency = 10000
equilibration_simulation.timestep = 2.0 * unit.femtosecond
equilibration_simulation.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')
equilibration_simulation.system_path = ProtocolPath('system_path', assign_full_parameters.id)
equilibration_simulation.input_coordinate_file = ProtocolPath('output_coordinate_file',
energy_minimisation.id)
# Create a substance which only contains the solute (e.g. for the
# vacuum phase simulations).
filter_solvent = miscellaneous.FilterSubstanceByRole('filter_solvent')
filter_solvent.input_substance = ProtocolPath('substance', 'global')
filter_solvent.component_role = Substance.ComponentRole.Solvent
filter_solute = miscellaneous.FilterSubstanceByRole('filter_solute')
filter_solute.input_substance = ProtocolPath('substance', 'global')
filter_solute.component_role = Substance.ComponentRole.Solute
# Setup the solute in vacuum system.
build_vacuum_coordinates = coordinates.BuildCoordinatesPackmol('build_vacuum_coordinates')
build_vacuum_coordinates.substance = ProtocolPath('filtered_substance', filter_solute.id)
build_vacuum_coordinates.max_molecules = 1
assign_vacuum_parameters = forcefield.BuildSmirnoffSystem(f'assign_parameters')
assign_vacuum_parameters.force_field_path = ProtocolPath('force_field_path', 'global')
assign_vacuum_parameters.substance = ProtocolPath('filtered_substance', filter_solute.id)
assign_vacuum_parameters.coordinate_file_path = ProtocolPath('coordinate_file_path',
build_vacuum_coordinates.id)
# Set up the protocol to run yank.
run_yank = yank.SolvationYankProtocol('run_solvation_yank')
run_yank.solute = ProtocolPath('filtered_substance', filter_solute.id)
run_yank.solvent_1 = ProtocolPath('filtered_substance', filter_solvent.id)
run_yank.solvent_2 = Substance()
run_yank.thermodynamic_state = ProtocolPath('thermodynamic_state', 'global')
run_yank.steps_per_iteration = 500
run_yank.checkpoint_interval = 50
run_yank.solvent_1_coordinates = ProtocolPath('output_coordinate_file', equilibration_simulation.id)
run_yank.solvent_1_system = ProtocolPath('system_path', assign_full_parameters.id)
run_yank.solvent_2_coordinates = ProtocolPath('coordinate_file_path', build_vacuum_coordinates.id)
run_yank.solvent_2_system = ProtocolPath('system_path', assign_vacuum_parameters.id)
# Set up the group which will run yank until the free energy has been determined to within
# a given uncertainty
conditional_group = groups.ConditionalGroup(f'conditional_group')
conditional_group.max_iterations = 20
if options.convergence_mode != WorkflowOptions.ConvergenceMode.NoChecks:
condition = groups.ConditionalGroup.Condition()
condition.condition_type = groups.ConditionalGroup.ConditionType.LessThan
condition.right_hand_value = ProtocolPath('target_uncertainty', 'global')
condition.left_hand_value = ProtocolPath('estimated_free_energy.uncertainty',
conditional_group.id,
run_yank.id)
conditional_group.add_condition(condition)
# Define the total number of iterations that yank should run for.
total_iterations = miscellaneous.MultiplyValue('total_iterations')
total_iterations.value = 2000
total_iterations.multiplier = ProtocolPath('current_iteration', conditional_group.id)
# Make sure the simulations gets extended after each iteration.
run_yank.number_of_iterations = ProtocolPath('result',
total_iterations.id)
conditional_group.add_protocols(total_iterations, run_yank)
# Define the full workflow schema.
schema = WorkflowSchema(property_type=SolvationFreeEnergy.__name__)
schema.id = '{}{}'.format(SolvationFreeEnergy.__name__, 'Schema')
schema.protocols = {
build_full_coordinates.id: build_full_coordinates.schema,
assign_full_parameters.id: assign_full_parameters.schema,
energy_minimisation.id: energy_minimisation.schema,
equilibration_simulation.id: equilibration_simulation.schema,
filter_solvent.id: filter_solvent.schema,
filter_solute.id: filter_solute.schema,
build_vacuum_coordinates.id: build_vacuum_coordinates.schema,
assign_vacuum_parameters.id: assign_vacuum_parameters.schema,
conditional_group.id: conditional_group.schema
}
schema.final_value_source = ProtocolPath('estimated_free_energy', conditional_group.id, run_yank.id)
return schema
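# A minimal usage sketch (illustrative only; assumes WorkflowOptions can be
# constructed with its defaults):
#   options = WorkflowOptions()
#   schema = SolvationFreeEnergy.get_default_simulation_workflow_schema(options)
#   print(schema.id)  # -> 'SolvationFreeEnergySchema'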
| [
"[email protected]"
] | |
9abb3baada0faed6fe83d3c15b41aa7c7958cb80 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_27357.py | 1163c19de3fb005d7b6fa68a6a453f6f2e63147f | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46 | py | # pyplot.savefig with empty export
import matplotlib.pyplot as plt

plt.show()
| [
"[email protected]"
] | |
bb452e72141b555c7dd30f34a66fc3fe30f86fbd | 220a2a22f7ecbb960e6a09b1153ec5094aef15f5 | /Log-Parsers/Recognition_Long_Talks/general_classes.py | a374a5df875af86c516cbe3be40426c999673ee0 | [] | no_license | jrweis01/Rubidium | 89b27b8376891b42eb6b8bf952f70d92dd81768c | 6050241aa19401bd5196939aadfc4a095f771d0a | refs/heads/master | 2020-05-30T05:29:11.649283 | 2019-06-02T07:03:19 | 2019-06-02T07:03:19 | 189,561,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,156 | py | from templates_data import *
import openpyxl
import os
import sys
import shutil
import datetime
class Utils(object):
def fetch_files_from_folder(self, pathToFolder):
_pathToFiles = []
_fileNames = []
for dirPath, dirNames, fileNames in os.walk(pathToFolder):
selected_path = [os.path.join(dirPath, item) for item in fileNames]
_pathToFiles.extend(selected_path)
selectedFile = [item for item in fileNames]
_fileNames.extend(selectedFile)
# Try to remove empty entries if none of the required files are in directory
try:
_pathToFiles.remove('')
            _fileNames.remove('')
except ValueError:
pass
# Warn if nothing was found in the given path
if selectedFile == []:
            print 'No files with given parameters were found in:\n', dirPath, '\n'
            print len(_fileNames), 'files were found in searched folder(s)'
return _pathToFiles, _fileNames
def get_excel_worksheet(self):
pass
@staticmethod
    def insertion_sort(items):
        # note: with this comparison the list is sorted in descending order
for i in range(1, len(items)):
j = i
while j > 0 and items[j] > items[j - 1]:
items[j - 1], items[j] = items[j], items[j - 1]
j = j - 1
return items
def sort_order_dict(self,order_dict):
for key in order_dict:
items = order_dict[key]
items = self.insertion_sort(items)
def sorting_headers(self,sorting_dict,order_dict):
sorted_list = []
for m in order_dict["noise_file_name"]:
for i in order_dict["trig_to_ASR_delay"]:
for j in order_dict["signal_dB"]:
for k in order_dict["noise_dB"]:
for key in sorting_dict:
if (sorting_dict[key]["noise_file_name"] == str(m) and
sorting_dict[key]["trig_to_ASR_delay"] == str(int(i)) and
sorting_dict[key]["signal_dB"] == str(int(j)) and
sorting_dict[key]["noise_dB"] == str(int(k))):
sorted_list.append(key)
return sorted_list
def clear_dict_values(self,dict):
for key in dict:
dict[key].clear()
def get_folder_location_path(self,folder):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\' + folder
return template_path
class ExcelHandler(object):
def __init__(self, workbook_name):
self.wb_name = workbook_name
self.wb_name_with_dt = self._creat_new_excel_from_template_with_name_and_datetime(workbook_name)
self.wb = openpyxl.load_workbook(str(self.wb_name_with_dt))
self.template_info = {}
self.template_indexes = {'TRIG_ONLY': 4, 'MP_mTRIG_sASR': 4 ,'LJ_sTRIG_mASR' : 4}
self.sheet_MP = None
self.sheet_trig_only = None
self.sheet_LJ_sTRIG_mASR = None
def run_log_printing_LJ_sTRIG_mASR(self,log_dict):
''' for 'LJ_sTRIG_mASR' SHEET TEMPLATE'''
asr_section = log_dict['asr_results_dict']
trig_section = log_dict['trig_results_dict_format']
if self.sheet_LJ_sTRIG_mASR is None:
self.sheet_LJ_sTRIG_mASR = self._open_sheet('LJ_sTRIG_mASR')
ROW = self.template_indexes['LJ_sTRIG_mASR']
''' printing header section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 1, log_dict,EXCEL_LJ_sTRIG_mASR_TEMPLATE_HEADER_SECTION)
''' printing trig section'''
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR,ROW,27,trig_section,EXCEL_LJ_sTRIG_mASR_TEMPLATE_TRIG_SECTION)
''' printing asr section'''
cmd_template_order = ['volume_down' , 'volume_up' , 'next_song', 'pause' , 'resume', 'what_distance_have_i_done']
cmd_template_dict = {'volume_down': 'empty1.wav' , 'volume_up' : 'empty2.wav' , 'next_song' : 'empty3.wav', 'pause' : 'empty4.wav',
'resume' : 'empty5.wav' , 'what_distance_have_i_done' : 'empty6.wav'}
for command in cmd_template_order:
curr_key = cmd_template_dict[command]
if curr_key in asr_section.keys():
curr_cmd_dict = asr_section[curr_key]
self._write_line_to_excel_sheet(self.sheet_LJ_sTRIG_mASR, ROW, 10, curr_cmd_dict,
EXCEL_LJ_sTRIG_mASR_TEMPLATE_ASR_SECTION)
else:
pass
ROW += 1
self.template_indexes['LJ_sTRIG_mASR']+=6
def run_log_printing_TRIG_ONLY(self,log_dict,exl_tab_name):
''' for 'TRIG_ONLY' SHEET TEMPLATE'''
if self.sheet_trig_only is None:
self.sheet_trig_only = self._open_sheet(exl_tab_name)
ROW = self.template_indexes[exl_tab_name]
self._write_line_to_excel_sheet(self.sheet_trig_only,ROW,1,log_dict,EXCEL_TRIG_TEMPLATE_TUPLE)
self.template_indexes[exl_tab_name] += 1
def run_log_printing_TRIG_ASR_MP(self,log_dict):
''' for 'MP_mTrig_sASR' SHEET TEMPLATE'''
if self.sheet_MP is None:
self.sheet_MP = self._open_sheet("MP_mTRIG_sASR")
ROW = self.template_indexes["MP_mTRIG_sASR"]
self._write_line_to_excel_sheet(self.sheet_MP,ROW,1,log_dict,EXCEL_MP_CMD_TEMPLATE)
self.template_indexes['MP_mTRIG_sASR']+=1
def get_new_wb_name(self):
return self.wb_name_with_dt
def _creat_new_excel_from_template_with_name_and_datetime(self,project_name):
program_path = os.path.dirname(sys.argv[0])
template_path = program_path + '\\template\exl.xlsx'
shutil.copy2(str(template_path), str(program_path))
date_time = datetime.datetime.strftime(datetime.datetime.now(), '_%Y-%m-%d__%H_%M_%S')
exl_file_name = str(project_name) + str(date_time) + ".xlsx"
os.rename("exl.xlsx", str(exl_file_name))
return str(exl_file_name)
def _write_line_to_excel_sheet(self,sheet,row,column,val_dict,template_list):
row = str(row)
start_col = column
for i, key in enumerate(template_list):
            col = self._num_to_excel_alphabet_cols(i+start_col)
try:
# sheet[col + row] = str(val_dict[key])
sheet[col + row] = val_dict[key]
            except KeyError: print '#Warn: missing key in log dict:', key
def _open_sheet(self,sheet_name):
sheet = self.wb.get_sheet_by_name(sheet_name)
return sheet
    def _num_to_excel_alphabet_cols(self, index_num):
cal1 = index_num % 27
cal2 = index_num // 26
new = index_num - cal2 * 26
if new == 0:
new = 26
cal2 -= 1
if cal2:
mychar = chr(cal2 + 64) + chr(new + 64)
else:
mychar = chr(index_num + 64)
return mychar
def save_workbook(self):
self.wb.save(str(self.wb_name_with_dt))
| [
"[email protected]"
] | |
92929d241384233660875a5731e7b8bdb4618600 | 75b1f503e695dd5251e00b6bd66207b99c9c83ff | /caesar_cipher.py | f4a48db54a62b7b6068e748444f02a88f468a015 | [] | no_license | rongoodbin/secret_messages | 2d3a4881b4f06a88ba777832eb1ae59202fb3725 | ff91786d4ef4f467e9a95c36df66b22641033424 | refs/heads/master | 2021-05-04T15:09:51.264542 | 2018-03-05T01:09:45 | 2018-03-05T01:09:45 | 120,221,827 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | import string
from ciphers import Cipher
class Caesar(Cipher):
    FORWARD = string.ascii_uppercase * 3  # class-level default; overridden per-instance in __init__
def __init__(self, keyword=None, offset=3):
self.offset = offset
self.FORWARD = string.ascii_uppercase + string.ascii_uppercase[:self.offset+1]
self.BACKWARD = string.ascii_uppercase[:self.offset+1] + string.ascii_uppercase
def encrypt(self, text):
output = []
text = text.upper()
for char in text:
try:
index = self.FORWARD.index(char)
except ValueError:
output.append(char)
else:
output.append(self.FORWARD[index+self.offset])
return ''.join(output)
def decrypt(self, text):
output = []
text = text.upper()
for char in text:
try:
index = self.BACKWARD.index(char)
except ValueError:
output.append(char)
else:
output.append(self.BACKWARD[index-self.offset])
return ''.join(output)
if __name__ == "__main__":
atbash = Caesar()
encrypted_text = atbash.encrypt("testing this code! 2pm")
print(encrypted_text)
decrypted_text = atbash.decrypt(encrypted_text)
print(decrypted_text)
| [
"[email protected]"
] | |
087457541661af279dddac07823ebcb457b7ee3f | 1eacd671cf9c71f486bbaddabe7701caf0d5e1ff | /ironic_python_agent/config.py | 94a255f880b6927251a8a3b0bc097c8963a6368c | [
"Apache-2.0"
] | permissive | tyws/ipa-customize | 65e04be381b7c9b538c02603f4ceead0b25b0265 | 962c9e0b1f904fdc14c0ce542809b11b741d41fb | refs/heads/master | 2020-07-24T13:10:22.269466 | 2019-09-30T05:47:53 | 2019-09-30T05:47:53 | 207,939,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,954 | py | # Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from ironic_python_agent import inspector
from ironic_python_agent import netutils
from ironic_python_agent import utils
CONF = cfg.CONF
APARAMS = utils.get_agent_params()
cli_opts = [
cfg.StrOpt('api_url',
default=APARAMS.get('ipa-api-url'),
deprecated_name='api-url',
regex='^http(s?):\/\/.+',
help='URL of the Ironic API. '
'Can be supplied as "ipa-api-url" kernel parameter.'
'The value must start with either http:// or https://.'),
cfg.StrOpt('listen_host',
default=APARAMS.get('ipa-listen-host',
netutils.get_wildcard_address()),
sample_default='::',
deprecated_name='listen-host',
help='The IP address to listen on. '
'Can be supplied as "ipa-listen-host" kernel parameter.'),
cfg.IntOpt('listen_port',
default=int(APARAMS.get('ipa-listen-port', 9999)),
deprecated_name='listen-port',
help='The port to listen on. '
'Can be supplied as "ipa-listen-port" kernel parameter.'),
cfg.StrOpt('advertise_host',
default=APARAMS.get('ipa-advertise-host', None),
deprecated_name='advertise_host',
help='The host to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-host" '
'kernel parameter.'),
cfg.IntOpt('advertise_port',
default=int(APARAMS.get('ipa-advertise-port', 9999)),
deprecated_name='advertise-port',
help='The port to tell Ironic to reply and send '
'commands to. '
'Can be supplied as "ipa-advertise-port" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_attempts',
default=int(APARAMS.get('ipa-ip-lookup-attempts', 3)),
deprecated_name='ip-lookup-attempts',
help='The number of times to try and automatically '
'determine the agent IPv4 address. '
'Can be supplied as "ipa-ip-lookup-attempts" '
'kernel parameter.'),
cfg.IntOpt('ip_lookup_sleep',
default=int(APARAMS.get('ipa-ip-lookup-timeout', 10)),
deprecated_name='ip-lookup-sleep',
help='The amount of time to sleep between attempts '
'to determine IP address. '
'Can be supplied as "ipa-ip-lookup-timeout" '
'kernel parameter.'),
cfg.StrOpt('network_interface',
default=APARAMS.get('ipa-network-interface', None),
deprecated_name='network-interface',
help='The interface to use when looking for an IP address. '
'Can be supplied as "ipa-network-interface" '
'kernel parameter.'),
cfg.IntOpt('lookup_timeout',
default=int(APARAMS.get('ipa-lookup-timeout', 300)),
deprecated_name='lookup-timeout',
help='The amount of time to retry the initial lookup '
'call to Ironic. After the timeout, the agent '
'will exit with a non-zero exit code. '
'Can be supplied as "ipa-lookup-timeout" '
'kernel parameter.'),
cfg.IntOpt('lookup_interval',
default=int(APARAMS.get('ipa-lookup-interval', 1)),
deprecated_name='lookup-interval',
help='The initial interval for retries on the initial '
'lookup call to Ironic. The interval will be '
'doubled after each failure until timeout is '
'exceeded. '
'Can be supplied as "ipa-lookup-interval" '
'kernel parameter.'),
cfg.FloatOpt('lldp_timeout',
default=APARAMS.get('ipa-lldp-timeout',
APARAMS.get('lldp-timeout', 30.0)),
help='The amount of seconds to wait for LLDP packets. '
'Can be supplied as "ipa-lldp-timeout" '
'kernel parameter.'),
cfg.BoolOpt('collect_lldp',
default=APARAMS.get('ipa-collect-lldp', False),
help='Whether IPA should attempt to receive LLDP packets for '
'each network interface it discovers in the inventory. '
'Can be supplied as "ipa-collect-lldp" '
'kernel parameter.'),
cfg.BoolOpt('standalone',
default=APARAMS.get('ipa-standalone', False),
help='Note: for debugging only. Start the Agent but suppress '
'any calls to Ironic API. '
'Can be supplied as "ipa-standalone" '
'kernel parameter.'),
cfg.StrOpt('inspection_callback_url',
default=APARAMS.get('ipa-inspection-callback-url'),
help='Endpoint of ironic-inspector. If set, hardware inventory '
'will be collected and sent to ironic-inspector '
'on start up. '
'Can be supplied as "ipa-inspection-callback-url" '
'kernel parameter.'),
cfg.StrOpt('inspection_collectors',
default=APARAMS.get('ipa-inspection-collectors',
inspector.DEFAULT_COLLECTOR),
help='Comma-separated list of plugins providing additional '
'hardware data for inspection, empty value gives '
'a minimum required set of plugins. '
'Can be supplied as "ipa-inspection-collectors" '
'kernel parameter.'),
cfg.IntOpt('inspection_dhcp_wait_timeout',
default=APARAMS.get('ipa-inspection-dhcp-wait-timeout',
inspector.DEFAULT_DHCP_WAIT_TIMEOUT),
help='Maximum time (in seconds) to wait for the PXE NIC '
'(or all NICs if inspection_dhcp_all_interfaces is True) '
'to get its IP address via DHCP before inspection. '
'Set to 0 to disable waiting completely. '
'Can be supplied as "ipa-inspection-dhcp-wait-timeout" '
'kernel parameter.'),
cfg.BoolOpt('inspection_dhcp_all_interfaces',
default=APARAMS.get('ipa-inspection-dhcp-all-interfaces',
False),
help='Whether to wait for all interfaces to get their IP '
'addresses before inspection. If set to false '
'(the default), only waits for the PXE interface. '
'Can be supplied as '
'"ipa-inspection-dhcp-all-interfaces" '
'kernel parameter.'),
cfg.IntOpt('hardware_initialization_delay',
default=APARAMS.get('ipa-hardware-initialization-delay', 0),
help='How much time (in seconds) to wait for hardware to '
'initialize before proceeding with any actions. '
'Can be supplied as "ipa-hardware-initialization-delay" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_attempts',
default=APARAMS.get('ipa-disk-wait-attempts', 10),
help='The number of times to try and check to see if '
'at least one suitable disk has appeared in inventory '
'before proceeding with any actions. '
'Can be supplied as "ipa-disk-wait-attempts" '
'kernel parameter.'),
cfg.IntOpt('disk_wait_delay',
default=APARAMS.get('ipa-disk-wait-delay', 3),
help='How much time (in seconds) to wait between attempts '
'to check if at least one suitable disk has appeared '
'in inventory. Set to zero to disable. '
'Can be supplied as "ipa-disk-wait-delay" '
'kernel parameter.'),
cfg.BoolOpt('insecure',
default=APARAMS.get('ipa-insecure', False),
help='Verify HTTPS connections. Can be supplied as '
'"ipa-insecure" kernel parameter.'),
cfg.StrOpt('cafile',
help='Path to PEM encoded Certificate Authority file '
'to use when verifying HTTPS connections. '
'Default is to use available system-wide configured CAs.'),
cfg.StrOpt('certfile',
help='Path to PEM encoded client certificate cert file. '
'Must be provided together with "keyfile" option. '
'Default is to not present any client certificates to '
'the server.'),
cfg.StrOpt('keyfile',
help='Path to PEM encoded client certificate key file. '
'Must be provided together with "certfile" option. '
'Default is to not present any client certificates to '
'the server.'),
]
CONF.register_cli_opts(cli_opts)
def list_opts():
return [('DEFAULT', cli_opts)]
| [
"[email protected]"
] | |
ef6cd9dcf7e940c06b7afc339d98f4454ab71b4f | 421c9b869f4391a08a216df784a7c36b8b666557 | /spy.py | a3094bcf5d05106f9d3b7990cef14725c29a9f04 | [] | no_license | Pramod37/spychatcode | 96d6de810010f4f9c9ac2690577442b2e8c7aec9 | 2bdd688e3e0736e229824111a9c8aa4d1bd41f71 | refs/heads/master | 2021-01-11T05:28:34.360188 | 2017-06-22T07:31:11 | 2017-06-22T07:31:11 | 95,031,615 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,650 | py | from details import spy, friends,ChatMessage,Spy
from steganography.steganography import Steganography
from datetime import datetime
status_message = ['on work','updating....','on mood to learn']
print 'Hello let\'s get started'
existing = raw_input("Do you want to continue as " + spy.salutation + " " + spy.name + " (Y/N)? ").upper()
def add_status(current_status_message) :
updated_status_message = None
if current_status_message != None :
print 'your current status message is %s \n' % (current_status_message)
else :
print 'you don\'t have any status message..\n'
default = raw_input("do you want to select from the older status message(y/n)? Or want to write new?(n)")
if default.upper() == "N" :
new_status_message = raw_input("what stauts do you want to set?")
if len(new_status_message) > 0:
status_message.append(new_status_message)
updated_status_message = new_status_message
if updated_status_message.isspace():
print 'you don\'t have any status..'
else:
updated_status_message = updated_status_message.strip()
print updated_status_message
elif default.upper() == 'Y' :
item_position = 1
for message in status_message :
print '%d. %s' % (item_position, message)
item_position = item_position + 1
        message_selection = int(raw_input("\n choose from the above messages: "))
if len(status_message) >= message_selection :
updated_status_message = status_message[message_selection - 1]
else:
            print 'the option you chose is not available'
if updated_status_message:
print 'Your updated status message is: %s' % (updated_status_message)
else:
        print 'You currently don\'t have a status update'
return updated_status_message
def add_friend() :
    present_friend = Spy('','',0,0.0)
present_friend.name = raw_input("please add your friend's name")
present_friend.salutation = raw_input("are they mr. or miss.?")
present_friend.name = present_friend.salutation + " " + present_friend.name
present_friend.age = raw_input("age?")
present_friend.age = int(present_friend.age)
present_friend.rating = raw_input("rating?")
present_friend.rating = float(present_friend.rating)
if len(present_friend.name) > 0 and present_friend.age >= 20 and present_friend.rating >= 2.0:
friends.append(present_friend)
print 'Friend Added!'
else:
print 'sorry! unable to add..invalid entry!'
return len(friends)
def select_friend():
item_number = 0
for friend in friends:
print '%d %s with age %d with rating %.2f is online' % (item_number + 1, friend.name,
friend.age,
friend.rating)
item_number = item_number + 1
friend_choice = raw_input("Choose from your friends")
friend_choice_position = int(friend_choice) - 1
return friend_choice_position
def send_message():
friend_choice = select_friend()
original_image = raw_input("What is the name of image?")
output_path = "output.jpg "
text = raw_input("what do you want to say? ")
Steganography.encode(original_image , output_path, text)
new_chat = ChatMessage(text,True)
friends[friend_choice].chats.append(new_chat)
print "Your secret message image is ready!"
def read_message():
sender = select_friend()
output_path = raw_input("What is the name of the file?")
secret_text = Steganography.decode(output_path)
new_chat = ChatMessage(secret_text,False)
friends[sender].chats.append(new_chat)
print "Your secret message has been saved!"
def read_chat_history():
read_for = select_friend()
print '\n5'
for chat in friends[read_for].chats:
if chat.sent_by_me:
print '[%s] %s: %s' % (chat.time.strftime("%d %B %Y"), 'You said:', chat.message)
else:
print '[%s] %s said: %s' % (chat.time.strftime("%d %B %Y"), friends[read_for].name, chat.message)
def start_chat(spy) :
current_status_message = None
spy.name = spy.salutation + " " + spy.name
if spy.age >=20 and spy.age <=50 :
print "Authentication Complete. Welcome " + spy.name + " age: " + str(spy.age) + " and rating of spy:" + str(
spy.rating) \
+ " Proud to Have You onboard.."
show_menu = True
while show_menu :
menu_choices = "What do you want to do?\n 1. Add a Status\n 2. Add a Friend\n 3. Send a Secret Message\n 4. Read a Secret Message\n" \
" 5. Read chat history\n 6. show status \n 7. show friends list\n 8. exit apllication\n"
menu_choice = raw_input(menu_choices)
if len(menu_choice) > 0 :
menu_choice = int(menu_choice)
if menu_choice == 1 :
                    print 'you chose to update your status'
current_status_message = add_status(current_status_message)
elif menu_choice == 2 :
print 'you can add a friend now!'
number_of_friends = add_friend()
print 'You have %d friends' % (number_of_friends)
elif menu_choice == 3 :
print 'you can send a secret message here!'
send_message()
elif menu_choice == 4 :
print 'you can read a secret message here!'
read_message()
elif menu_choice == 5 :
print 'Your chat history'
read_chat_history()
elif menu_choice == 6:
                    print 'your status message here!\n'
                    if current_status_message is None or current_status_message.isspace():
                        print 'you don\'t have any status..'
                    else:
                        print current_status_message
elif menu_choice == 7 :
print 'your friends are..\n'
for i in friends:
print i.name
elif menu_choice == 8 :
exit()
else :
show_menu = False
else:
        print 'sorry, you are not eligible to be a spy'
if existing == "Y":
start_chat(spy)
else:
spy = Spy('','',0,0.0)
spy.name = raw_input("welcome to spy chat,tou need to tell your name first:")
if len (spy.name) > 0:
spy.salutation = raw_input("Should I call you Mr. or Ms.?: ")
spy.age = int(raw_input("What is your Age?"))
spy.age = int(spy.age)
spy.rating = float(raw_input("what is your rating:"))
if spy.rating >= 4.5:
print "wow! Great Ace."
elif spy.rating >= 4.0 and spy.rating < 4.5 :
print "you are good."
elif spy.rating >= 3.0 and spy.rating < 4.0 :
print "you can do better."
else:
        print 'We can always use some help in the office..'
spy_rating = float(spy.rating)
spy_is_online = True
start_chat(spy)
else :
print "A Spy needs a valid Name!"
| [
"[email protected]"
] | |
c20a34f0a583217bc2954583f5023db885908a21 | 6dd08ec6b4f6351de8450a3d7e592fd6b4994119 | /cbase/server/cbase-1.8.1/testrunner/lib/cli_interface.py | e6a6f9806a3859205b951f3f754ca879f82d6278 | [
"Apache-2.0"
] | permissive | zhgwenming/appstack | d015e96b911fe318f9fba1bdeeea9d888d57dfba | 8fe6c1dfc2f5ed4a36c335e86ae28c17b3769276 | refs/heads/master | 2021-01-23T13:30:19.507537 | 2015-11-09T06:48:35 | 2015-11-09T06:48:35 | 7,576,644 | 1 | 2 | null | 2016-01-05T09:16:22 | 2013-01-12T15:13:21 | C | UTF-8 | Python | false | false | 6,194 | py | #!/usr/bin/env python
#
# Copyright 2010 Membase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# PYTHONPATH needs to be set up to point to mc_bin_client
import os
import subprocess
DEF_USERNAME = "Administrator"
DEF_PASSWORD = "password"
DEF_KIND = "json"
DEF_MOXI_PORT = 11211
DEF_HTTP_PORT = 8091
DEF_RAMSIZE = 256
DEF_REPLICA = 1
CLI_EXE_LOC = "../membase-cli/membase"
SSH_EXE_LOC = "/opt/membase/bin/cli/membase"
class CLIInterface(object):
def __init__(self, server, http_port=DEF_HTTP_PORT, username=DEF_USERNAME, password=DEF_PASSWORD, kind=DEF_KIND, debug=False, ssh=False, sshkey=None):
self.server = server
self.http_port = http_port
self.username = username
self.password = password
self.kind = kind
self.debug = debug
self.ssh = ssh
self.sshkey = sshkey
if (debug):
self.acting_server_args = "-c %s:%d -u %s -p %s -o %s -d" % (self.server, self.http_port, self.username, self.password, self.kind)
else:
self.acting_server_args = "-c %s:%d -u %s -p %s -o %s" % (self.server, self.http_port, self.username, self.password, self.kind)
def server_list(self):
cmd = " server-list " + self.acting_server_args
return self.execute_command(cmd)
def server_info(self):
cmd = " server-info " + self.acting_server_args
return self.execute_command(cmd)
def server_add(self, server_to_add, rebalance=False):
if (rebalance):
cmd = " rebalance " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_add, self.http_port, self.username, self.password)
else:
cmd = " server-add " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_add, self.http_port, self.username, self.password)
return self.execute_command(cmd)
def server_readd(self, server_to_readd):
cmd = " server-readd " + self.acting_server_args + " --server-add=%s:%d --server-add-username=%s --server-add-password=%s"\
% (server_to_readd, self.http_port, self.username, self.password)
return self.execute_command(cmd)
def rebalance(self):
cmd = " rebalance " + self.acting_server_args
return self.execute_command(cmd)
def rebalance_stop(self):
cmd = " reblance-stop " + self.acting_server_args
return self.execute_command(cmd)
def rebalance_status(self):
cmd = " rebalance-status " + self.acting_server_args
return self.execute_command(cmd)
def failover(self, server_to_failover):
cmd = " failover " + self.acting_server_args + " --server-failover %s" % (server_to_failover)
return self.execute_command(cmd)
def cluster_init(self, c_username=DEF_USERNAME, c_password=DEF_PASSWORD, c_port=DEF_HTTP_PORT, c_ramsize=DEF_RAMSIZE):
cmd = " cluster-init " + self.acting_server_args\
+ " --cluster-init-username=%s --cluster-init-password=%s --cluster-init-port=%d --cluster-init-ramsize=%d"\
% (c_username, c_password, c_port, c_ramsize)
return self.execute_command(cmd)
def node_init(self, path):
cmd = " node-init " + self.acting_server_args + " --node-init-data-path=%s" % (path)
return self.execute_command(cmd)
def bucket_list(self):
cmd = " bucket-list " + self.acting_server_args
return self.execute_command(cmd)
def bucket_create(self, bucket_name, bucket_type, bucket_port, bucket_password="", bucket_ramsize=DEF_RAMSIZE, replica_count=DEF_REPLICA):
cmd = " bucket-create " + self.acting_server_args\
+ " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\
% (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count)
return self.execute_command(cmd)
def bucket_edit(self, bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count):
cmd = " bucket-edit " + self.acting_server_args\
+ " --bucket=%s --bucket-type=%s --bucket-port=%d --bucket-password=%s --bucket-ramsize=%d --bucket-replica=%d"\
% (bucket_name, bucket_type, bucket_port, bucket_password, bucket_ramsize, replica_count)
return self.execute_command(cmd)
def bucket_delete(self, bucket_name):
cmd = " bucket-delete " + self.acting_server_args + " --bucket=%s" % (bucket_name)
return self.execute_command(cmd)
def bucket_flush(self):
return "I don't work yet :-("
def execute_command(self, cmd):
if (self.ssh):
return self.execute_ssh(SSH_EXE_LOC + cmd)
else:
return self.execute_local(CLI_EXE_LOC + cmd)
def execute_local(self, cmd):
rtn = ""
process = subprocess.Popen(cmd ,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
def execute_ssh(self, cmd):
rtn=""
if (self.sshkey == None):
process = subprocess.Popen("ssh root@%s \"%s\"" % (self.server,cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
else:
process = subprocess.Popen("ssh -i %s root@%s \"%s\"" % (self.sshkey, self.server, cmd),shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
stdoutdata,stderrdata=process.communicate()
rtn += stdoutdata
return rtn
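# Example usage (an illustrative sketch, not part of the original module; the
# host address and bucket parameters below are assumptions):
#   cli = CLIInterface("10.1.2.3", ssh=False)
#   print cli.server_list()
#   print cli.bucket_create("default", "membase", 11211)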
| [
"[email protected]"
] | |
b10bd3e6fce28ba55ca234a9dcb7dd608cd4763a | 0de115b69243361e7926d0a5400c1fb475a642f5 | /4.5.4 CodingExercise2.py | 7769a572921fc132cf0a40d0db1879e526643fc9 | [] | no_license | Bill-Fujimoto/Intro-to-Python-Course | f475f1c578e33ac37a796038fdaa6ad247876c55 | afe365b0233c4fadb78b2818164ab5726ecd92bb | refs/heads/master | 2020-04-12T21:19:08.688112 | 2018-12-21T21:50:09 | 2018-12-21T21:50:09 | 162,759,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | #Recall last exercise that you wrote a function, word_lengths,
#which took in a string and returned a dictionary where each
#word of the string was mapped to an integer value of how
#long it was.
#
#This time, write a new function called length_words so that
#the returned dictionary maps an integer, the length of a
#word, to a list of words from the sentence with that length.
#If a word occurs more than once, add it more than once. The
#words in the list should appear in the same order in which
#they appeared in the sentence.
#
#For example:
#
# length_words("I ate a bowl of cereal out of a dog bowl today.")
# -> {3: ['ate', 'dog', 'out'], 1: ['a', 'a', 'i'],
# 5: ['today'], 2: ['of', 'of'], 4: ['bowl'], 6: ['cereal']}
#
#As before, you should remove any punctuation and make the
#string lowercase.
#
#Hint: To create a new list as the value for a dictionary key,
#use empty brackets: lengths[wordLength] = []. Then, you would
#be able to call lengths[wordLength].append(word). Note that
#if you try to append to the list before creating it for that
#key, you'll receive a KeyError.
#Write your function here!
def length_words(string):
to_replace = ".,'!?"
for mark in to_replace:
string = string.replace(mark, "")
string=string.lower()
word_list=string.split()
len_words={}
for word in word_list:
        if len(word) not in len_words:
            len_words[len(word)] = []
        len_words[len(word)].append(word)
return len_words
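# An alternative sketch using collections.defaultdict, which sidesteps the
# KeyError issue mentioned in the hint above (illustrative only; the graded
# exercise expects the length_words function defined above):
#
# from collections import defaultdict
# def length_words_dd(string):
#     for mark in ".,'!?":
#         string = string.replace(mark, "")
#     len_words = defaultdict(list)
#     for word in string.lower().split():
#         len_words[len(word)].append(word)
#     return dict(len_words)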
#Below are some lines of code that will test your function.
#You can change the value of the variable(s) to test your
#function with different inputs.
#
#If your function works correctly, this will originally
#print:
#{1: ['i', 'a', 'a'], 2: ['of', 'of'], 3: ['ate', 'out', 'dog'], 4: ['bowl', 'bowl'], 5: ['today'], 6: ['cereal']}
#
#The keys may appear in a different order, but within each
#list the words should appear in the order shown above.
print(length_words("I ate a bowl of cereal out of a dog bowl today."))
| [
"@vfr1200f1#"
] | @vfr1200f1# |
2695f532057b561bf9fbf9c8c1505f68f8c04fb4 | 5d03ee41677bbe4c37a873932f4e2ca63cb50df1 | /gae.sonstige.d/gae.mariahilferstrasse.d/gdata_samples.py | db7a574db198ef71ff3d35ffe6a27715b837f2a3 | [] | no_license | wolfhesse/saikogallery | 159acc1bab431070e8156da8d355e9e51ec0d4ac | f719f29be54d1e2190f3c841ddeeb58997aa555a | refs/heads/master | 2016-09-06T00:41:58.012920 | 2013-05-23T22:11:13 | 2013-05-23T22:11:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | import pickle
import gdata.spreadsheet.text_db
client = gdata.spreadsheet.text_db.DatabaseClient()
client.SetCredentials('wolfgang.schuessel','iybnrxaseld')
#client.SetCredentials('ohramweltgeschehen','kidman')
databases=client.GetDatabases(name='imported-from-query')
tables=databases[0].GetTables(name='mhs')
target=tables[0]
source=tables[1]
print 'target table is ' + target.name
print 'source table is ' + source.name
databases=client.GetDatabases(name='geo20080813')
db=databases[0]
tables=db.GetTables(name='')
table=tables[0]
records=table.GetRecords(1,100)
print [r.content for r in records]
print [r.content for r in records if r.content['pickled']!=None]
ap=[r.content['pickled'] for r in records]
print len(ap)
print ap
au=[pickle.loads(i) for i in ap]
print au
#['', '', {'test': 'true', 'name': 'show'}, '', {'hausnummer': 5, 'has_content': False}, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', {'items': {'lokal': 'Asia Cooking'}, 'wifi': True}, '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
print len(au)
#50
for i in range(0,len(au)):
print i,au[i]
print records[30].content
#{'fundstelle': 'TRUE', 'hausnummer': '31', 'pickled': "(dp0\nS'items'\np1\n(dp2\nS'lokal'\np3\nS'Asia Cooking'\np4\nssS'wifi'\np5\nI01\ns.", 'address': 'mariahilferstrasse 31 wien', 'name': 'mhs:31'}
| [
"[email protected]"
] | |
010ca186f50f28bb57286f398c214119a7c6dfd3 | 4a439662a39631095c75c6a76b88ca3d18f3fad5 | /logisticRegression.py | 9dab37ff36c2176f59125b8e6cc7a8c824057c80 | [] | no_license | allen9408/ICDM_Features | 9cdee93526f776954b5d2610cb8ba4e3bb8ea52c | 293d49f106bb18d93b6a894a10ddd4f3b0fdd27f | refs/heads/master | 2020-12-03T00:15:04.507551 | 2017-07-16T20:06:43 | 2017-07-16T20:06:43 | 96,002,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,627 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import os
from six.moves import urllib
import pandas as pd
import tensorflow as tf
from featureloader import featureloader
# load training features
train_data = featureloader('TRAIN', 'ECG5000')
df_train, feature_column = train_data.featureloader_UCR()
# df_train.to_csv('tmp_1.csv')
# load test features
test_data = featureloader('TEST', 'ECG5000')
df_test, feature_column = test_data.featureloader_UCR()
# df_test.to_csv('tmp_2.csv')
# remove \n in feature_column
feature_column[-1] = feature_column[-1].strip()
print(feature_column)
def input_fn(df, feature_column):
feature_cols = {k: tf.constant(df[k].values, shape=[df[k].size, 1]) for k in feature_column}
label = tf.constant(df["label"].values)
print(df["label"])
return feature_cols, label
def train_input_fn():
return input_fn(df_train, feature_column)
def eval_input_fn():
return input_fn(df_test, feature_column)
# crossed_columns = tf.contrib.layers.crossed_columns(feature_column)
index = 0
layer=[]
for feature in feature_column:
layer.append(tf.contrib.layers.real_valued_column(feature))
index+= 1
model_dir = tempfile.mkdtemp()
m = tf.contrib.learn.LinearClassifier(feature_columns=layer,
model_dir=model_dir)
# m = tf.contrib.learn.DNNClassifier(feature_columns=layer,
# model_dir=model_dir,
# hidden_units=[100,50])
m.fit(input_fn = train_input_fn, steps=200)
results = m.evaluate(input_fn=eval_input_fn, steps=1)
for key in sorted(results):
print("%s: %s" % (key, results[key])) | [
"[email protected]"
] | |
93ec5f04c17f0e8560d908d5e69d8182511e13bd | 443043c276f5c467db3c1af544f5c0aae53aea8b | /tests/test_helpers.py | a43f15f329ebcb5bd6e8fb32f08d40ee79cb2e09 | [
"MIT"
] | permissive | kurtjd/chesscorpy | ac6dda5bd4e3eb6901d525ea1d9411d6352b9903 | 127c4b1f4983f08c824970c04841071e7533fad9 | refs/heads/master | 2023-07-17T18:25:52.933288 | 2021-09-02T15:29:18 | 2021-09-02T15:29:18 | 388,607,838 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from chesscorpy.helpers import get_player_colors, determine_player_colors
def test_get_player_colors():
assert get_player_colors(5, 5) == ('White', 'black')
assert get_player_colors(5, 2) == ('Black', 'white')
def test_determine_player_colors():
# TODO: Test 'random' color
assert determine_player_colors('white', 1, 2) == (1, 2)
assert determine_player_colors('black', 1, 2) == (2, 1)
| [
"[email protected]"
] | |
8c9f901a8df6c0267bbdc70f47a911e544131ccb | 9a59d7b8a23e848ba08941f293e1a9c97107e8f1 | /models/basic.py | 4a9078a823b08463d1926b548feba56025521290 | [] | no_license | KellerJordan/CapsNet-Adversarial | 23a6c965d0955e4686af5579183dc9fe2df553cc | 9f7b090c367f62249c23d7d2f378d558ad777052 | refs/heads/master | 2021-03-30T21:45:15.874058 | 2018-05-24T17:06:59 | 2018-05-24T17:06:59 | 124,352,303 | 19 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | import torch
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
# extremely simple network to do basic science with training methods
class BasicNetwork(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 100)
self.fc2 = nn.Linear(100, 10)
def forward(self, x):
x = x.view(-1, 784)
x = F.relu(self.fc1(x))
out = self.fc2(x)
return out
# simple CNN for experiments on CIFAR10
class KrizhevskyNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 64, 5)
self.pool1 = nn.MaxPool2d(3, 2)
self.conv2 = nn.Conv2d(64, 64, 5)
self.pool2 = nn.MaxPool2d(3, 2)
        # for 32x32 CIFAR10 inputs the flattened feature map is 64x4x4;
        # 64*3*3 would only match 28x28 inputs and fails at runtime
        self.fc1 = nn.Linear(64*4*4, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
logits = self.fc3(x)
return logits
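# Minimal smoke test (an illustrative sketch, not part of the original file):
# if __name__ == '__main__':
#     net = KrizhevskyNet()
#     dummy = torch.randn(2, 3, 32, 32)  # batch of two CIFAR10-sized images
#     print(net(dummy).shape)            # expected: torch.Size([2, 10])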
| [
"[email protected]"
] | |
c7a6bbfb9e4f4606a0720e7f9c0efa56e7d90f30 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/DataQuality/DataQualityConfigurations/python/TCTDisplay.py | 6fa11e45427f043ea1f2b19da409200372d1fc14 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,330 | py | # Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
from DataQualityUtils.DQWebDisplayConfig import DQWebDisplayConfig
dqconfig = DQWebDisplayConfig()
dqconfig.config = "TCT"
dqconfig.hcfg = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_run.1.41.hcfg"
dqconfig.hcfg_min10 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes10.1.9.hcfg"
dqconfig.hcfg_min30 = "/afs/cern.ch/user/a/atlasdqm/dqmdisk/tier0/han_config/Collisions/collisions_minutes30.1.5.hcfg"
dqconfig.hanResultsDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/han_results"
dqconfig.htmlDir = "/afs/cern.ch/atlas/offline/external/FullChainTest/tier0/dqm/www"
dqconfig.htmlWeb = "http://atlas-project-fullchaintest.web.cern.ch/atlas-project-FullChainTest/tier0/dqm/www"
dqconfig.runlist = "runlist_TCT.xml"
dqconfig.indexFile = "results_TCT.html"
dqconfig.lockFile = "DQWebDisplay_TCT.lock"
dqconfig.dbConnection = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOfl = "/GLOBAL/DETSTATUS/DQMFOFL"
dqconfig.dbConnectionHisto = "sqlite://;schema=MyCOOL_histo.db;dbname=OFLP200"
dqconfig.dqmfOflHisto = "/GLOBAL/DETSTATUS/DQMFOFLH"
dqconfig.dbTagName = "DetStatusDQMFOFL-TCT"
| [
"[email protected]"
] | |
1533905896294b79dff04e1b69b2cda7c0496874 | fa1dc1d0d2a169326c97dab863e15403bbd6bdbd | /CS486-686_A2Q2ANN.py | c52223b2857731732b02c8b7a75ccd93868316f2 | [
"MIT"
] | permissive | mojivalipour/nnscratch | f07b893f7ac9792f5c9bb8e8ca5c664e392b6786 | 5e0b7f100d1057fab2c166df5696163634acd726 | refs/heads/master | 2022-11-18T11:43:15.553593 | 2020-07-17T05:19:10 | 2020-07-17T05:19:10 | 271,581,705 | 3 | 8 | null | null | null | null | UTF-8 | Python | false | false | 21,331 | py | #!/usr/bin/env python
# coding: utf-8
# Design and Programming by Lead TA: Mojtaba Valipour @ Data Analytics Lab - UWaterloo.ca
# COURSE: CS 486/686 - Artificial Intelligence - University of Waterloo - Spring 2020 - Alice Gao
# Please let me know if you find any bugs in the code: [email protected]
# The code will be available at https://github.com/mojivalipour/nnscratch
# Version: 0.9.0
# Implement a neural network from scratch
''' Sources:
- http://neuralnetworksanddeeplearning.com/chap2.html
'''
print('Life is easy, you just need to do your best to find your place!')
# Libraries
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from sklearn import datasets
from sklearn.manifold import TSNE # visualization for data with more than two features
from os import path
import pandas as pd
import csv
import copy
import random
# Helper functions
def fixSeed(seed=1010):
np.random.seed(seed)
random.seed(seed)
# The hyper-parameters for the neural network
nSamples = None # use None if you want to use full sample size
# frogsSmall is the same dataset in Q1 that you have to use for comparision
dataset = '2moons' # 2moons/frogsSmall/frogs
noise = 0.05 # Noise in artificial datasets
visNumSamples = 500 # number of samples to visualize
# for regression, we use mean squared error.
# for classification, we use cross entropy.
# for now only mse is supported!
lossFunction = 'mse'
gdMethod = 'batch' # batch gradient descent method
batchSize = 64 # only for minibatch gradient descent
numEpochs = 200 # number of epochs
learningRate = [0.5,0.05,0.005] # learning rates
# for now only relu and sigmoid is supported
lastActivationFunc = 'sigmoid' # relu/sigmoid/softmax
# last layer activation function, this one is important
# because we need to use it for classification later
crossValidationFlag = True # if you'd like to run cross validation, set this flag to True
kFold = 3 # k-fold cross validation; needs to be at least 2
seed = 6565 # Do not change the seed for Assignment
fixSeed(seed=seed) # fix the seed of random generator to make sure comparision is possible
# Some Useful Notes for those students who are interested to know more:
'''
- Neural networks are prone to overfitting. Increasing the number of parameters
could lead to models that have complexity bigger than data.
- Regularization, Normalization and Dropout are popular solutions to overfitting!
- In a neural network, we usually use the softmax function as last layer
activation for multi-class classification and sigmoid for single class
classification.
- For regression problems, we usually use Relu as last layer activation function
and MSE as the loss function that we want to minimize.
- Cross-entropy is the most useful loss function for multi-class classification.
- Sometimes we need to use multiple neurons in the output layer, which means
that we consider a neuron for each class. In this case, we need to use
one-hot vectors to encode the labels.
- Weight initialization is important! Gradient descent is not robust to
weight initialization! Xavier initialization is the most popular method
to initialize weights in neural networks.
'''
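# For instance, Xavier (Glorot) initialization for a weight matrix with
# fan_in inputs and fan_out outputs could look like the line below (shown
# only as an illustration; the network class further down uses a simpler
# 1/sqrt(fan_out) scaling):
#   W = np.random.randn(fan_in, fan_out) * np.sqrt(2. / (fan_in + fan_out))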
# Load data
colorBox = ['#377eb8','#FA0000','#344AA7', '#1EFA39','#00FBFF','#C500FF','#000000','#FFB600']
if dataset == '2moons':
nSamples = 1000 if nSamples is None else nSamples
X,y = datasets.make_moons(n_samples=nSamples, noise=noise, random_state=seed)
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], 2
# shuffle X,y
idxList = list(range(nSamples))
random.shuffle(idxList) # inplace
X, y = X[idxList,:], y[idxList]
elif dataset == 'frogsSmall' or dataset == 'frogs':
if dataset == 'frogs':
# original dataset
name = 'Frogs_MFCCs.csv'
else:
# a small subset of frogs original dataset, same as A2Q1
name = 'frogs-small.csv'
# check if we already have the file in the directory
if not path.isfile(name):
# otherwise ask user to upload it
print("Please put this {} file in the current directory using choose files ...".format(name))
# just load the csv file
X = pd.read_csv(name, sep=',')
X["Family"] = X["Family"].astype('category')
X["FamilyCat"] = X["Family"].cat.codes # added to the last column
X, y = X.iloc[:,0:22].to_numpy(), X.iloc[:,-1].to_numpy()
nSamples = X.shape[0] if nSamples is None else nSamples
X, y = X[:nSamples,:], y[:nSamples] # filter number of samples
numSamples, numFeatures, numClasses = X.shape[0], X.shape[1], len(np.unique(y))
print('#INFO: N (Number of Samples): {}, D (Number of Features): {}, C (Number of Classes): {}'.format(numSamples, numFeatures, numClasses))
plt.figure()
# if y min is not zero, make it zero
y = y - y.min()
assert y.min() == 0
# sample required sample for visualization
indices = list(range(numSamples))
selectedIndices = np.random.choice(indices, visNumSamples)
colors = [colorBox[y[idx]] for idx in selectedIndices]
if numFeatures == 2:
XR = X[selectedIndices, :]
else:
# use tsne to reduce dimensionality for visualization
XR = TSNE(n_components=2).fit_transform(X[selectedIndices,:])
plt.scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
plt.savefig('dataset.png')
if len(y.shape) < 2:
y = np.expand_dims(y,-1) # shape of y should be N x 1
# Define the network structure
# # 2-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 30], True, 'relu'], # w1
# 'Fully Connected': [[30, 1], True, lastActivationFunc] # w2
# }
# overfit network example
config = {
# Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
'Hidden Layer 0': [[numFeatures, 1000], True, 'sigmoid'], # w1
'Fully Connected': [[1000, 1], True, lastActivationFunc] # w2
}
# 3-Layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 3], True, 'sigmoid'], # w1
# 'Hidden Layer 1': [[3, 5], True, 'sigmoid'], # w2
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w2
# }
# 4-layer Network
# config = {
# # Layer Name: [Number of Nodes (in and out), Bias, Activation Function]
# 'Hidden Layer 0': [[numFeatures, 100], True, 'relu'], # w1
# 'Hidden Layer 1': [[100, 50], True, 'relu'], # w2
# 'Hidden Layer 2': [[50, 5], True, 'relu'], # w3
# 'Fully Connected': [[5, 1], True, lastActivationFunc] # w4
# }
# Fully Connected Neural Network Class
class neuralNetwork():
# initializing network
def __init__(self, config=None, numClass=2, learningRate=0.005,
numEpochs=10, batchSize= 64, lossFunction='mse'):
self.config = config
self.configKeyList = list(self.config.keys())
self.lossFunction = lossFunction
self.numLayers = len(self.config)
self.layers = {}
self.layerShapes = {}
self.learningRate = learningRate
self.numEpochs = numEpochs
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
self.batchSize = batchSize
self.numClass = numClass
self.initWeights()
# random init
def initWeights(self):
self.loss = []
self.lossT = []
self.acc = []
self.accT = []
if self.config != None:
for key in config:
# w is parameters, b is bias, a is activation function
self.layers[key] = {'W':np.random.randn(self.config[key][0][0],
self.config[key][0][1])/np.sqrt(self.config[key][0][1]),
'b':np.random.randn(self.config[key][0][1],
) if self.config[key][1]==True else [], 'a':self.config[key][2]}
# keep track of shape only for better understanding
self.layerShapes[key] = {'IS':self.config[key][0][0],'OS':self.config[key][0][1],
'NP':np.prod(self.layers[key]['W'].shape)+len(self.layers[key]['b'])}
        else:
            raise ValueError('#Err: Make sure you set a configuration correctly!')
# activation functions
def relu(self, X):
return np.maximum(0, X)
    def sigmoid(self, X):
        # clip the input so np.exp cannot overflow for very negative values
        return 1./(1. + np.exp(-np.clip(X, -500, 500)))
def activationFunc(self, X, type='sigmoid'):
if type == 'sigmoid':
return self.sigmoid(X)
elif type == 'relu':
return self.relu(X)
elif type == 'None':
return X # do nothing
        else:
            raise NotImplementedError('#Err: Not implemented activation function!')
# objective/loss/cost functions
def mse(self, y, yPred): # mean square error
return np.mean(np.power(y-yPred,2))
def lossFunc(self, y, yPred, type='mse'):
if type == 'mse':
return self.mse(y, yPred)
        else:
            raise NotImplementedError('#Err: Not implemented objective function!')
# back-propagation learning
# forward pass
def forward(self, X):
# apply a(W.T x X + b) for each layer
for key in config:
#print(X.shape, self.layers[key]['W'].shape)
# save input of each layer for backward pass
self.layers[key]['i'] = X
z = np.dot(X, self.layers[key]['W'])
z = z + self.layers[key]['b'] if len(self.layers[key]['b'])!=0 else z
# save middle calculation for backward pass
self.layers[key]['z'] = z
X = self.activationFunc(z, type=self.layers[key]['a'])
# save middle calculation for backward pass
self.layers[key]['o'] = X
return X # yPred
# backward pass
def backward(self, y, yPred):
        # derivative of sigmoid, written in terms of the activation output o = g(z): g'(z) = o*(1-o)
        def sigmoidPrime(o):
            return o * (1 - o)
# derivative of relu
def reluPrime(x):
return np.where(x <= 0, 0, 1)
        def identity(x):
            # the 'None' activation is linear, so its derivative is 1 everywhere
            return np.ones_like(x)
#TODO: It's not necessary to use double for,
# it is possible to implement faster and more efficient version
# for each parameter (weights and bias) in each layer
for idx, key in enumerate(config):
# calculate derivatives
if self.layers[key]['a'] == 'sigmoid':
fPrime = sigmoidPrime
elif self.layers[key]['a'] == 'relu':
fPrime = reluPrime
            elif self.layers[key]['a'] == 'softmax':
                raise NotImplementedError('#Err: softmax backprop is not supported here!')
else: # None
fPrime = identity
deWRTdyPred = -(y-yPred) if self.lossFunction == 'mse' else 1 # de/dyPred
# print('de/dy')
# dyPred/dyPredBeforeActivation # in case of sigmoid g(x) x (1-g(x))
dyPredWRTdyPredPre = fPrime(self.layers[self.configKeyList[-1]]['o'])
# print('dy/dz')
# element wise multiplication/ hadamard product
delta = np.multiply(deWRTdyPred, dyPredWRTdyPredPre)
for idxW in range(len(config),idx,-1): # reverse
if idxW-1 == idx:
# calculating the derivative for the last one is different
# because it is respected to that specific weight
#print('\nWeights of layer',idx)
deltaB = delta
dxWRTdW = self.layers[key]['i'].T # dxWRTdW
delta = np.dot(dxWRTdW,delta)
#print('dz/dw')
else:
# this loop is depended to the number of layers in the configuration
# print('\nWeights of layer',idxW-1)
# the weights of current layer
# how fast the cost is changing as a function of the output activation
dxWRTdh = self.layers[self.configKeyList[idxW-1]]['W'].T # dxPreWRTdx-1
# print('dz/da')
# print('output of layer',idxW-1-1)
# the output of previous layer
# how fast the activation function is changing
dhWRTdhPre = fPrime(self.layers[self.configKeyList[idxW-1-1]]['o']) # dx-1WRTdx-1Pre
# print('da/dz')
delta = np.dot(delta, dxWRTdh) * dhWRTdhPre
# sanity check: Numerical Gradient Checking
# f'(x) = lim (f(x+deltax)-f(x))/deltax when deltax -> 0
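            # A hedged sketch of that check (illustrative names, not executed here):
            #   eps = 1e-5
            #   numGrad = (lossAt(W + eps) - lossAt(W - eps)) / (2 * eps)
            #   np.allclose(numGrad, analyticGrad, atol=1e-4)  # should hold if backprop is right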
# update parameters
# W = W - Gamma * dL/dW
self.layers[key]['djWRTdw'] = delta
self.layers[key]['W'] = self.layers[key]['W'] - self.learningRate/y.shape[0] * delta
# b = b - Gamma * dL/db
self.layers[key]['djWRTdb'] = deltaB
if len(self.layers[key]['b'])!=0:
self.layers[key]['b'] = self.layers[key]['b'] - self.learningRate/y.shape[0] * np.sum(deltaB, axis=0)
# Utility Functions
def summary(self, space=20):
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format("Layer Name", space,
"Input Shape", space,
"Output Shape", space,
"Number of Parameters",space))
for key in config:
print('{: <{}} | {: <{}} | {: <{}} | {: <{}}'.format(key, space,
self.layerShapes[key]['IS'], space,
self.layerShapes[key]['OS'], space,
self.layerShapes[key]['NP'], space))
def fit(self, X, y, XT=None, yT=None, method='batch', batchSize=None, numEpochs=None,
learningRate=None, initialState=None):
if numEpochs is None: # overwrite
numEpochs = self.numEpochs
if learningRate is not None:
self.learningRate = learningRate
if batchSize is not None:
self.batchSize = batchSize
# if initialState is not None:
# # use the given initial parameters (weights and bias)
# self.layers = initialState
if method == 'batch':
            # this is in fact mini-batch gradient descent; it is called "batch" here
            # for consistency with the course material, to make it easier for you
pBar = tqdm(range(numEpochs))
for edx in pBar:
for idx in range(0, X.shape[0], self.batchSize):
start = idx
end = start + self.batchSize
end = end if end < X.shape[0] else X.shape[0]
#TODO: Support variable batchsize
if end-start != self.batchSize:
continue
x_, y_ = X[start:end, :], y[start:end, :]
yPred = self.forward(x_)
loss = self.lossFunc(y_, yPred, type=self.lossFunction)
self.backward(y_, yPred)
yPred,yPredOrig = self.predict(X)
loss = self.lossFunc(y, yPredOrig, type=self.lossFunction)
self.loss.append(loss)
acc = self.accuracy(y, yPred)
self.acc.append(acc)
if XT is not None:
yPred, yPredOrig = self.predict(XT)
loss = self.lossFunc(yT, yPredOrig, type=self.lossFunction)
self.lossT.append(loss)
acc = self.accuracy(yT, yPred)
self.accT.append(acc)
        else:
            raise NotImplementedError('#Err: {} Gradient Descent Method is Not implemented!'.format(method))
def predict(self, X):
yPred = self.forward(X)
yPredOrigin = copy.deepcopy(yPred)
# last layer activation function, class prediction should be single
# and the output is between zero and one
if self.config[self.configKeyList[-1]][-1] == 'sigmoid':
yPred[yPred < 0.5] = 0
yPred[yPred >= 0.5] = 1
# multi-class problem
elif self.config[self.configKeyList[-1]][-1] == 'softmax':
            raise NotImplementedError('#Err: Prediction is not supported for softmax yet!')
# single/multi class problem, single node and it can be anything greater than 0
elif self.config[self.configKeyList[-1]][-1] == 'relu':
yPred = np.round(yPred)
yPred = np.clip(yPred, 0, self.numClass-1) # sanity check
return yPred, yPredOrigin
def error(self, y, yPred):
return self.lossFunc(y, yPred, type=self.lossFunction)
def accuracy(self, y, yPred):
return 100*np.sum(y==yPred)/y.shape[0]
def plotLoss(self, loss=None, ax=None):
if loss is None:
loss = self.loss
if ax is None:
plt.plot(loss)
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Loss Per Epoch")
plt.show()
else:
ax.plot(loss)
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
ax.set_title("Loss Per Epoch")
def crossValidationIndices(self, index, k=5):
# index is a list of indexes
cvList = []
for idx in range(k): # iterate over k-folds
interval = int(len(index)/k)
start = idx * interval
end = start + interval
testIndexes = list(range(start,end))
trainIndexes = list(range(0,start)) + list(range(end,len(index)))
cvList.append((trainIndexes, testIndexes))
return cvList
if crossValidationFlag:
if len(learningRate) == 1:
fig, ax = plt.subplots(3,len(learningRate),figsize=(8,15))
else:
fig, ax = plt.subplots(3,len(learningRate),figsize=(30,3*(len(learningRate)+2)))
else:
fig, ax = plt.subplots(1,1+len(learningRate),figsize=(30,1+len(learningRate)))
for ldx, lr in enumerate(learningRate):
nn = neuralNetwork(config=config, numClass=numClasses, numEpochs=numEpochs,
learningRate=lr, lossFunction=lossFunction)
# Initialize the network and the weights
nn.initWeights()
if crossValidationFlag:
indexes = list(range(X.shape[0]))
cvIndices = nn.crossValidationIndices(indexes, k=kFold)
accList = []
accTList = []
lossList = []
lossTList = []
for k in range(kFold):
nn.initWeights()
XTrain, yTrain = X[cvIndices[k][0],:], y[cvIndices[k][0],:]
XTest, yTest = X[cvIndices[k][1],:], y[cvIndices[k][1],:]
# Train the network
nn.fit(XTrain, yTrain, XTest, yTest, method=gdMethod, batchSize=batchSize,
numEpochs=numEpochs, learningRate=lr)
accList.append(nn.acc)
accTList.append(nn.accT)
lossList.append(nn.loss)
lossTList.append(nn.lossT)
acc = np.mean(accList, axis=0)
accT = np.mean(accTList, axis=0)
loss = np.mean(lossList, axis=0)
lossT = np.mean(lossTList, axis=0)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
if len(learningRate) == 1:
ax[2].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2].set_xlabel("X1")
ax[2].set_ylabel("X2")
ax[2].set_title("Data, LR: {}".format(lr))
ax[0].plot(acc)
ax[0].plot(accT)
ax[0].legend(['Train','Test'])
ax[0].set_xlabel("Epochs")
ax[0].set_ylabel("Accuracy")
ax[0].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1].plot(loss)
ax[1].plot(lossT)
ax[1].legend(['Train','Test'])
ax[1].set_xlabel("Epochs")
ax[1].set_ylabel("Loss")
ax[1].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
ax[2,ldx].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[2,ldx].set_xlabel("X1")
ax[2,ldx].set_ylabel("X2")
ax[2,ldx].set_title("Data, LR: {}".format(lr))
ax[0,ldx].plot(acc)
ax[0,ldx].plot(accT)
ax[0,ldx].legend(['Train','Test'])
ax[0,ldx].set_xlabel("Epochs")
ax[0,ldx].set_ylabel("Accuracy")
ax[0,ldx].set_title("Accuracy Per Epoch"+", LR: {}".format(lr))
ax[1,ldx].plot(loss)
ax[1,ldx].plot(lossT)
ax[1,ldx].legend(['Train','Test'])
ax[1,ldx].set_xlabel("Epochs")
ax[1,ldx].set_ylabel("Loss")
ax[1,ldx].set_title("Loss Per Epoch"+", LR: {}".format(lr))
else:
# Perform a single run for visualization.
nn.fit(X, y, method=gdMethod, batchSize=batchSize, numEpochs=numEpochs,
learningRate=lr)
# print the network structure
nn.summary()
yPred, yPredOrig = nn.predict(X)
print('#INFO: Mean squared error is {}'.format(nn.error(y,yPred)))
colors = [colorBox[int(yPred[idx])] for idx in selectedIndices]
ax[ldx+1].scatter(XR[:, 0], XR[:, 1], s=10, color=colors)
ax[ldx+1].set_xlabel("X1")
ax[ldx+1].set_ylabel("X2")
ax[ldx+1].set_title("LR: {}".format(lr))
        # Plot the mean squared error with respect to the number of epochs
nn.plotLoss(ax=ax[0])
# train accuracy
acc = nn.accuracy(y.squeeze(-1),yPred.squeeze(-1))
print('#INFO: Train Accuracy is {}'.format(acc))
if not crossValidationFlag:
ax[0].legend(["LR: "+str(lr) for lr in learningRate])
# please feel free to save subplots for a better report
fig.savefig('results.png')
| [
"[email protected]"
] | |
3d97346bdf439f2d34bb79bcaaf9889159184176 | 46128392d3fc39d4fb75f07ac0b37234c9628644 | /models/vgg.py | 122ca008f5022af7313548698e8c80a8aa89a742 | [
"MIT"
] | permissive | Light-Alex/Multi-Style-Transfer | 494f1ac8c17a0cbd89eb73658ae9af0c663141a0 | 7d151108cc90a0abeffd2812c3950f516f39d932 | refs/heads/master | 2022-11-23T20:54:23.277987 | 2020-07-28T07:09:55 | 2020-07-28T07:09:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,307 | py | import mxnet as mx
import os
from mxnet.gluon import nn, HybridBlock, Parameter
from mxnet.initializer import Xavier
class Vgg16(HybridBlock):
def __init__(self):
super(Vgg16, self).__init__()
self.conv1_1 = nn.Conv2D(in_channels=3, channels=64, kernel_size=3, strides=1, padding=1)
self.conv1_2 = nn.Conv2D(in_channels=64, channels=64, kernel_size=3, strides=1, padding=1)
self.conv2_1 = nn.Conv2D(in_channels=64, channels=128, kernel_size=3, strides=1, padding=1)
self.conv2_2 = nn.Conv2D(in_channels=128, channels=128, kernel_size=3, strides=1, padding=1)
self.conv3_1 = nn.Conv2D(in_channels=128, channels=256, kernel_size=3, strides=1, padding=1)
self.conv3_2 = nn.Conv2D(in_channels=256, channels=256, kernel_size=3, strides=1, padding=1)
self.conv3_3 = nn.Conv2D(in_channels=256, channels=256, kernel_size=3, strides=1, padding=1)
self.conv4_1 = nn.Conv2D(in_channels=256, channels=512, kernel_size=3, strides=1, padding=1)
self.conv4_2 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv4_3 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_1 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_2 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
self.conv5_3 = nn.Conv2D(in_channels=512, channels=512, kernel_size=3, strides=1, padding=1)
def hybrid_forward(self,F, X):
h = F.Activation(self.conv1_1(X), act_type='relu')
h = F.Activation(self.conv1_2(h), act_type='relu')
relu1_2 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv2_1(h), act_type='relu')
h = F.Activation(self.conv2_2(h), act_type='relu')
relu2_2 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv3_1(h), act_type='relu')
h = F.Activation(self.conv3_2(h), act_type='relu')
h = F.Activation(self.conv3_3(h), act_type='relu')
relu3_3 = h
h = F.Pooling(h, pool_type='max', kernel=(2, 2), stride=(2, 2))
h = F.Activation(self.conv4_1(h), act_type='relu')
h = F.Activation(self.conv4_2(h), act_type='relu')
h = F.Activation(self.conv4_3(h), act_type='relu')
relu4_3 = h
return [relu1_2, relu2_2, relu3_3, relu4_3]
def _init_weights(self, fixed=False, pretrain_path=None, ctx=None):
if pretrain_path is not None:
print('Loading parameters from {} ...'.format(pretrain_path))
self.collect_params().load(pretrain_path, ctx=ctx)
if fixed:
print('Setting parameters of VGG16 to fixed ...')
for param in self.collect_params().values():
param.grad_req = 'null'
else:
self.initialize(mx.initializer.Xavier(), ctx=ctx)
return_layers_id = {
11: [6, 13, 20, 27],
16: [5, 12, 22, 42]
}
vgg_spec = {11: ([1, 1, 2, 2, 2], [64, 128, 256, 512, 512]),
13: ([2, 2, 2, 2, 2], [64, 128, 256, 512, 512]),
16: ([2, 2, 3, 3, 3], [64, 128, 256, 512, 512]),
19: ([2, 2, 4, 4, 4], [64, 128, 256, 512, 512])}
class VGG(HybridBlock):
r"""VGG model from the `"Very Deep Convolutional Networks for Large-Scale Image Recognition"
<https://arxiv.org/abs/1409.1556>`_ paper.
Parameters
----------
layers : list of int
Numbers of layers in each feature block.
filters : list of int
Numbers of filters in each feature block. List length should match the layers.
classes : int, default 1000
Number of classification classes.
batch_norm : bool, default False
Use batch normalization.
"""
def __init__(self, num_layers, batch_norm=True, pretrain_path=None, ctx=None, **kwargs):
super(VGG, self).__init__(**kwargs)
layers, filters = vgg_spec[num_layers]
self.features = self._make_features(layers, filters, batch_norm)
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.features.add(nn.Dense(4096, activation='relu',
weight_initializer='normal',
bias_initializer='zeros'))
self.features.add(nn.Dropout(rate=0.5))
self.output = nn.Dense(1000,
weight_initializer='normal',
bias_initializer='zeros')
self.return_id_list = return_layers_id[num_layers]
if pretrain_path is not None and os.path.isfile(pretrain_path):
self.pretrained = True
self.load_pretrained_param(pretrain_path, ctx)
def _make_features(self, layers, filters, batch_norm):
featurizer = nn.HybridSequential()
for i, num in enumerate(layers):
for _ in range(num):
featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1,
weight_initializer=Xavier(rnd_type='gaussian',
factor_type='out',
magnitude=2),
bias_initializer='zeros'))
if batch_norm:
featurizer.add(nn.BatchNorm())
featurizer.add(nn.Activation('relu'))
featurizer.add(nn.MaxPool2D(strides=2))
return featurizer
def hybrid_forward(self, F, x):
return_ = []
for id, layer in enumerate(self.features):
if isinstance(layer, nn.basic_layers.Dense):
break
x = layer(x)
if id in self.return_id_list:
return_.append(x)
#x = self.features(x)
#x = self.output(x)
return return_
def load_pretrained_param(self, pretrain_path, ctx):
print('Loading Parameters from {}'.format(pretrain_path))
self.load_parameters(pretrain_path, ctx=ctx) | [
"[email protected]"
] | |
b7955588bac5a73a7f7b1064c773845400d52ab2 | fca01c1f424e8554841fcc221a613fb0bd0a0114 | /zespol/admin.py | 5faf86ccc026a8468d45728f491d6fa65c2630f0 | [] | no_license | Bartoszmleczko/GigTicketsApp | 3bae86cb4cb8d17b90ebed2afa7dd5645b117f51 | 9fa013da7ec8a73aebca7ec00658470b067dee4a | refs/heads/master | 2021-01-26T08:20:47.629696 | 2020-02-26T22:54:46 | 2020-02-26T22:54:46 | 243,381,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | from django.contrib import admin
from .models import *
# Register your models here.
class ClubAdmin(admin.ModelAdmin):
list_display = ('name','address')
admin.site.register(Band)
admin.site.register(Club,ClubAdmin)
admin.site.register(Concert)
admin.site.register(Ticket)
admin.site.register(Profile)
admin.site.register(Genre)
| [
"[email protected]"
] | |
cfd644d146385683734341f86b5e62a3ee4cd227 | d5a196acb7531c89d930ba51e33e2319fab0972d | /150/A.py | 220217dd59ad3170a30a2c1ee380094618c0dce1 | [] | no_license | mido1003/atcorder | f1a073a850557c6f18176ad9ff3dfcfe5414afdf | 92639b15d982f29042883621c2fb874e1813a447 | refs/heads/master | 2020-09-20T16:12:53.708315 | 2020-05-25T09:48:16 | 2020-05-25T09:48:16 | 224,533,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | k,x = (int(x) for x in input().split())
if k * 500 >= x:
print("Yes")
else:
print("No") | [
"[email protected]"
] | |
affbdc260006818519072805edce1e7247140a64 | 12db36eaad77c99b97878e96f2c4924dcf2ed83f | /exception/__init__.py | 1847580c2e336851ee2594b864bd64590bf076c2 | [] | no_license | sevenler/orange | 0c442bc09dda1c811fd5e996bf240a1e98e788b7 | 370c04317a4f538f679deb7cab8f6d7a9c9b1d02 | refs/heads/master | 2021-01-11T22:41:25.748658 | 2017-01-17T18:13:40 | 2017-01-17T18:13:40 | 79,017,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from error_status import ErrorStatusException
from authority import AuthorityException
| [
"[email protected]"
] | |
ef82571b3a9d413818632a92cb1e3edb2d75dab3 | 385a63d3c9e6f5815979165001f78ec3d7b90cd2 | /DrivingTDM_SetupMatlabOOP/headerAndFunctionsMotor/ximc/python-profiles/STANDA/8MT195X-540-4.py | 391e7db3d811458155873424999b6ceb86b43093 | [
"BSD-2-Clause"
] | permissive | Rasedujjaman/matlabOOP | 5abb6ec94998fda5e9214ed94cf67a42bf243d4f | e1f025ab9b00a3646719df23852079736d2b5701 | refs/heads/main | 2023-07-23T21:40:53.905045 | 2021-08-31T16:12:39 | 2021-08-31T16:12:39 | 378,249,559 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,654 | py | def set_profile_8MT195X_540_4(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_EMF
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 500
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 1000
move_settings.uSpeed = 0
move_settings.Accel = 2000
move_settings.Decel = 4000
move_settings.AntiplaySpeed = 1000
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 1
engine_settings.NomCurrent = 2100
engine_settings.NomSpeed = 2000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 575
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 200
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_OFF_ENABLED | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = 175
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 25825
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 100
control_settings.MaxSpeed[1] = 1000
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0.013000000268220901
emf_settings.R = 2.5999999046325684
emf_settings.Km = 0.015599999576807022
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0
stage_settings.Units = bytes([0, 0, 0, 0, 0, 0, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 0
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 0
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 0
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 0
gear_settings.ReductionOut = 0
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| [
"[email protected]"
] | |
875a564377d75822b6c87a33792ad8d32b40b7b6 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/datacatalog/outputs.py | 26d9e4bddb4ce2d56c83f67f19a73cd325ca56ef | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,362 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = [
'PrincipalsResponse',
]
@pulumi.output_type
class PrincipalsResponse(dict):
"""
User principals.
"""
def __init__(__self__, *,
object_id: Optional[str] = None,
upn: Optional[str] = None):
"""
User principals.
:param str object_id: Object Id for the user
:param str upn: UPN of the user.
"""
if object_id is not None:
pulumi.set(__self__, "object_id", object_id)
if upn is not None:
pulumi.set(__self__, "upn", upn)
@property
@pulumi.getter(name="objectId")
def object_id(self) -> Optional[str]:
"""
Object Id for the user
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def upn(self) -> Optional[str]:
"""
UPN of the user.
"""
return pulumi.get(self, "upn")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"[email protected]"
] | |
3b91d9f42ee1ecda8632567b35ac5caa51d497c7 | 35053a371d85c2d45a4f52239d8a70b38194ef48 | /Count of Matches in Tournament.py | 96c8b115113e1096f964d3dcc4f40e3f4b7f16a1 | [] | no_license | Kuehar/LeetCode | 51d169c81a2e572ea854399fc78e1130220388f9 | 4555c20455f181f9dd7b3aba2a8779dea795edfb | refs/heads/master | 2023-04-16T10:13:03.584541 | 2023-04-06T11:47:21 | 2023-04-06T11:47:21 | 243,361,421 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | class Solution:
def numberOfMatches(self, n: int) -> int:
return n-1
# O(1) Solution.
# The answer is always n-1: every match eliminates exactly one player, so a
# tournament with n players needs n-1 matches to leave a single winner.
# Runtime: 28 ms, faster than 82.44% of Python3 online submissions for Count of Matches in Tournament.
# Memory Usage: 14.3 MB, less than 40.04% of Python3 online submissions for Count of Matches in Tournament.
| [
"[email protected]"
] | |
3e849edd794f2c41729ac050618dd2fa4f7ccd80 | 31d43b73e8104cd8aef3d97e39666022f2946223 | /test.py | 5cc8efc532a05bd28380d86159fac3a91718c95a | [] | no_license | kgelber1/SSX-Python | 2ed6b5e6b7b3775779464a7f624a70155ec8f657 | 4f5cded3acec68e24206af90ef5611db9adb1ac3 | refs/heads/master | 2020-06-24T07:08:33.486962 | 2019-10-24T18:11:18 | 2019-10-24T18:11:18 | 198,890,544 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, ax = plt.subplots(1,1)
x=np.linspace(np.pi,4*np.pi,100)
N=len(x)
ax.set_xlim(0, N)  # give both ends; a single argument would only move the left edge
ax.set_ylim(-1.5,1.5)
line, = ax.plot([],[],'o-')
def init():
line.set_ydata(np.ma.array(x[:], mask=True))
return line,
def animate(i, *args, **kwargs):
y=np.sin(x*i)
line.set_data(np.arange(N),y) # update the data
return line,
ani = animation.FuncAnimation(fig, animate, init_func=init,
                              frames=100, interval=10, blit=False, repeat=False)
ani.save('2osc.mp4', writer="ffmpeg")
plt.show()  # blocks until the window is closed; fig.show() returns immediately
| [
"[email protected]"
] | |
1f24bf6dac22f50aece5a8dd643a221f8618bfc3 | 29d62d5523c703288d194b8a2cf15efb8423f166 | /preprocess_dataset.py | b60128ef686b4fc795595ba89976d40b64300b89 | [] | no_license | Jonlenes/clusters-news-headlines | 92c623a5a214ea21d5e66dc2ff8a984e268374c3 | 39d54337ef28476a82bd44d39958534a6f4e7368 | refs/heads/master | 2021-10-19T20:41:54.808979 | 2019-02-23T11:36:32 | 2019-02-23T11:36:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,468 | py | import pandas
import re
import string
from nltk.stem.snowball import SnowballStemmer
from load_dataset import path_dataset
def remove_pnt_and_stemming(text_arr):
    """ Removes punctuation and applies stemming to the whole dataset """
    stemmer = SnowballStemmer("english", ignore_stopwords=True)
    for i in range(0, text_arr.shape[0]):
        # strip all punctuation before stemming
        text_arr[i] = text_arr[i].translate(str.maketrans('', '', string.punctuation))
        words = text_arr[i].split()
        text_arr[i] = ""
        for word in words:
            text_arr[i] += stemmer.stem(word) + " "
        text_arr[i] = text_arr[i].strip()
        text_arr[i] = re.sub(r'[^A-Za-z]+', ' ', text_arr[i])
    return text_arr
def split_dataset_by_year(dataset, save_dataset=True):
""" Split dataset por ano - retorna/salva 1 dataset para cada ano no arquivo ogirinal """
key = str(dataset[0][0])[:4]
datasets = []
current_dataset = []
for data in dataset:
if key == str(data[0])[:4]:
current_dataset.append(data[1])
else:
datasets.append(current_dataset.copy())
key = str(data[0])[:4]
current_dataset.clear()
current_dataset.append(data[1])
datasets.append(current_dataset.copy())
if save_dataset:
for i in range(0, len(datasets)):
pandas.DataFrame(datasets[i]).to_csv("dataset_" + str(i + 1) + ".csv", index=False)
return datasets
if __name__ == '__main__':
split_dataset_by_year(path_dataset) | [
"[email protected]"
] | |
69e2f645ab6431a303076a1506514f479e530747 | 9fc5dd13e0595bd5796cd7ec109e3b7c290e2692 | /wikipedia-scape.py | a54f56c6c75b06d0d4069f56a187c27ded4d5b68 | [] | no_license | ronandoolan2/python-webscraping | 812d5190dfe5f24029b4737438c80e8d40716971 | 4dc83a331415c3e55f06b1a8d0de47710db5ccd0 | refs/heads/master | 2021-01-19T00:54:22.801053 | 2017-04-16T09:10:47 | 2017-04-16T09:10:47 | 87,218,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | from bs4 import BeautifulSoup
import urllib2
import re
wiki = "http://en.wikipedia.org/wiki/Mad_Max:_Fury_Road"
header = {'User-Agent': 'Mozilla/5.0'} #Needed to prevent 403 error on Wikipedia
req = urllib2.Request(wiki,headers=header)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
rnd = ""
pick = ""
NFL = ""
player = ""
pos = ""
college = ""
conf = ""
notes = ""
table = soup.find("table", { "class" : "wikitable sortable" })
print table
#output = open('output.csv','w')
for row in table.findAll("tr"):
    cells = row.findAll("a")  # links live in anchor tags; findAll("href") matches no tags
    for cell in cells:
        # search_term = re.search(r'director', cell.text)
        # if search_term:
        #     print search_term.group()
        #print "---"
        print cell.text
    #print "---"
| [
"[email protected]"
] | |
ec50df0aa2a320ce0f88bb7eea72f3ddae60e3a7 | 476768e5629340efcbc11fd175c7db12e09c2d52 | /python/006.py | be26addbbddf5f50f6e7fff97a4484130aab1bf1 | [] | no_license | zero1hac/projecteuler | fb8ded5de8d4126865c11081e4b407e0ae35e304 | 7dc00e89c9870d5c7d9c6364f1e80e19d69655e5 | refs/heads/master | 2020-04-23T20:10:51.375485 | 2019-03-25T08:38:59 | 2019-03-25T08:38:59 | 171,430,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | if __name__ == "__main__":
n = 100
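    # known closed forms: 1^2+...+n^2 = n(n+1)(2n+1)/6 and (1+...+n)^2 = (n(n+1)/2)^2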
sum_of_squares = (n*(n+1)*(2*n+1))/6
square_of_sum = (n*(n+1)/2)**2
print square_of_sum - sum_of_squares | [
"[email protected]"
] | |
494c1e3a8da4af904b0d96a5540e85b475400cc2 | 0e4860fecfdd34a3255003cc8c8df086c14083dd | /python/practise/带你学Django资料及源码/课堂与博客代码/peace_blog/blog/admin.py | 9c1fb6228842fe4ec5d8931dc4a0aad2aa044aa9 | [] | no_license | anzhihe/learning | 503ab9a58f280227011da5eaa4b14b46c678e6f3 | 66f7f801e1395207778484e1543ea26309d4b354 | refs/heads/master | 2023-08-08T11:42:11.983677 | 2023-07-29T09:19:47 | 2023-07-29T09:19:47 | 188,768,643 | 1,443 | 617 | null | 2023-08-24T02:10:34 | 2019-05-27T04:04:10 | Python | UTF-8 | Python | false | false | 289 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Banner)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(Article)
admin.site.register(FriendLink)
admin.site.register(Comment)
admin.site.register(BlogUser)
| [
"[email protected]"
] | |
abaf5bf0704250f8f6056f02c645210cc6095283 | 33b3029d6efaa195a0530e8bafbbdc82e7aea697 | /scripts/test_01.py | 1cd1fbd9755fc06808f6eb20be588a2e5622a120 | [] | no_license | wuyun19890323/lesson001 | 333bc2239151c6337a797d57926f683c05fa0c60 | aa2e202b846664adfa5c1af8312b89000311ba8d | refs/heads/master | 2020-03-19T11:11:58.829176 | 2018-06-08T12:53:05 | 2018-06-08T12:53:05 | 136,438,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,639 | py | from selenium.webdriver.common.by import By
from base.base_driver import browser_fire
from page.page_load import PageLoad
import unittest
class TestLoad(unittest.TestCase):
# def get_text(self,loc):
# return self.scr_load.get_att(self.load_text)
def get_ass(self):
self.scr_load.get_scr(self.scr_load.load_get())
    # URL of the application under test
    url = "http://localhost/iwebshop/"
    # locator for the login link
    load_mark = By.XPATH, "//a[@href='/iwebshop/index.php?controller=simple&action=login']"
    # locator for the username field
    username = By.XPATH, "//input[@type='text']"
    # locator for the password field
    password = By.XPATH, "//input[@type='password']"
    # locator for the login button
    load_click = By.XPATH, "//input[@type='submit']"
    # locator for the text area shown after a successful login
    load_text = By.XPATH, "//p[@class='loginfo']"
    # locator for the logout button
    load_quit = By.XPATH, "//a[@class='reg']"
    # locator for the account/password error prompt on a failed login
    load_wrong = By.XPATH, "//div[@class ='prompt']"
    # locator for the "fill in username or email" hint shown when the username is empty
    load_username_null = By.XPATH, "//tbody/tr[1]/td/label[@class='invalid-msg']"
    # locator for the "fill in password" hint shown when the password is empty
    load_password_null = By.XPATH, "//tbody/tr[2]/td/label[@class='invalid-msg']"
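    # NOTE: the Chinese strings asserted in the tests below ("用户名和密码不匹配",
    # "填写密码", "填写用户名或邮箱") are the iWebShop UI messages themselves,
    # so they are left untranslated on purpose.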
def setUp(self):
self.driver = browser_fire()
self.scr_load = PageLoad(self.driver)
self.scr_load.get_url(self.url)
self.scr_load.maxi_wait(30)
    # valid username, valid password
    def test_load001(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin")
        # enter the password
        self.scr_load.input_text(self.password, "123456")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("admin", self.scr_load.get_att(self.load_text))
        except AssertionError:
            self.get_ass()
            raise
        self.scr_load.click_load(self.load_quit)
def tearDown(self):
self.driver.quit()
    # valid username, wrong password
    def test_load002(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin")
        # enter the password
        self.scr_load.input_text(self.password, "1234567")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
        except AssertionError:
            self.get_ass()
            raise
    # valid username, empty password
    def test_load003(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin")
        # enter the password
        self.scr_load.input_text(self.password, "")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("填写密码", self.scr_load.get_att(self.load_password_null))
        except AssertionError:
            self.get_ass()
            raise
    # wrong username, valid password
    def test_load004(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin1")
        # enter the password
        self.scr_load.input_text(self.password, "123456")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
        except AssertionError:
            self.get_ass()
            raise
    # wrong username, wrong password
    def test_load005(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin1")
        # enter the password
        self.scr_load.input_text(self.password, "1234567")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("用户名和密码不匹配", self.scr_load.get_att(self.load_wrong))
        except AssertionError:
            self.get_ass()
            raise
    # wrong username, empty password
    def test_load006(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "admin1")
        # enter the password
        self.scr_load.input_text(self.password, "")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("填写密码", self.scr_load.get_att(self.load_password_null))
        except AssertionError:
            self.get_ass()
            raise
    # empty username, valid password
    def test_load007(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "")
        # enter the password
        self.scr_load.input_text(self.password, "123456")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
        except AssertionError:
            self.get_ass()
            raise
    # empty username, wrong password
    def test_load008(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "")
        # enter the password
        self.scr_load.input_text(self.password, "1234567")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
        except AssertionError:
            self.get_ass()
            raise
    # empty username, empty password
    def test_load009(self):
        # click the login link
        self.scr_load.click_load(self.load_mark)
        # enter the username
        self.scr_load.input_text(self.username, "")
        # enter the password
        self.scr_load.input_text(self.password, "")
        # click the login button
        self.scr_load.click_load(self.load_click)
        try:
            self.assertIn("填写用户名或邮箱", self.scr_load.get_att(self.load_username_null))
        except AssertionError:
            self.get_ass()
            raise
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
acc15361e8370b7ba0ae6a4582e8d0fc9c912c4d | f30f6672702591c2d0adad5a2f57af8afd493117 | /todo/migrations/0004_auto_20190612_1151.py | cdf7922646dffb5a2af9d82cfc9a58c456b4640d | [] | no_license | MedMekss/Listed | 0f294ecc16d2db4a9ee37f408b1a7a11229409f4 | 06ac0bb5140b11aaa704a6cd0f60bb2c15eb6449 | refs/heads/master | 2020-05-20T03:25:51.047936 | 2019-06-18T09:20:49 | 2019-06-18T09:20:49 | 185,356,172 | 3 | 1 | null | 2019-06-17T13:30:48 | 2019-05-07T08:25:24 | Python | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.2.1 on 2019-06-12 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('todo', '0003_auto_20190606_1243'),
]
operations = [
migrations.AlterField(
model_name='item',
name='title',
field=models.CharField(max_length=32),
),
]
| [
"[email protected]"
] | |
c20346e9a05992ff1130b836fc537db55bc7d17f | e9b3404197c6ee6260ba7e377294805af5b74bd0 | /sphinx_rstbuilder/builders/rst.py | e9837a9ddf1069a17ed3b4e5615103d6a205a667 | [] | no_license | etarasenko/sphinx-rstbuilder | eb9d4083e9cc797fa7507899a7fc8518444015ce | 83a36646c52537f4566264588edef24727251657 | refs/heads/master | 2020-05-17T03:12:10.089707 | 2015-08-05T04:18:50 | 2015-08-05T04:18:50 | 40,223,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # -*- coding: utf-8 -*-
from sphinx.builders.text import TextBuilder
from ..writers.rst import RstWriter
class RstBuilder(TextBuilder):
name = 'rst'
format = 'rst'
out_suffix = '.rst'
def get_target_uri(self, docname, typ=None):
return docname + self.out_suffix
def prepare_writing(self, docnames):
self.writer = RstWriter(self)
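# Hedged sketch (an assumption, not from the original package): a Sphinx
# extension would typically expose this builder via a setup() hook such as:
#
#     def setup(app):
#         app.add_builder(RstBuilder)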
| [
"[email protected]"
] | |
e3de38465362031a14aa2ff4b827877b72f76780 | 60de13f814ebfff48740b693563bf4b83096534d | /venv/Scripts/pip-script.py | ee13259c85c9b690ddae6a5c5196f921bda9b1ed | [] | no_license | Daria8402/bandurova17ov1 | 1c568d41b64fa3c1093193fb78b6c5c15a569cd7 | 5b202d32a4b2707664615b7d9d98f4c77efa9622 | refs/heads/master | 2021-02-18T12:12:56.944287 | 2020-03-05T15:43:51 | 2020-03-05T15:43:51 | 245,193,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | #!D:\GitHub\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"[email protected]"
] | |
ebd3c1c21f84ae08aca5e069c923ae54ae4c4266 | 3c74adb0203f00af331e114838ef4190af455d81 | /mysite/blog/models.py | 96c1614bff94e921e61de59a4a36c592af4f0d92 | [] | no_license | SARTHAKKRSHARMA/Blog-Application | 0d0e2f4ca0069c32d2950b0fd2915f4665b84343 | 1250ab5f1f5bb136d837649ee1693651fe2129b7 | refs/heads/master | 2022-04-19T21:00:53.293587 | 2020-04-21T05:57:38 | 2020-04-21T05:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,561 | py | from django.db import models
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Blog_Detail(models.Model):
author = models.ForeignKey(to=User,on_delete=models.CASCADE,related_name='author')
title = models.CharField(max_length=200)
body = models.TextField()
    creation_date = models.DateTimeField(default=timezone.now)  # pass the callable so the default is evaluated on each save
pub_date = models.DateTimeField(blank=True,null=True)
likes = models.IntegerField(default=0)
dislikes = models.IntegerField(default=0)
like_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='like_user')
dislike_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='dislike_user')
def __str__(self):
return self.title
class Comments(models.Model):
author = models.CharField(max_length=250,blank=True)
blog = models.ForeignKey(Blog_Detail,on_delete=models.CASCADE,blank=True,null=True,related_name='comments')
body = models.TextField(blank=True)
    creation_date = models.DateTimeField(default=timezone.now, blank=True)  # callable default, evaluated per save
likes = models.IntegerField(default = 0,blank=True)
dislikes = models.IntegerField(default=0,blank=True)
like_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='like_comment_user')
dislike_user_reaction = models.ManyToManyField(to=User,blank=True,related_name='dislike_comment_user')
def __str__(self):
return self.author
| [
"[email protected]"
] | |
fbc3cb3337489cd49a68c2578139f993cb3822c4 | a0083584308a52b045550dbe76007e2467b7e40f | /pythonvideos/napalm_mac_Address.py | f7338f62e722e668b8d2dd285552ab03e44f5a7b | [] | no_license | narkalya/git-demo | ac511391a2c8026d53215262202b924a220ded0b | abffcdf9e0d1afd15742bfdd45784423eb04d4ab | refs/heads/master | 2020-03-25T08:58:49.356341 | 2018-08-06T15:09:25 | 2018-08-06T15:09:25 | 143,641,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | from napalm import get_network_driver
import json

driver = get_network_driver('ios')
iosvl2 = driver('192.168.122.72', 'david', 'cisco')
iosvl2.open()
print iosvl2.get_facts()
ios_output = iosvl2.get_mac_address_table()
print (json.dumps(ios_output, sort_keys=True, indent=4))
iosvl2.close()
| [
"[email protected]"
] |