blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
69
| license_type
stringclasses 2
values | repo_name
stringlengths 5
118
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringlengths 4
63
| visit_date
timestamp[us] | revision_date
timestamp[us] | committer_date
timestamp[us] | github_id
int64 2.91k
686M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 23
values | gha_event_created_at
timestamp[us] | gha_created_at
timestamp[us] | gha_language
stringclasses 213
values | src_encoding
stringclasses 30
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 2
10.3M
| extension
stringclasses 246
values | content
stringlengths 2
10.3M
| authors
listlengths 1
1
| author_id
stringlengths 0
212
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2deca391a42294fa9a1e3b7a3e7a081a8a035d51 | 93a016fe0430c57ca6cbe4a1064b0451130f113a | /63. Unique Paths II/Solution1.py | efbd9db5396aff210c1df27060e0a1a1beb5b21d | [] | no_license | ChenPH0522/LeetCode | fcca5eaa707c25a1d60d22dc16fe0d46ab7aba12 | 0b0911851bad871b1da66fa5b8731b96bf9c313a | refs/heads/master | 2020-04-16T21:51:17.845752 | 2019-03-18T01:41:36 | 2019-03-18T01:41:36 | 165,942,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | # 2019.2.20
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """Count the unique right/down paths through a grid with obstacles.

        Cells containing 1 are obstacles and cannot be entered; movement is
        only right or down from the top-left to the bottom-right corner.

        :type obstacleGrid: List[List[int]]
        :rtype: int

        Fixes: the final line carried dataset-dump table residue ("| [")
        that made the block a syntax error; the first-row/first-column
        initialisation also duplicated the same flag logic twice -- it is
        now expressed directly as a propagation from the previous cell.
        """
        if not obstacleGrid or not obstacleGrid[0]:
            return 0
        rows, cols = len(obstacleGrid), len(obstacleGrid[0])
        # paths[r][c] = number of distinct paths that reach cell (r, c).
        paths = [[0] * cols for _ in range(rows)]
        paths[0][0] = 1 if obstacleGrid[0][0] == 0 else 0
        # First column: reachable only while no obstacle has appeared above.
        for r in range(1, rows):
            paths[r][0] = paths[r - 1][0] if obstacleGrid[r][0] != 1 else 0
        # First row: reachable only while no obstacle has appeared to the left.
        for c in range(1, cols):
            paths[0][c] = paths[0][c - 1] if obstacleGrid[0][c] != 1 else 0
        # Interior cells: paths from above plus paths from the left.
        for r in range(1, rows):
            for c in range(1, cols):
                if obstacleGrid[r][c] != 1:
                    paths[r][c] = paths[r - 1][c] + paths[r][c - 1]
        return paths[-1][-1]
"[email protected]"
] | |
85b509b27de898050b210a48965adbce12130a4a | 38dd9d9b5b4887c29a8ab7a4fff2017fdfa79217 | /08. RESTful API Web/02. Sentence Corrector/app/util/spellcheck.py | 440b76813468da2ac4519fea63ac9ca28cd5920b | [] | no_license | ZippoCode/SAR | f7defb01273451a06aae2af0885b4ac87b3ccff9 | 63ab4789a80af5927153e110d80af8a8c05449bb | refs/heads/master | 2020-04-06T09:45:20.392836 | 2018-11-13T09:40:38 | 2018-11-13T09:41:00 | 138,259,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,241 | py | import unirest
import re
import string
import random
from app_secrets import mashape_key
# Matches any single punctuation character; used to strip punctuation from input text.
regex = re.compile('[%s]' % re.escape(string.punctuation))
# Base URL of the Mashape spellcheck endpoint; the text to check is appended to it.
DEFAULT_KEY = "https://montanaflynn-spellcheck.p.mashape.com/check/?text="
def get_dict_spellcheck(text):
    """
    Perform a GET against the Spellcheck API. Returns a dictionary containing:
    - the original sentence
    - the suggested sentence
    - a dictionary mapping each misspelled word to its list of corrections
    - the list of candidate corrected sentences
    Punctuation is stripped from the sentences beforehand.
    :param text:
    :return: dict (or None when the API does not answer 200)
    """
    # Clean up the text: capitalise the first letter, lower-case the rest,
    # replace punctuation with spaces and collapse repeated spaces.
    text = "%s%s" % (text[0].upper(), text[1:].lower())
    text = regex.sub(' ', text)
    text = re.sub(' +', ' ', text)
    response = unirest.get(DEFAULT_KEY + text,
                           headers={
                               "X-Mashape-Key": mashape_key,
                               "Accept": "application/json"
                           })
    # Any non-200 answer is treated as a failure.
    if response.code != 200:
        return None
    original = response.body['original']
    suggestion = response.body['suggestion']
    # Build the corrections dictionary.
    dict_corrections = {}
    response_corrections = response.body['corrections']
    phrase_suggestion = list()
    for word_original in response_corrections:
        list_word_corrections = list()
        for correction in response_corrections[word_original]:
            list_word_corrections.append(correction.title())
            # One candidate sentence per correction, with the replacement
            # upper-cased so that it stands out.
            phrase_suggestion.append(original.replace(word_original, correction.upper()))
        dict_corrections[word_original.title()] = list_word_corrections
    # Build the suggested sentence with the changed words upper-cased.
    original_correct = original
    for wo, ws in zip(original_correct.split(), suggestion.split()):
        if wo != ws:
            original_correct = original_correct.replace(wo, ws.upper())
    phrase_suggestion.append(original_correct)
    random.shuffle(phrase_suggestion)
    response_dict = {
        "Original": original,
        "Suggestion": suggestion,
        'Phrase Suggestion': phrase_suggestion,
        "Corrections": dict_corrections
    }
    return response_dict
| [
"[email protected]"
] | |
9933352045d96cc5d2c27b9f4a3be54ce25e1fbe | cd79b051f67fb014a9494749598e37e1585a5af0 | /task1.py | 9a2a9c2862e63e447fa8faeab9fc8390ab6e7cb0 | [] | no_license | gskirankumar6/lab-6 | 598be50745ebb0712484190c0f8165ee04bd829a | 2ba746c9653846fc1e12a9fdf79a2a4c42e3480c | refs/heads/master | 2021-05-10T13:51:30.333294 | 2018-01-22T21:27:59 | 2018-01-22T21:27:59 | 118,494,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import math
class point():
    """Minimal 2-D point; both coordinates default to the origin."""
    x = 0
    y = 0


# Two sample points: the origin and (1, 2).
start = point()
end = point()
end.x = 1
end.y = 2


def distance_between_points(p1, p2):
    """Return the Euclidean distance between points p1 and p2."""
    return math.sqrt((p1.x - p2.x) ** 2 + (p2.y - p1.y) ** 2)


print(distance_between_points(start, end))
| [
"[email protected]"
] | |
69a05928de9ed76446b23f57b1dc1340e6eaaaef | f3b233e5053e28fa95c549017bd75a30456eb50c | /bace_input/L3A/3A-3N_wat_20Abox/set_1ns_equi_m.py | e4ea2818a930af0f9347d9ab3c2a65bf3ce26e2e | [] | no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import os
# Set up and submit 1 ns equilibration jobs for each TI lambda window of the
# BACE L3A 3A->3N transformation (water, 20 A box).
# Fixed: the loop body's indentation was flattened (a dump artifact that made
# this a SyntaxError); restored conventional 4-space indentation. Logic is
# otherwise unchanged.
dir = '/mnt/scratch/songlin3/run/bace/L3A/wat_20Abox/ti_one-step/3A_3N/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_m.pbs'
# Lambda values of the 12 TI windows.
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
    # Recreate a clean per-window working directory named after the lambda.
    os.system("rm -r %6.5f" %(j))
    os.system("mkdir %6.5f" %(j))
    os.chdir("%6.5f" %(j))
    os.system("rm *")
    workdir = dir + "%6.5f" %(j) + '/'
    # Equilibration input: copy the template and substitute the lambda value.
    eqin = workdir + "%6.5f_equi_m.in" %(j)
    os.system("cp %s %s" %(temp_equiin, eqin))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
    # PBS submission script: same template + substitution.
    pbs = workdir + "%6.5f_1ns_equi.pbs" %(j)
    os.system("cp %s %s" %(temp_pbs, pbs))
    os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
    # Topology and restart coordinates from the parent directory.
    os.system("cp ../3A-3N_merged.prmtop .")
    os.system("cp ../0.5_equi_0_3.rst .")
    # Submit the job, then return to the top-level directory.
    os.system("qsub %s" %(pbs))
    os.chdir(dir)
| [
"[email protected]"
] | |
f6841ab723b264264e63b0292554b13c38cc0029 | 5dcb58862de8905a8ea254ce229686b40ab869c6 | /python/laba8_modules/graph.py | 2a9eba10310e0fc57d24ed517b55a5d969465a22 | [] | no_license | neverkor/labi | 30785d7a6b4b06fb6b233223a5ef12c4e6e689e3 | 58f60671d0147158d5d91dc29b3a751e413aec5f | refs/heads/master | 2022-03-04T14:20:48.182091 | 2019-10-07T13:55:27 | 2019-10-07T13:55:27 | 120,272,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | import data
import matplotlib.pyplot as plt
# Graphical rendering (optional; it just makes things clearer)
def graphic():
    """Plot the sample points, the axis points and the reference circle.

    Everything is read from the shared ``data`` module: the circle centre
    (centr_x, centr_y) and radius, the scatter samples (x, y) and the axis
    points (os_x, os_y).
    """
    circle = plt.Circle((data.centr_x, data.centr_y), data.radius, color='blue', fill=False)
    fig, ax = plt.subplots()
    ax.add_artist(circle)
    ax.axis("equal")
    # Fixed viewport so repeated calls render at the same scale.
    plt.xlim(-50, 50)
    plt.ylim(-50, 50)
    plt.scatter(data.x, data.y, s=0.5, color='red')
    plt.scatter(data.os_x, data.os_y, s=1, color='black')
    plt.show()
# Reset shared module state after plotting: clear the x-axis points and alias
# os_y to the raw y data (NOTE(review): only os_x is cleared -- confirm intent).
data.os_x = []
data.os_y = data.y
| [
"[email protected]"
] | |
58757ab16287a0e8fe9e579352191ef57f453aed | 46fd19b150edec1ee25b3f809ffc385708024a15 | /Jewels_And_Stones.py | b312caf6a4bc0e57ec815a8206569adb6e1df08c | [] | no_license | vrushti-mody/Leetcode-Solutions | a5e16b7f6e786a023b1a7db8895f905119e4aa2e | 194b7c29e31e80d7589e83e00cf100ce743871c4 | refs/heads/master | 2022-11-21T21:43:29.875987 | 2020-07-29T10:48:40 | 2020-07-29T10:48:40 | 279,860,314 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | # You're given strings J representing the types of stones that are jewels, and S representing the stones you have. Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.
# The letters in J are guaranteed distinct, and all characters in J and S are letters. Letters are case sensitive, so "a" is considered a different type of stone from "A".
class Solution:
    def numJewelsInStones(self, J: str, S: str) -> int:
        """Count how many stones in S are jewels (characters listed in J).

        Comparison is case sensitive and the characters in J are distinct.
        Uses set membership, which is O(|J| + |S|) overall, instead of the
        original hand-rolled nested index scan, which was O(|J| * |S|).
        """
        jewels = set(J)
        return sum(1 for stone in S if stone in jewels)
| [
"[email protected]"
] | |
22a8528b7f3e90d3daa6100e399bbe178244f3b3 | a0df0314a9e934e26daf0fe3dee6a88b7e91e53d | /langs/spanish/subselect_PROP.py | ac65d507dc3a458ceb6d5edbc3364e516e78e737 | [
"MIT"
] | permissive | bplank/ud-conversion-tools | 13e3f2093ed04873092db5c3a73349d601d554ce | 1818df8f47822564d4bf0ae954ebfb405902dd02 | refs/heads/master | 2023-05-30T00:58:49.090074 | 2023-05-07T12:17:18 | 2023-05-07T12:17:18 | 258,237,683 | 1 | 2 | MIT | 2023-05-07T12:17:19 | 2020-04-23T14:48:50 | Python | UTF-8 | Python | false | false | 799 | py |
import nltk, re
# Re-tag PROPN candidates read from the "PROPNlist" frequency file: stopwords
# and short apostrophe tokens become STOP; a TSV line is emitted only when the
# proposed tag differs from the current one.
# Fixed: the final print line carried dataset-dump table residue ("| [") that
# made the block a syntax error; logic is otherwise unchanged.
# Stopwords from all NLTK languages plus a few extra tokens to always exclude.
stoplist = list(nltk.corpus.stopwords.words())+ "& bajo al".split(" ")
# Roman numerals, e.g. "XIV" (anchored; note it also matches the empty string).
romans = re.compile("^M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})$")
for line in open("PROPNlist").readlines():
    line = line.strip()
    freq, word, POS, underscore = line.split()
    newPOS = "PROPN"
    newfeats = "_"
    newlabel = "_"
    if word.lower() in stoplist:
        #print(word)
        newPOS = "STOP"
    elif "'" in word and len(word) < 4:
        #print(word)
        newPOS = "STOP"
    # elif set(word).intersection(set(["0","1","2","3","4","5","6","7","8","9"])):
    # print(word)
    # elif romans.search(word):
    # print(word)
    if newPOS != POS:
        outline = "\t".join([freq, word, POS, underscore, newPOS, newfeats, newlabel])
        print(outline)
"[email protected]"
] | |
38aa1cecef456e46672f644af502b036e9857c79 | 58cf4e5a576b2baf7755ae19d410bf8afc2f3709 | /leetcode-solutions/P1718Lexicographically_valid_sequence.py | c9d06c89c6e96e5f0b886e1ffc39ed7c4e203f95 | [] | no_license | srihariprasad-r/leet-code | 78284beac34a4d84dde9f8cd36503496b618fdf7 | fc4f1455bafd1496eb5469a509be8638b75155c1 | refs/heads/master | 2023-08-16T20:24:09.474931 | 2023-08-07T14:23:36 | 2023-08-07T14:23:36 | 231,920,275 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,267 | py | class Solution(object):
    def constructDistancedSequence(self, n):
        """Build a sequence of length 2n-1 in which each number 2..n occurs
        exactly twice at a distance equal to its value, and 1 occurs once.
        Numbers are tried in descending order at each free slot, so the first
        complete assignment found is the lexicographically largest one.

        :type n: int
        :rtype: List[int]
        """
        visited = set()
        def backtrack(idx):
            # All slots filled: a complete valid sequence was found.
            if idx == el:
                return True
            # Slot already occupied by the second copy of an earlier number.
            if ans[idx] != 0:
                return backtrack(idx+1)
            else:
                # Try the largest unused number first.
                for num in range(n, 0, -1):
                    if num == 1:
                        # 1 is placed only once (no paired slot).
                        if num not in visited:
                            ans[idx] = num
                            visited.add(num)
                            if backtrack(idx+1):
                                return True
                            # Undo the placement before trying smaller numbers.
                            ans[idx] = 0
                            visited.remove(num)
                    else:
                        # num occupies idx and idx+num; both must be free.
                        if num not in visited and idx + num < len(ans) and ans[idx+num] == 0:
                            ans[idx] = ans[idx+num] = num
                            visited.add(num)
                            if backtrack(idx+1):
                                return True
                            ans[idx] = ans[idx+num] = 0
                            visited.remove(num)
            return False
        # 2n-1 slots: two per number 2..n plus one for the single 1.
        el = 1 + (n-1) * 2
        ans = [0] * el
        backtrack(0)
        return ans
| [
"[email protected]"
] | |
d5c24aad7f699753b6f6b9dd0785c1360a3c4486 | 7670d3ab993875f935e7cd40e522e5a44d6acb87 | /EMSController/EMSBroker/serializers.py | 5738d594d66ced2ac449b9e868a3e608e5dd7574 | [] | no_license | sr-kulkarni/Eirene | b2a2057a55505d27cb3a268d60408a275e2cd7f5 | 1d074811a23bac1fa451e37d8eaa87328ed87c5f | refs/heads/master | 2021-01-10T03:38:42.465389 | 2016-03-21T04:47:37 | 2016-03-21T04:47:37 | 47,056,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | from rest_framework import serializers
from EMSBroker.models import Service, ServiceHelper
class ServiceSerializer(serializers.ModelSerializer):
    """Serializes a Service: exposes only its name and address."""
    class Meta:
        model = Service
        fields = ('name','address',)
class ServiceHelperSerializer(serializers.ModelSerializer):
    """Serializes a ServiceHelper: the linked service and its specifics."""
    class Meta:
        model = ServiceHelper
        fields = ('service','specifics',)
| [
"[email protected]"
] | |
069aa72a031f9524c83c56938ef67ba5540f6609 | 19d801b2248c1f277fbffeac0b44c7508ed1dd9a | /python/cours1/2_4_1_quotes.py | b8f9b3ab8030336ad60bd1227f16ac9875ee41ea | [] | no_license | eminet666/eminet_tutoriels | 92a24bc820cc3637727f0c5a9fdfc9c90d9450be | f5220caaf5c37b22f3cd667533f922e34deac245 | refs/heads/main | 2023-07-31T06:18:37.657750 | 2021-09-09T19:56:20 | 2021-09-09T19:56:20 | 356,902,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # -*- coding: utf8 -*-
# Quote pool used by the exercise (French text is runtime data, left as-is).
quotes = [
    "Ecoutez-moi, Monsieur Shakespeare, nous avons beau être ou ne pas être, nous sommes !",
    "On doit pouvoir choisir entre s'écouter parler et se faire entendre."
]
# Cartoon-character names the program can pick from.
characters = [
    "alvin et les Chipmunks",
    "Babar",
    "betty boop",
    "calimero",
    "casper",
    "le chat potté",
    "Kirikou"
]
| [
"[email protected]"
] | |
c5da38cf828ee2e7f9f04fe2a3062059242b45d0 | 1c7fa268ee031395806f38d52b6a7282ba5a4633 | /hr_python/collections/medium/WordOrder.py | f5ae2f7799e2749511ce13812c98aa411f7532da | [] | no_license | murugesan-narayan/hr_python | d2f562ecd2aa6c4eef4aab8363d5d040447ed727 | 86542342fc77cf7c95ebd08e5142186410f6385d | refs/heads/master | 2022-04-12T14:59:16.293611 | 2020-03-24T14:25:30 | 2020-03-24T14:25:30 | 249,729,320 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | from collections import OrderedDict
if __name__ == '__main__':
    # Count word occurrences while preserving first-appearance order.
    od = OrderedDict()
    # The first input line gives the number of words that follow.
    for i in range(int(input())):
        w = input()
        od[w] = od.get(w, 0) + 1
    # Distinct word count, then each word's count in appearance order.
    print(len(od))
    print(*od.values())
| [
"[email protected]"
] | |
25d05a6156015393e9e10755b75e63847b9ff2d4 | 89d1f88b63723cb9c90bb50d947c6c9b0ef8909c | /code/YESR/make_filelist.py | 657d2eec43b383d52c8d6fe0e36dd562d3f71735 | [] | no_license | yexuehua/Cell_SR | 9240138a387d4b7e64348869e3167772584a052d | 25f5dcdc821ae11da22fdbf87bf389bcfacc01f4 | refs/heads/master | 2023-03-05T01:49:04.120027 | 2021-02-15T12:30:34 | 2021-02-15T12:30:34 | 264,704,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 612 | py | import pandas as pd
import os
import numpy as np
# Build data.csv pairing each low-res (overlay256) image with its high-res
# (overlay512) counterpart, both sorted by the numeric prefix of the filename.
# Fixed: the final print line carried dataset-dump table residue ("| [") that
# made the block a syntax error; logic is otherwise unchanged.
top_path = r"C:\Users\212774000\Documents\python\gitclone\SR\Cell_SR\data"
lr_names = os.listdir(os.path.join(top_path,"overlay256"))
# Sort numerically by the leading index; assumes every name ends with a fixed
# 12-character suffix after the number -- TODO confirm the naming scheme.
lr_names.sort(key=lambda x:int(x[:-12]))
lr_lists = [os.path.join("overlay256",i) for i in lr_names]
hr_names = os.listdir(os.path.join(top_path,"overlay512"))
hr_names.sort(key=lambda x:int(x[:-12]))
hr_lists = [os.path.join("overlay512",i) for i in hr_names]
df = pd.DataFrame({"overlay256":lr_lists,
                   "overlay512":hr_lists})
df.to_csv("data.csv")
# Read back and print as a sanity check.
ndf = pd.read_csv("data.csv")
print(list(ndf["overlay256"]))
"[email protected]"
] | |
649c8bdf219d3d84de1f29cd783c3241ea33cbc8 | c205aaf81190afcd78c20434f95af890cf05eb55 | /100-199/141-linked-list-cycle.py | b7397c42245bf96c9920d356d2e841c285153683 | [] | no_license | StdioA/LeetCode | 94be861978952364549dcb64d6effbcc05952449 | 2b0ed54a5bfc3496a6234afc8568bb7ec8bd9c5d | refs/heads/master | 2021-09-18T20:55:51.402776 | 2021-09-08T11:45:24 | 2021-09-08T11:45:24 | 52,870,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # coding: utf-8
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
from made_list import ListNode
class Solution:
    # @param head, a ListNode
    # @return a boolean
    def hasCycle(self, head):
        """Return True if the singly linked list starting at head has a cycle.

        Uses Floyd's tortoise-and-hare: the fast pointer advances two nodes
        per step and the slow pointer one; they can only meet inside a cycle.
        O(n) time, O(1) extra space -- the original kept an O(n) dict keyed
        by id(node), and its final line carried dataset-dump table residue
        ("| [") that made the block a syntax error.
        """
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
            if slow is fast:
                return True
        return False
"[email protected]"
] | |
324bc5c67b22e80c45f9991b08b6e18b97fed42e | 3203738cdab38847dbf396411b1ea6d0140de539 | /venv/bin/pip2.7 | 2accc2ba9901fa0063695a5f935ef5c42932705f | [] | no_license | ChenPaulYu/rpi-video-example | 7e000dd1dd88f90a49430507c7407f2ae2bcb077 | 37fd455c57b6a921617ad1d97437daf426cd8461 | refs/heads/master | 2021-04-15T07:32:17.841990 | 2018-03-26T10:49:40 | 2018-03-26T10:49:40 | 126,814,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | 7 | #!/var/www/lab_app/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
# Console-script shim generated by pip's installer: strip the "-script.pyw"
# or ".exe" suffix Windows adds to argv[0], then delegate to pip's main()
# and propagate its exit status.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | |
35d63f1643bbb28b4a418dc38659f9d3048320b8 | 57e4571b018e59b307c52dda8b21920d3817747e | /simplesocial/groups/urls.py | adec226988e5c7fa2d9cb797f871307fe4726951 | [] | no_license | rajatsaini736/simple_social | f39031e829fc1ff58eb547c4b4dacd1404fe6aab | 3334cd09e8eb2c40179c3891f4750b039ba8a66e | refs/heads/master | 2020-03-22T20:35:39.410307 | 2018-07-11T18:19:16 | 2018-07-11T18:19:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | from django.conf.urls import url
from . import views
app_name = 'groups'
urlpatterns = [
url(r'^$',views.ListGroups.as_view(),name='all'),
url(r'^new/$',views.CreateGroup.as_view(),name='create'),
url(r'^posts/in/(?P<slug>[-\w]+)/$',views.SingleGroup.as_view(),name='single'),
url(r'^join/(?P<slug>[-\w]+)/$'views.JoinGroup.as_view(),name='join'),
url(r'^leave/(?P<slug>[-\w]+)/$',views.LeaveGroup.as_view(),name='leave'),
] | [
"[email protected]"
] | |
50c21f40e005535778798626eec9e021586efbda | 25f22d78a376389ff0f825ce1ef9d985de85b422 | /venv/lib/python3.6/tokenize.py | e055e11934aee4f85b4022182011d69cb11b8a53 | [] | no_license | DelanoJo/flask_page | 59ebb7ec6d71debe74f1b19c3d0f4eb9c0ceeece | 1eb96ea99a69ed207593626903bc59eb3f9c7ce5 | refs/heads/master | 2020-12-30T16:41:06.036540 | 2017-05-11T19:01:38 | 2017-05-11T19:01:38 | 91,010,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56 | py | /Users/delanojohnson/anaconda3/lib/python3.6/tokenize.py | [
"[email protected]"
] | |
b43237a959e7f5df821a1de7944f76ab53d41fbe | 13aa5daad57fcb5458e4788779602fe64dd53b3c | /reconect user in conf/test.py | dada1e6b1a5a1852b405a879c371cfae265e35fd | [] | no_license | QAkharkivTC/Server | c2a1f7ce5ef0e6074d301f8a36f66e8a43e7ce5e | eb6bab434c461bd66b2db2850116aa13b9931df7 | refs/heads/main | 2023-08-31T21:49:45.648597 | 2021-10-24T20:05:11 | 2021-10-24T20:05:11 | 394,217,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,664 | py | import json
import requests
import time
import urllib3
import certifi
import ssl
import re
#test data
# NOTE(review): hard-coded credentials and API token for a lab TrueConf
# server -- do not reuse outside the test environment.
user = 'artem_7'
server_name = 'aa111.trueconf.name'
ip = '10.130.2.209'
token = '2PReau6pERNRhbHZcd1Q6I2Y1Mlnd7C3'
cid = 'chat'
login_admim = 'tc'
password_admin = 'qweASD123'
def invite_partisipant(user, server_name, ip, cid, token):
    """Invite user@server_name into conference `cid` via the v3.3 REST API.

    Prints the outcome to stdout; returns None in all cases.
    """
    # The server uses a self-signed certificate: disable TLS verification
    # globally and silence the resulting warnings (lab use only).
    ssl._create_default_https_context = ssl._create_unverified_context
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    url = 'https://'+ip+'/api/v3.3/conferences/'+cid+'/invite?access_token='+token+''
    headers = {'content-type': 'application/json'}
    payload = {"participants":[""+user+"@"+server_name+""]}
    response = requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
    #debug
    if response.status_code == 200:
        print('invite '+user)
    else:
        print('"invite_partisipant" dosen\'t work')
        print(response.status_code)
        print(response.text)
def get_stream_id(ip, cid, token):
    """Return the URL-encoded session id of the running conference `cid`.

    Queries the v4 conferences endpoint and picks the session_id of the
    conference whose id matches `cid`, escaping '@' and '#' for use in URLs.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    url = 'https://'+ip+'/api/v4/conferences/?access_token='+token+'&state=running&page_size=1000'
    headers = {'content-type': 'application/json'}
    response = requests.get(url, headers=headers, verify=False)
    media = json.loads(response.text)
    for i in range(len(media['conferences'])):
        if media['conferences'][i]['id'] == str(cid):
            stream_id = media['conferences'][i]['session_id']
        else:
            pass
    #print(stream_id)
    # NOTE(review): if no running conference matches `cid`, stream_id is never
    # assigned and the next line raises NameError -- confirm desired handling.
    stream_id = re.sub('[@]','%40',stream_id)
    stream_id = re.sub('[#]','%23',stream_id)
    #print(stream_id)
    #debug
    if response.status_code != 200:
        print('something was wrong: see in "get_stream_id"')
        print(response.text)
        print(response.status_code)
    else:
        pass
    return stream_id
def get_instans_partisipant(user, cid, ip, token, login_admim, password_admin):
    """Return the participant_id (instance id) of `user` in conference `cid`.

    Logs in as admin to obtain a PHPSESSID cookie, then lists the stream's
    participants and matches on call_id.
    NOTE(review): builds call_id from the module-level global `server_name`
    rather than a parameter -- confirm this is intended.
    """
    ssl._create_default_https_context = ssl._create_unverified_context
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    headers = {'content-type': 'application/json'}
    data = {"username":login_admim,"password":password_admin}
    url = 'https://'+ip+'/guest/auth/login/'
    response = requests.post(url, headers=headers, data=data, verify=False)
    response.status_code
    #print(response.cookies)
    test = response.cookies
    #print(test)
    # The login response sets a single session cookie; capture its value.
    for cookie in response.cookies:
        #print(cookie.__dict__)
        coockies_1 = cookie.__dict__['value']
    #coockies = test['value']
    #print(coockies)
    call_id = user+'@'+server_name
    stream_id = get_stream_id(ip, cid, token)
    cookies = {'PHPSESSID':''+coockies_1+''}
    headers = {'content-type': 'application/json'}
    url = 'https://'+ip+'/draft/v1/streams/'+stream_id+'/participants/'
    response = requests.get(url, headers=headers, cookies=cookies, verify=False)
    print(response.status_code)
    print(response.text)
    media = json.loads(response.text)
    for i in range(len(media['data']['participants'])):
        if media['data']['participants'][i]['call_id'] == call_id:
            instans = media['data']['participants'][i]['participant_id']
        else:
            pass
    #debug
    # NOTE(review): if no participant matches call_id, `instans` is unbound
    # and the return below raises NameError -- confirm desired handling.
    if response.status_code != 200:
        print('something was wrong: see in "get_instans_partisipant"')
        print(response.text)
        print(response.status_code)
        print(len(media['data']['participants']))
    else:
        #print(instans)
        pass
    return instans
def remove_partisipant(ip, cid, user, server_name, token, login_admim, password_admin):
    """Remove participant user@server_name from the running conference `cid`.

    Looks up the conference stream id and the participant's instance id,
    then issues a DELETE against the participant endpoint using an admin
    session cookie. Prints the outcome; returns None.

    NOTE(review): get_cookies() itself also issues the same DELETE, so the
    participant may be removed twice -- confirm whether that side effect in
    get_cookies is intended.
    """
    # Self-signed server certificate: disable TLS verification (lab use only).
    ssl._create_default_https_context = ssl._create_unverified_context
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    stream_id = get_stream_id(ip, cid, token)
    instans = get_instans_partisipant(user, cid, ip, token, login_admim, password_admin)
    # URL-encode the characters the API expects escaped.
    instans = re.sub('[@]','%40',instans)
    instans = re.sub('[/]','%2F',instans)
    # Fixed: get_cookies takes the full 7-argument list; the original call
    # passed only (ip, login_admim, password_admin) and raised a TypeError.
    cookies_value = get_cookies(ip, cid, user, server_name, token, login_admim, password_admin)
    cookies = {'PHPSESSID':''+cookies_value+''} # ea573dbe280b4235ac1f3c8fb16bb36d
    headers = {'content-type': 'application/json'}
    url = 'https://'+ip+'/draft/v1/streams/'+stream_id+'/participants/'+instans+'/'
    response = requests.delete(url, headers=headers, cookies=cookies, verify=False)
    #debug
    if response.status_code == 200:
        print('user ' + user + ' was removed')
    else:
        print(response.status_code)
        print(response.text)
def get_cookies(ip, cid, user, server_name, token, login_admim, password_admin):
    """Log in as admin and return the PHPSESSID session cookie value.

    Also removes user@server_name from conference `cid` as a side effect.
    NOTE(review): that removal duplicates remove_partisipant() -- confirm
    whether this function should only fetch the cookie.

    Fixed: the original returned the undefined name `coockies`, which raised
    NameError on every call; it now returns the captured cookie value.
    """
    # Self-signed server certificate: disable TLS verification (lab use only).
    ssl._create_default_https_context = ssl._create_unverified_context
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    headers = {'content-type': 'application/json'}
    data = {"username":login_admim,"password":password_admin}
    url = 'https://'+ip+'/guest/auth/login/'
    response = requests.post(url, headers=headers, data=data, verify=False)
    # The login response sets a single session cookie; capture its value.
    for cookie in response.cookies:
        coockies_11 = cookie.__dict__['value']
    stream_id = get_stream_id(ip, cid, token)
    instans = get_instans_partisipant(user, cid, ip, token, login_admim, password_admin)
    # URL-encode the characters the API expects escaped.
    instans = re.sub('[@]','%40',instans)
    instans = re.sub('[/]','%2F',instans)
    cookies = {'PHPSESSID':''+coockies_11+''}
    headers = {'content-type': 'application/json'}
    url = 'https://'+ip+'/draft/v1/streams/'+stream_id+'/participants/'+instans+'/'
    response = requests.delete(url, headers=headers, cookies=cookies, verify=False)
    #debug
    if response.status_code == 200:
        print('user ' + user + ' was removed')
    else:
        print(response.status_code)
        print(response.text)
    return coockies_11
get_cookies(ip, cid, user, server_name, token, login_admim, password_admin)
| [
"[email protected]"
] | |
5260df6b703ce981378e8ce7186a7d59f0aa27dd | 74a0372356b332098a29dde176d3ea0dc9d2ee9d | /koalixcrm/crm/product/unit_transform.py | 0249658ed77853e7ee6410bae24f7f4c8019a238 | [
"BSD-3-Clause"
] | permissive | Shikhar10000/koalixcrm | 6fd1d6d3e341af108d1eb090c9a00a7f815edd32 | 6d0230a0f192bb7126c42db59ecfc4369a765a7a | refs/heads/master | 2022-06-19T10:31:00.524875 | 2020-05-09T18:46:43 | 2020-05-09T18:46:43 | 262,627,556 | 1 | 0 | BSD-3-Clause | 2020-05-09T17:53:09 | 2020-05-09T17:53:08 | null | UTF-8 | Python | false | false | 1,950 | py | # -*- coding: utf-8 -*-
from django.db import models
from django.contrib import admin
from django.utils.translation import ugettext as _
class UnitTransform(models.Model):
    """Conversion factor between two units for a given product type.

    A transform converts quantities expressed in ``from_unit`` into
    ``to_unit`` by multiplying with ``factor``.
    """
    from_unit = models.ForeignKey('Unit',
                                  verbose_name=_("From Unit"),
                                  blank=False,
                                  null=False,
                                  related_name="db_reltransfromfromunit")
    to_unit = models.ForeignKey('Unit',
                                verbose_name=_("To Unit"),
                                blank=False,
                                null=False,
                                related_name="db_reltransfromtounit")
    product_type = models.ForeignKey('ProductType',
                                     blank=False,
                                     null=False,
                                     verbose_name=_("Product Type"))
    factor = models.DecimalField(verbose_name=_("Factor between From and To Unit"),
                                 blank=False,
                                 null=False,
                                 max_digits=17,
                                 decimal_places=2,)
    def transform(self, unit):
        """Return the target unit when `unit` matches the source unit,
        otherwise None to signal that this transform does not apply."""
        if self.from_unit == unit:
            return self.to_unit
        else:
            return None
    def get_transform_factor(self):
        """Return the multiplier converting from_unit quantities to to_unit."""
        return self.factor
    def __str__(self):
        return "From " + self.from_unit.short_name + " to " + self.to_unit.short_name
    class Meta:
        app_label = "crm"
        verbose_name = _('Unit Transform')
        verbose_name_plural = _('Unit Transforms')
class UnitTransformInlineAdminView(admin.TabularInline):
    """Inline admin editor for a product type's unit transforms."""
    model = UnitTransform
    extra = 1
    # Rendered collapsed by default in the parent change form.
    classes = ['collapse']
    fieldsets = (
        ('', {
            'fields': ('from_unit',
                       'to_unit',
                       'factor',)
        }),
    )
    allow_add = True
| [
"[email protected]"
] | |
2ba3413d40aec8e567953a35086628cad4676329 | 4dda19ac20fb8dcf1abd5495a323fceec02d6c74 | /depth_analyzer.py | e0f3f0cc2dfb3f2ff56e7feb8cfb9d97e2c4ec8a | [] | no_license | AndreSlavescu/Stereo-Vision-Depth-Analyzer | cc6832905e9581c314f5a82f54accd3421207a2a | e915b3e38dcb96fe5b1153563053bf961fa791dd | refs/heads/main | 2023-04-10T22:37:57.580173 | 2021-05-05T01:16:38 | 2021-05-05T01:16:38 | 357,052,381 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,647 | py | #!/usr/bin/env python3
import cv2
import depthai as dai
import matplotlib.pyplot as plt
from collections import deque
class DataPlot:
    """Fixed-capacity rolling store of (x, y) samples for live plotting."""

    def __init__(self, max_entries=20):
        self.max_entries = max_entries
        # deque(maxlen=...) evicts the oldest sample automatically once full.
        self.axis_x = deque(maxlen=max_entries)
        self.axis_y = deque(maxlen=max_entries)
        # Small auxiliary buffers (not read anywhere in this script).
        self.buf1 = deque(maxlen=5)
        self.buf2 = deque(maxlen=5)

    def add(self, x, y):
        """Record one (x, y) sample, dropping the oldest beyond capacity."""
        self.axis_x.append(x)
        self.axis_y.append(y)
class RealtimePlot:
    """Redraws a single line on a matplotlib axes from a DataPlot's samples."""
    def __init__(self, axes):
        self.axes = axes
        # Red circle markers joined by lines; starts empty.
        self.lineplot, = axes.plot([], [], "ro-")
    def plot(self, dataPlot):
        """Push the DataPlot samples into the line and rescale both axes."""
        self.lineplot.set_data(dataPlot.axis_x, dataPlot.axis_y)
        # x range follows the data; min/max raise on an empty DataPlot, so
        # callers must add at least one sample first.
        self.axes.set_xlim(min(dataPlot.axis_x), max(dataPlot.axis_x))
        ymin = 0
        ymax = max(dataPlot.axis_y) + 10
        self.axes.set_ylim(ymin, ymax)
        self.axes.relim();
# Normalized increment by which the WASD keys move the ROI each key press.
stepSize = 0.01
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - two mono (grayscale) cameras
monoLeft = pipeline.createMonoCamera()
monoRight = pipeline.createMonoCamera()
stereo = pipeline.createStereoDepth()
spatialLocationCalculator = pipeline.createSpatialLocationCalculator()
# XLink nodes move data between the device and the host.
xoutDepth = pipeline.createXLinkOut()
xoutSpatialData = pipeline.createXLinkOut()
xinSpatialCalcConfig = pipeline.createXLinkIn()
xoutDepth.setStreamName("depth")
xoutSpatialData.setStreamName("spatialData")
xinSpatialCalcConfig.setStreamName("spatialCalcConfig")
# MonoCamera
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT)
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)
outputDepth = True
outputRectified = False
lrcheck = False
subpixel = False
# StereoDepth
stereo.setOutputDepth(outputDepth)
stereo.setOutputRectified(outputRectified)
# Maximum confidence threshold value.
stereo.setConfidenceThreshold(255)
stereo.setLeftRightCheck(lrcheck)
stereo.setSubpixel(subpixel)
monoLeft.out.link(stereo.left)
monoRight.out.link(stereo.right)
spatialLocationCalculator.passthroughDepth.link(xoutDepth.input)
stereo.depth.link(spatialLocationCalculator.inputDepth)
# Initial region of interest (normalized 0..1 coordinates).
topLeft = dai.Point2f(0.5, 0.5)
bottomRight = dai.Point2f(0.6, 0.6)
spatialLocationCalculator.setWaitForConfigInput(False)
config = dai.SpatialLocationCalculatorConfigData()
# Depth thresholds -- presumably millimeters (10 cm .. 10 m); TODO confirm.
config.depthThresholds.lowerThreshold = 100
config.depthThresholds.upperThreshold = 10000
config.roi = dai.Rect(topLeft, bottomRight)
spatialLocationCalculator.initialConfig.addROI(config)
spatialLocationCalculator.out.link(xoutSpatialData.input)
xinSpatialCalcConfig.out.link(spatialLocationCalculator.inputConfig)
# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as device:
    device.startPipeline()
    # Output queue will be used to get the depth frames from the outputs defined above
    depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False)
    spatialCalcQueue = device.getOutputQueue(name="spatialData", maxSize=4, blocking=False)
    spatialCalcConfigInQueue = device.getInputQueue("spatialCalcConfig")
    color = (0, 255, 0)
    print("Use WASD keys to move ROI!")
    # Live matplotlib plot of the ROI's Z distance over frame count.
    fig, axes = plt.subplots()
    plt.title('Plotting Data')
    data = DataPlot();
    dataPlotting = RealtimePlot(axes)
    count = 0
    while True:
        count += 1
        inDepth = depthQueue.get() # Blocking call, will wait until a new data has arrived
        inDepthAvg = spatialCalcQueue.get() # Blocking call, will wait until a new data has arrived
        depthFrame = inDepth.getFrame()
        # Normalize/equalize the raw depth for display and apply a heat map.
        depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
        depthFrameColor = cv2.equalizeHist(depthFrameColor)
        depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)
        spatialData = inDepthAvg.getSpatialLocations()
        print("spatial data len", len(spatialData))
        for depthData in spatialData:
            roi = depthData.config.roi
            roi = roi.denormalize(width=depthFrameColor.shape[1], height=depthFrameColor.shape[0])
            xmin = int(roi.topLeft().x)
            ymin = int(roi.topLeft().y)
            xmax = int(roi.bottomRight().x)
            ymax = int(roi.bottomRight().y)
            # Draw the ROI rectangle and annotate X/Y/Z in millimeters.
            fontType = cv2.FONT_HERSHEY_TRIPLEX
            cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 2)
            cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymax + 20), fontType, 0.5, color)
            cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymax + 35), fontType, 0.5, color)
            cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymax + 50), fontType, 0.5, color)
            # Feed Z into the realtime plot.
            data.add(count, int(depthData.spatialCoordinates.z))
            dataPlotting.plot(data)
            plt.pause(0.001)
        cv2.imshow("depth", depthFrameColor)
        # Keyboard handling: q quits; WASD shift the ROI by stepSize; e/r
        # shrink/grow it. A changed ROI is sent back to the device below.
        newConfig = False
        key = cv2.waitKey(1)
        if key == ord('q'):
            break
        elif key == ord('w'):
            if topLeft.y - stepSize >= 0:
                topLeft.y -= stepSize
                bottomRight.y -= stepSize
                newConfig = True
        elif key == ord('a'):
            if topLeft.x - stepSize >= 0:
                topLeft.x -= stepSize
                bottomRight.x -= stepSize
                newConfig = True
        elif key == ord('s'):
            if bottomRight.y + stepSize <= 1:
                topLeft.y += stepSize
                bottomRight.y += stepSize
                newConfig = True
        elif key == ord('d'):
            if bottomRight.x + stepSize <= 1:
                topLeft.x += stepSize
                bottomRight.x += stepSize
                newConfig = True
        elif key == ord('e'):
            topLeft.x += 0.01
            topLeft.y += 0.01
            bottomRight.x -= 0.01
            bottomRight.y -= 0.01
            newConfig = True
        elif key == ord('r'):
            topLeft.x -= 0.01
            topLeft.y -= 0.01
            bottomRight.x += 0.01
            bottomRight.y += 0.01
            newConfig = True
        if newConfig:
            config.roi = dai.Rect(topLeft, bottomRight)
            cfg = dai.SpatialLocationCalculatorConfig()
            cfg.addROI(config)
            spatialCalcConfigInQueue.send(cfg)
| [
"[email protected]"
] | |
bcfd5af36d4792ba100793bf21ba608c5053a457 | 053b436a21874cb35d5376f7c45d046d80e08e04 | /Chapter 9 Excercise 2.py | a62ca54f3d25297e349701d28ddff78b6083b112 | [] | no_license | braeden-smith/Chapter-8-9-10 | 4b34f813e1c6f8fc0c7ca1c87062d96e4c851dc9 | c8d575dcc941f8dd121074096a37d0966e52fc33 | refs/heads/master | 2021-05-02T09:15:24.292906 | 2018-02-09T18:05:35 | 2018-02-09T18:05:35 | 120,820,485 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #Braeden Smith Chapter 9 Excercise 2
# Exercise 2
# Write a program that categorizes each mail message by which day of the week the
# commit was done. To do this look for lines that start with “From”,
# then look for the third word and keep a running count of each of the days of the week.
# At the end of the program print out the contents of your dictionary (order does not matter).
# Categorize each mail message by the weekday of its commit: scan lines that
# begin with "From" and have more than three tokens (the weekday is the third
# whitespace-separated token), keeping a running count per day.
# Fixed: the final print line carried dataset-dump table residue ("| [") that
# made the block a syntax error; the first-seen/seen-before branch is also
# collapsed into dict.get.
weekdays = {}
table = []
with open('mboxshort.txt') as f:
    for line in f:
        table = line.split()
        if len(table) > 3 and line.startswith('From'):
            day = table[2]
            weekdays[day] = weekdays.get(day, 0) + 1
print(weekdays)
"[email protected]"
] | |
fbe81a7e2f5b2e9010815422bc4c7d0db403fb9b | 1ffc799602225024bc8adccbd6988478c9ce434f | /mags_raw_to_processed_fast_forward.py | e727f0b5c6299c9152fe8bacf336d2ee13722531 | [
"Apache-2.0"
] | permissive | Thomas-Hirsch/airflow-dags | 8adf63ebca05bf24d2f787e0a478f5ca5d14dcd5 | 37b1e9a44dbfd508c1f6b86c712d699bfdff4ca2 | refs/heads/master | 2020-04-21T09:05:22.364622 | 2019-03-19T12:31:25 | 2019-03-19T12:31:25 | 169,438,103 | 0 | 0 | Apache-2.0 | 2019-03-19T13:31:08 | 2019-02-06T16:33:38 | Python | UTF-8 | Python | false | false | 2,600 | py | from datetime import datetime, timedelta
import airflow
from airflow import DAG
from airflow.contrib.operators.kubernetes_pod_operator import KubernetesPodOperator
from airflow.utils.dates import days_ago
# Container entry scripts and database settings shared by all pod tasks.
TAR_PROCESS_SCRIPT = "tar_raw_to_process.py"
DB_REBUILD_SCRIPT = "rebuild_databases.py"
DB_VERSION = 'v1'
DBS_TO_REBUILD = 'mags_processed'

# HOCAS_RAW_FOLDER = ""
# HOCAS_PYTHON_SCRIPT_NAME = "hocas_raw_to_process.py"

MAGS_IMAGE = "593291632749.dkr.ecr.eu-west-1.amazonaws.com/airflow-magistrates-data-engineering:v0.0.7"
MAGS_ROLE = "airflow_mags_data_processor"

task_args = {
    "depends_on_past": False,
    "email_on_failure": True,
    "owner": "isichei",
    "email": ["[email protected]"],
}

# Manual fast-forward run: starts "now" and has no schedule, so it only
# executes when triggered explicitly.
dag = DAG(
    "mags_data_raw_to_processed_fast_forward",
    default_args=task_args,
    description="Process mags data (HOCAS and TAR) running all preprocessing in parallel",
    start_date=datetime.now(),
    schedule_interval=None,
)

tasks = {}

# One partition per landed day of raw TAR data.
file_land_timestamps = [
    "file_land_timestamp=1541030400",
    "file_land_timestamp=1541116800",
    "file_land_timestamp=1541203200",
    "file_land_timestamp=1541289600",
    "file_land_timestamp=1541376000",
    "file_land_timestamp=1541462400",
    "file_land_timestamp=1541548800",
    "file_land_timestamp=1541635200",
    "file_land_timestamp=1541721600"
]


def _pod_task(task_id, env_vars):
    """Build one KubernetesPodOperator with the shared mags image/role settings."""
    return KubernetesPodOperator(
        dag=dag,
        namespace="airflow",
        image=MAGS_IMAGE,
        env_vars=env_vars,
        arguments=["{{ ds }}"],
        labels={"app": dag.dag_id},
        name=task_id,
        in_cluster=True,
        task_id=task_id,
        get_logs=True,
        annotations={"iam.amazonaws.com/role": MAGS_ROLE},
    )


# Final task: rebuild the Athena schemas once all partitions are processed.
task_id = "rebuild-athena-schemas"
tasks[task_id] = _pod_task(task_id, {
    "PYTHON_SCRIPT_NAME": DB_REBUILD_SCRIPT,
    "DB_VERSION": DB_VERSION,
    "DBS_TO_REBUILD": DBS_TO_REBUILD
})

# Process every partition in parallel; each one gates the schema rebuild.
for index, land_timestamp in enumerate(file_land_timestamps):
    task_id = f"process-tar-{index}"
    tasks[task_id] = _pod_task(task_id, {
        "PYTHON_SCRIPT_NAME": TAR_PROCESS_SCRIPT,
        "DB_VERSION": DB_VERSION,
        "S3_RELATIVE_FOLDER_PATHS": land_timestamp,
    })

    tasks[task_id] >> tasks["rebuild-athena-schemas"]
| [
"[email protected]"
] | |
450436a24d35b4f202b9f4b0862ca5a54129f0c6 | e7ab780aeae5a6910eb0b7f23d204ae6b6d4b93b | /case/dataTable/scriptCreteTable.py | 01854fce4d6f2253fd1d256142c0a4012c9ccef2 | [] | no_license | wmengchen/hufu_ui | 59dd60cc48bffc5685a2e82235d98496cf6972a9 | 324497bb9f1c4461d8d80cd956e24317a6565e60 | refs/heads/master | 2023-01-14T04:39:52.260732 | 2020-11-17T02:34:58 | 2020-11-17T02:34:58 | 276,246,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# @date: 2020/7/23 15:07
# @name: scriptCreteTable
# @author:menghuan.wmc
import ddt,unittest,sys,os,re
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from comm.element import Element
from comm.readExcel import ReadExcel
from comm import login
from config import setting
from selenium.webdriver.common.keys import Keys
from comm.sql import Dbconnect
import time
from selenium.webdriver.common.action_chains import ActionChains
import configparser as cparser
cf = cparser.ConfigParser()
cf.read(setting.Test_config,encoding='utf-8')
username = cf.get('test_admin','username')
password = cf.get('test_admin','password')
sheetName = 'scriptCreteTable'
date = time.strftime('%Y_%m_%d',time.localtime(time.time()))
testData = ReadExcel(setting.Test_case,sheetName).read_data()
@ddt.ddt
class ScriptCreteTable(unittest.TestCase):
def setUp(self):
print('--------测试开始--------')
self.login = login.Login()
self.login.login(username, password)
self.driver = self.login.browser
pass
@ddt.data(*testData)
def test_ScriptCreteTable(self,data):
print('---------{}---------'.format(data['case_name']))
Element(self.driver,'project','Projectfind_click').wait_send_keys(date+data["project_name"])
Element(self.driver,'project','Projectfind_click').send_keys(Keys.ENTER)
time.sleep(1)
Element(self.driver,'project','enterProject_click').wait_click()
time.sleep(1)
Element(self.driver,'dataAssert','dataAssert_click').wait_click()
Element(self.driver,'dataTable','dataTable_click').wait_click()
Element(self.driver,'dataTable','dataTablescriptcreateTable_click').wait_click()
Element(self.driver,'dataTable','dataTableScript_inputclick').wait_click()
ActionChains(self.driver).send_keys(data["script"]).perform()
time.sleep(1)
Element(self.driver,'dataTable','dataTableScript_cancelclick').wait_click()
time.sleep(2)
Element(self.driver, 'dataTable', 'dataTablescriptcreateTable_click').wait_click()
time.sleep(1)
Element(self.driver, 'dataTable', 'dataTableScript_inputclick').wait_click()
ActionChains(self.driver).send_keys(data["script"]).perform()
time.sleep(1)
Element(self.driver,'dataTable','dataTableScript_saveclick').wait_click()
Element(self.driver, 'dataTable', 'dataTableScript_saveclick').wait_not_click()
time.sleep(1)
Element(self.driver,'dataTable','dataTablefind_click').wait_send_keys(data["table_name"])
time.sleep(1)
content = Element(self.driver,'dataTable','dataTablefind_totalclick').get_text_value()
self.check_result(content)
def check_result(self,content):
number = re.findall(r"\d+\.?\d*", content)
print("number:",number[0])
s = number[0]
print("s的值是",s)
assert int(s) == 1
def tearDown(self):
print('--------测试结束--------')
self.login.logout()
if __name__=="__main__":
unittest.main() | [
"[email protected]"
] | |
6d4d355c0b0481060525524ea626bc594a5bbb5b | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Permissions/ManageConfiguration/Clients/Clients_POST/test_TC_42284_Clients_POST_Admin_Create_Flag_Set_To_False_1.py | 89199af884dce34566d2d26a0c8dc211f7f1fe2f | [] | no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,133 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Clients.
* TC-42284 - Clients POST:
Verify that Create button is not displayed on 'Proximities zones' page under Intelligent Content Routing menu for user with "Manage Configuration" permission, while "Config admin can create" flag is set to false within the token expiration time.
Equivalent test CURL command:
curl -H "Host: <client_host>" -k -H "Authorization: Bearer
<valid_token>" -X POST -d @<JSON_data_file> -H "Content-Type:
application/json" "<PF_host>://<client_host>/clients"
Same, with test data:
curl -H "Host: <client_host>" -k -H "Authorization: Bearer
<valid_token>" -X POST -d @<JSON_data_file> -H "Content-Type:
application/json" "<PF_host>://<client_host>/clients"
JSON data sent to PathFinder in this test:
{'id': 'auto_client',
'matchingRule': {'groups': [{'groups': [],
'operator': 'ALL',
'rules': [{'contextField': 'remoteAddress',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': '172.0.0.0/8',
'operator': 'IPMATCH'},
{'contextField': 'remoteHost',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'qed.com',
'operator': 'EQ'},
{'contextField': 'serverHost',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': '172.30.3.174',
'operator': 'EQ'},
{'contextField': 'serverPort',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 8080,
'operator': 'EQ'},
{'contextField': 'operatingSystem',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'WINDOWS_8',
'operator': 'OSMATCH'},
{'contextField': 'browser',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'IE',
'operator': 'BROWSERMATCH'},
{'contextField': 'headerMap',
'contextFieldKey': 'headergroup',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': 'headergroup',
'operator': 'EQ'},
{'contextField': 'queryParamMap',
'contextFieldKey': 'query1',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': 'query1',
'operator': 'EQ'},
{'contextField': 'tags',
'contextFieldKey': '1234',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': '1234',
'operator': 'EQ'}]}],
'operator': 'ALL',
'rules': [{'contextField': 'remoteAddress',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': '172.0.0.0/8',
'operator': 'IPMATCH'},
{'contextField': 'remoteHost',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'qed.com',
'operator': 'EQ'},
{'contextField': 'serverHost',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': '172.30.3.174',
'operator': 'EQ'},
{'contextField': 'operatingSystem',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'BLACKBERRY',
'operator': 'OSMATCH'},
{'contextField': 'browser',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'IE10',
'operator': 'BROWSERMATCH'},
{'contextField': 'headerMap',
'contextFieldKey': 'header',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': 'header',
'operator': 'EQ'},
{'contextField': 'queryParamMap',
'contextFieldKey': 'query',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': 'query',
'operator': 'EQ'},
{'contextField': 'tags',
'contextFieldKey': 'tag',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': 'tag',
'operator': 'EQ'},
{'contextField': 'serverPort',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 8080,
'operator': 'EQ'}]},
'name': 'POST: Auto Client',
'sourceSelectionRule': [{'groups': [{'groups': [],
'operator': 'ALL',
'rules': [{'contextField': 'bitrateKbps',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 523,
'operator': 'EQ'},
{'contextField': 'heightPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 456,
'operator': 'EQ'},
{'contextField': 'mimetype',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'video/mp4',
'operator': 'MIMEMATCH'},
{'contextField': 'tags',
'contextFieldKey': '456',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': '456',
'operator': 'EQ'},
{'contextField': 'widthPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 200,
'operator': 'EQ'}]}],
'operator': 'ALL',
'rules': [{'contextField': 'bitrateKbps',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 256,
'operator': 'EQ'},
{'contextField': 'heightPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 563,
'operator': 'EQ'},
{'contextField': 'mimetype',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 'application/x-mpegURL',
'operator': 'MIMEMATCH'},
{'contextField': 'tags',
'contextFieldKey': '124',
'contextFieldType': 'String',
'expressionType': 'Map',
'matchValue': '124',
'operator': 'EQ'},
{'contextField': 'widthPx',
'contextFieldKey': None,
'contextFieldType': 'String',
'expressionType': 'Single',
'matchValue': 250,
'operator': 'EQ'}]}]}
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.draft # remove this after script passed unit tests successfully
@pytest.mark.components
@pytest.allure.story('Clients')
@pytest.allure.feature('POST')
class Test_PFE_Components(object):
    """PFE Clients test cases."""

    @staticmethod
    def _build_client_details(context):
        """Return the ClientDetails payload for TC-42284.

        The positive and the negative halves of the test send the exact same
        payload, so it is built in one place instead of being duplicated
        (~230 lines of literal data each) inside the test body.
        """
        return context.sc.ClientDetails(
            id='auto_client',
            matchingRule={
                'operator': 'ALL',
                'rules': [
                    {'expressionType': 'Single', 'contextField': 'remoteAddress', 'operator': 'IPMATCH', 'contextFieldType': 'String', 'matchValue': '172.0.0.0/8', 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'remoteHost', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'qed.com', 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'serverHost', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': '172.30.3.174', 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'operatingSystem', 'operator': 'OSMATCH', 'contextFieldType': 'String', 'matchValue': 'BLACKBERRY', 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'browser', 'operator': 'BROWSERMATCH', 'contextFieldType': 'String', 'matchValue': 'IE10', 'contextFieldKey': None},
                    {'expressionType': 'Map', 'contextField': 'headerMap', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'header', 'contextFieldKey': 'header'},
                    {'expressionType': 'Map', 'contextField': 'queryParamMap', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'query', 'contextFieldKey': 'query'},
                    {'expressionType': 'Map', 'contextField': 'tags', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'tag', 'contextFieldKey': 'tag'},
                    {'expressionType': 'Single', 'contextField': 'serverPort', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 8080, 'contextFieldKey': None},
                ],
                'groups': [{
                    'operator': 'ALL',
                    'rules': [
                        {'expressionType': 'Single', 'contextField': 'remoteAddress', 'operator': 'IPMATCH', 'contextFieldType': 'String', 'matchValue': '172.0.0.0/8', 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'remoteHost', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'qed.com', 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'serverHost', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': '172.30.3.174', 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'serverPort', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 8080, 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'operatingSystem', 'operator': 'OSMATCH', 'contextFieldType': 'String', 'matchValue': 'WINDOWS_8', 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'browser', 'operator': 'BROWSERMATCH', 'contextFieldType': 'String', 'matchValue': 'IE', 'contextFieldKey': None},
                        {'expressionType': 'Map', 'contextField': 'headerMap', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'headergroup', 'contextFieldKey': 'headergroup'},
                        {'expressionType': 'Map', 'contextField': 'queryParamMap', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 'query1', 'contextFieldKey': 'query1'},
                        {'expressionType': 'Map', 'contextField': 'tags', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': '1234', 'contextFieldKey': '1234'},
                    ],
                    'groups': []
                }]
            },
            name='POST: Auto Client',
            sourceSelectionRule=[{
                'operator': 'ALL',
                'rules': [
                    {'expressionType': 'Single', 'contextField': 'bitrateKbps', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 256, 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'heightPx', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 563, 'contextFieldKey': None},
                    {'expressionType': 'Single', 'contextField': 'mimetype', 'operator': 'MIMEMATCH', 'contextFieldType': 'String', 'matchValue': 'application/x-mpegURL', 'contextFieldKey': None},
                    {'expressionType': 'Map', 'contextField': 'tags', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': '124', 'contextFieldKey': '124'},
                    {'expressionType': 'Single', 'contextField': 'widthPx', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 250, 'contextFieldKey': None},
                ],
                'groups': [{
                    'operator': 'ALL',
                    'rules': [
                        {'expressionType': 'Single', 'contextField': 'bitrateKbps', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 523, 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'heightPx', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 456, 'contextFieldKey': None},
                        {'expressionType': 'Single', 'contextField': 'mimetype', 'operator': 'MIMEMATCH', 'contextFieldType': 'String', 'matchValue': 'video/mp4', 'contextFieldKey': None},
                        {'expressionType': 'Map', 'contextField': 'tags', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': '456', 'contextFieldKey': '456'},
                        {'expressionType': 'Single', 'contextField': 'widthPx', 'operator': 'EQ', 'contextFieldType': 'String', 'matchValue': 200, 'contextFieldKey': None},
                    ],
                    'groups': []
                }]
            }])

    @pytest.allure.link('https://jira.qumu.com/browse/TC-42284')
    @pytest.mark.Clients
    @pytest.mark.POST
    def test_TC_42284_POST_Clients_Admin_Create_Flag_Set_To_False(self, context):
        """TC-42284 - Clients-POST
           Verify that Create button is not displayed on 'Proximities zones' page under Intelligent Content Routing menu for user with "Manage Configuration" permission, while "Config admin can create" flag is set to false within the token expiration time."""
        # Define a test step
        with pytest.allure.step("""Verify that Create button is not displayed on 'Proximities zones' page under Intelligent Content Routing menu for user with "Manage Configuration" permission, while "Config admin can create" flag is set to false within the token expiration time."""):
            ### Positive test example

            # Test case configuration
            clientDetails = self._build_client_details(context)

            # createEntity the Clients.
            # The `check` call validates return code
            # and some of the swagger schema.
            # Most schema checks are disabled.
            response = check(
                context.cl.Clients.createEntity(
                    body=clientDetails
                )
            )

            ### Can add tests here to validate the response content

        with pytest.allure.step("""Verify that Create button is not displayed on 'Proximities zones' page under Intelligent Content Routing menu for user with "Manage Configuration" permission, while "Config admin can create" flag is set to false within the token expiration time."""):
            ### Negative test example

            # Test case configuration (identical payload to the positive step)
            clientDetails = self._build_client_details(context)

            # prepare the request, so we can modify it
            request = context.cl.Clients.createEntity(
                body=clientDetails
            )

            ### Invalid JSON Error injection example
            ### Errors that result in valid JSON can be configured above.
            ### Otherwise, uncomment the code below (request.future....)

            # Get the generated payload and corrupt the metric
            # request.future.request.data = request.future.request.data.replace(
            #     '"metric": 1,', '"metric":,'
            # )

            # createEntity the Clients, and check we got the error we expect
            try:
                client, response = check(
                    request,
                    quiet=True, returnResponse=True
                )
            except (HTTPBadRequest, HTTPForbidden) as e:        # 400, 403 error
                get_error_message(e) | expect.any(
                    should.start_with('may not be empty'),
                    should.start_with('Invalid page parameter specified'),
                    should.contain('Invalid Authorization Token')
                )
            else:
                raise Exception(
                    "Expected error message, got {} status code instead.".format(
                        response.status_code))
| [
"[email protected]"
] | |
8116929850f819d2d6e3998cffe99660ffe4a130 | 838457ddaffd69fe1b6f1754884a938b73f5996f | /ar.py | cb84479e94f25b6362a81c74b00dfc0809911dae | [] | no_license | ociepkam/MISK | eb813501084b56b3b2a702b269a7b6254641d781 | 622299326af3fa944800ac2ea76e4a42a71f569f | refs/heads/master | 2020-12-25T08:36:50.101904 | 2016-06-07T13:34:36 | 2016-06-07T13:34:36 | 60,355,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | class Square:
def __init__(self, x, y, wood, fire, humidity, square_size):
self.x = x
self.y = y
self.wood = wood
self.fire = fire
self.humidity = humidity
self.square_size = square_size
| [
"[email protected]"
] | |
581220800a7ae91d87aec06578cf5db8869815c6 | f6020796383233cbb992a9e55d7659d2a4b75174 | /EXPENSE-ENV/bin/pasteurize | 7ab611cbfc8fbd333f8dfcba7bfa71f7b48d7bb7 | [
"MIT"
] | permissive | ADL175/expense_tracker_401d6 | 1420e03c68349c526cb7b12466c1e67e274976f3 | 2827cda2ca154ce70a9b1879c198f1ec94ebd121 | refs/heads/master | 2021-01-23T01:26:22.415883 | 2017-05-31T15:49:16 | 2017-05-31T15:49:16 | 92,872,053 | 0 | 0 | null | 2017-05-31T15:49:17 | 2017-05-30T20:12:10 | Python | UTF-8 | Python | false | false | 464 | #!/Users/davidlim/Desktop/Project/401_python/wk3/expense_tracker_401d6/EXPENSE-ENV/bin/python3
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.16.0','console_scripts','pasteurize'
__requires__ = 'future==0.16.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.16.0', 'console_scripts', 'pasteurize')()
)
| [
"[email protected]"
] | ||
52426587ad4df18dd187e5edfcd235f5002b9f89 | a423e93de507af4edbfe09d9b082a6c0e05bc30d | /main.py | c52331f61323dbd18df3dc50973d5bb52738acec | [] | no_license | mibre2/PythonStockCandlestickChart | ffca3fdc8f8559a5da023ce206a174204769b15a | a733c8b01751feeee4051b4dd28969caa28b8f1f | refs/heads/master | 2023-03-19T00:09:27.123249 | 2021-03-14T23:20:28 | 2021-03-14T23:20:28 | 347,778,647 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 973 | py | import datetime as dt
import pandas_datareader as web
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mplfinance.original_flavor import candlestick_ohlc
# Define Time Frame
start = dt.datetime(2021, 1, 4) # Start of 2021
end = dt.datetime.now() # Present TIME
# Load Data
ticker = input('Input ticker symbol: ') # Prompts user for input of ticker symbol
data = web.DataReader(ticker, 'yahoo', start, end)
# Restructure data
data = data[['Open', 'High', 'Low', 'Close']]
data.reset_index(inplace=True)
data['Date'] = data['Date'].map(mdates.date2num)
# Visualization
ax = plt.subplot()
ax.grid(True)
ax.set_axisbelow(True)
ax.set_title(ticker + ' Share Price'.format(ticker), color='white')
ax.set_facecolor('black')
ax.figure.set_facecolor('#121212')
ax.tick_params(axis='x', colors='white')
ax.tick_params(axis='y', colors='white')
ax.xaxis_date()
candlestick_ohlc(ax, data.values, width=0.5, colorup='#008000')
plt.show()
| [
"[email protected]"
] | |
5632ec92f87da31b038a9178cbd330bca059d3cb | 6f801f3e71c934a7a41dfdcbb83acffcba7ee2a4 | /lib/pantone.py | bf3689ab31758fa26e881005b87e265b7774d214 | [
"MIT",
"GPL-1.0-or-later"
] | permissive | alyberty/we-love-colors | 149d9f3d652fa9617b88829eb2845c150b7050c1 | 47883048b651601db64b4ad44950ba28ec88ae2b | refs/heads/master | 2020-07-27T23:57:08.603352 | 2019-09-19T17:25:37 | 2019-09-19T17:25:37 | 209,248,991 | 0 | 0 | MIT | 2019-09-18T07:44:48 | 2019-09-18T07:44:48 | null | UTF-8 | Python | false | false | 12,728 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
##
# Imports
# For more information, see https://www.python.org/dev/peps/pep-0008/#imports
##
import os
import json
from urllib.request import urlopen
from bs4 import BeautifulSoup
from palette import Palette
from lib.helpers.nat_sort import natural_sort
class Pantone(Palette):
# Dictionary holding fetched colors (raw)
sets = {
'graphic-design': [],
'fashion-design': [],
'product-design': [],
}
# Identifier
identifier = 'pantone'
# Copyright notices
copyright = {
'xml': '\n PANTONE® and related trademarks are the property of\n Pantone LLC (https://www.pantone.com), a division of X-Rite, a Danaher company\n ',
'gpl': '##\n# PANTONE® and related trademarks are the property of\n# Pantone LLC (https://www.pantone.com), a division of X-Rite, a Danaher company\n##\n'
}
    def __init__(self):
        """Initialize the palette; delegates all setup to the Palette base class."""
        super().__init__()
##
# Fetches PANTONE® colors
#
# Valid `set_name` parameter:
# - 'graphic-design', currently 15870 colors (pp 1-32)
# - 'fashion-design', currently 2443 colors (pp 1-14)
# - 'product-design', currently 4967 colors (pp 1-10)
##
def fetch(self, set_name, firstPage, lastPage):
# One baseURL to rule them all
base_url = 'https://www.numerosamente.it/pantone-list/'
# Translating set_name to valid URL path name via `dict.get()`
set_url = {
'graphic-design': 'graphic-designers/',
'fashion-design': 'fashion-and-interior-designers/',
'product-design': 'industrial-designers/',
}
# Looping through URLs & scraping color information from HTML tables
for i in range(firstPage, lastPage + 1):
html = urlopen(base_url + set_url.get(set_name) + str(i))
soup = BeautifulSoup(html, 'lxml')
print('Loading page ' + str(i) + ' .. done')
for remoteElement in soup.findAll('tr')[1:]:
color = {}
color['code'] = remoteElement.findAll('td')[0].text
color['rgb'] = remoteElement.findAll('td')[1].text
color['hex'] = remoteElement.findAll('td')[2].text
color['name'] = remoteElement.findAll('td')[3].text
# Checking if a fetched element already exists ..
found_same_name = False
for localElement in self.sets[set_name]:
if color['name'] != '' and color['name'] == localElement['name']:
found_same_name = True
# .. if not, adding it is da real MVP
if not found_same_name:
self.sets[set_name].append(color)
print('Loading ' + color['code'] + ' in set "' + set_name + '" .. done')
##
# Fetches all PANTONE® colors at once
##
def fetch_all(self):
self.fetch('graphic-design', 1, 32)
self.fetch('fashion-design', 1, 14)
self.fetch('product-design', 1, 10)
##
# Creates JSON files for Dulux® color sets
##
    def create_json(self, input_filename=''):
        """Split one fetched JSON color dump into per-subset JSON files.

        Reads `<self.json_path>/<input_filename>.json` (the file name
        defaults to `self.identifier`), classifies every color into the
        subsets declared in `sets_processed` below by inspecting the
        prefix/suffix conventions of its `code` string, then writes one
        `<self.json_path>/<set>/<subset>_<n>-colors.json` file per subset.
        """
        if input_filename == '':
            input_filename = self.identifier
        # Dictionary holding fetched colors (processed)
        sets_processed = {
            ##
            # Pantone Color Systems - Graphics
            # Pantone Matching System - PMS
            # For more information, see https://www.pantone.com/color-systems/for-graphic-design
            # or visit their shop: https://www.pantone.com/graphics
            ##
            'graphic-design': {
                # TODO: Solid/Spot Colors (Coated & Uncoated) - Link?
                'C': [],
                'U': [],
                ##
                # CMYK Color Guide (Coated & Uncoated)
                # https://www.pantone.com/products/graphics/cmyk-coated-uncoated
                ##
                'PC': [],
                'PU': [],
                ##
                # Color Bridge Set (Coated & Uncoated)
                # https://www.pantone.com/products/graphics/color-bridge-coated-uncoated
                ##
                'CP': [], # https://www.pantone.com/products/graphics/color-bridge-coated
                'UP': [], # https://www.pantone.com/products/graphics/color-bridge-uncoated
                ##
                # Extended Gamut Coated Guide
                # https://www.pantone.com/products/graphics/extended-gamut-coated-guide
                ##
                'XGC': [],
                ##
                # Pastels & Neons (Coated & Uncoated)
                # https://www.pantone.com/products/graphics/pastels-neons
                ##
                # Neons
                'NC': [],
                'NU': [],
                # Pastels
                'PAC': [],
                'PAU': [],
                ##
                # Metallics (Coated)
                # https://www.pantone.com/products/graphics/metallics-guide
                ##
                'MC': [],
            },
            ##
            # Pantone Color Systems - Fashion
            # Fashion, Home + Interiors - FHI
            # For more information, see https://www.pantone.com/color-systems/for-fashion-design
            # or visit their shop: https://www.pantone.com/fashion-home-interiors
            ##
            'fashion-design': {
                # TODO: 'Textile Paper eXtended'
                'TPX': [],
                # TODO: 'Textile Paper Green'
                'TPG': [],
                # TODO: 'Textile Cotton eXtended'
                'TCX': [],
                ##
                # Nylon Brights Set
                # https://www.pantone.com/products/fashion-home-interiors/nylon-brights-set
                ##
                'TN': [],
                ##
                # Pantone SkinTone™ Guide
                # https://www.pantone.com/products/fashion-home-interiors/pantone-skintone-guide
                ##
                'SP': [],
            },
            ##
            # Pantone Color Systems - Product
            # Plastic Standards
            # For more information, see https://www.pantone.com/color-systems/for-product-design
            # or visit the shop: https://www.pantone.com/plastics
            ##
            'product-design': {
                'PQ': [], # https://www.pantone.com/color-intelligence/articles/technical/did-you-know-pantone-plastics-standards-explained
                # TODO: 'Textile Cotton eXtended'
                'TCX': [],
            },
            'custom-palettes': {
                'color-of-the-year': []
                # IDEA: Palettes created around CotY
            }
        }
        # Lists holding base pastels
        base_pastels = [
            'Yellow 0131',
            'Red 0331',
            'Magenta 0521',
            'Violet 0631',
            'Blue 0821',
            'Green 0921',
            'Black 0961',
        ]
        base_pastels_coated = [color + ' C' for color in base_pastels]
        base_pastels_uncoated = [color + ' U' for color in base_pastels]
        ##
        # List holding codes for PANTONE®'s 'Color of the Year' (CotY) since 2000
        # https://www.pantone.com/color-intelligence/color-of-the-year/color-of-the-year-2019
        ##
        colors_of_the_year = [
            '15-4020', # 2000: Cerulean Blue
            '17-2031', # 2001: Fuchsia Rose
            '19-1664', # 2002: True Red
            '14-4811', # 2003: Aqua Sky
            '17-1456', # 2004: Tigerlily
            '15-5217', # 2005: Blue Turquoise
            '13-1106', # 2006: Sand Dollar
            '19-1557', # 2007: Chili Pepper
            '18-3943', # 2008: Blue Iris
            '14-0848', # 2009: Mimosa
            '15-5519', # 2010: Turquoise
            '18-2120', # 2011: Honeysuckle
            '17-1463', # 2012: Tangerine Tango
            '17-5641', # 2013: Emerald
            '18-3224', # 2014: Radiant Orchid
            '18-1438', # 2015: Marsala
            '15-3919', # 2016: Serenity
            '13-1520', # 2016: Rose Quartz
            '15-0343', # 2017: Greenery
            '18-3838', # 2018: Ultra Violet
            '16-1546', # 2019: Living Coral
        ]
        with open(self.json_path + '/' + input_filename + '.json', 'r') as file:
            data = json.load(file)
        # Looping through PANTONE® color sets
        for set, colors in data.items():
            subset = sets_processed[set]
            # Extracting each PANTONE® color subset
            for i, color in enumerate(colors):
                code = color['code']
                # Colors of the Year are collected into the custom palette in
                # addition to their regular subset below.
                if code[0:7] in colors_of_the_year:
                    code = code[0:7]
                    color['year'] = 2000 + colors_of_the_year.index(code)
                    sets_processed['custom-palettes']['color-of-the-year'].append(color)
                # 'P ' prefix marks the CMYK guide codes (PC / PU, see above).
                if code[0:2] == 'P ':
                    if code[-2:] == ' C':
                        subset['PC'].append(color)
                    if code[-2:] == ' U':
                        subset['PU'].append(color)
                else:
                    # Coated codes: neons, metallics and pastels are told apart
                    # purely by code length and numeric range.
                    if code[-2:] == ' C':
                        if len(code) == 5:
                            if ('801 C' <= code <= '814 C') or ('901 C' <= code <= '942 C'):
                                subset['NC'].append(color)
                                continue
                            if '871 C' <= code <= '877 C':
                                subset['MC'].append(color)
                                continue
                        if len(code) == 6:
                            if ('8001 C' <= code <= '8965 C'):
                                subset['MC'].append(color)
                                continue
                            if ('9020 C' <= code <= '9603 C') or (code in base_pastels_coated):
                                subset['PAC'].append(color)
                                continue
                        if len(code) == 7 and ('10101 C' <= code <= '10399 C'):
                            subset['MC'].append(color)
                            continue
                        subset['C'].append(color)
                    if code[-2:] == ' U':
                        if len(code) == 5:
                            if ('801 U' <= code <= '814 U') or ('901 U' <= code <= '942 U'):
                                subset['NU'].append(color)
                                continue
                            if '871 U' <= code <= '877 U':
                                # TODO: There are no uncoated Metallics, deleting rather than skipping?
                                continue
                        if (len(code) == 6 and ('9020 U' <= code <= '9603 U')) or (code in base_pastels_uncoated):
                            subset['PAU'].append(color)
                            continue
                        subset['U'].append(color)
                    if code[-3:] == ' CP':
                        subset['CP'].append(color)
                    if code[-3:] == ' UP':
                        subset['UP'].append(color)
                    if code[-3:] == 'XGC':
                        subset['XGC'].append(color)
                    if code[-3:] == 'TCX':
                        subset['TCX'].append(color)
                    if code[-3:] == 'TPG':
                        subset['TPG'].append(color)
                    if code[-3:] == 'TPX':
                        subset['TPX'].append(color)
                    if code[-3:] == ' TN':
                        subset['TN'].append(color)
                    if code[-3:] == ' SP':
                        subset['SP'].append(color)
                    if code[0:3] == 'PQ-':
                        subset['PQ'].append(color)
        # Export: one JSON file per subset, grouped in per-set directories.
        for set, subsets in sets_processed.items():
            # NOTE(review): `break` aborts the whole export as soon as one set
            # has no subsets; `continue` looks like the intent -- confirm.
            # (Every set declared above has at least one subset, so in
            # practice this branch never fires.)
            if len(subsets) == 0:
                break
            # Creating subdirectories
            file_path = self.json_path + '/' + set
            os.makedirs(file_path, exist_ok=True)
            for subset, colors in subsets.items():
                # Applying natural sort order to all PANTONE® 'Graphics' colors
                if set == 'graphic-design':
                    natural_sort(colors, 'code')
                if set == 'custom-palettes':
                    colors.sort(key=lambda k: k['year'])
                if subset == 'color-of-the-year':
                    subset = 'CotY'
                json_path = file_path + '/' + subset + '_' + str(len(colors)) + '-colors.json'
                # Dumping Pantone® color sets
                with open(json_path, 'w') as file:
                    file.write(json.dumps(colors, indent=4))
                print('Generating %s .. done' % json_path)
| [
"[email protected]"
] | |
dd74d8fa5aa418703d42a9054b81922bd2acbdfa | 061bbd366dfce18a6703ea8b42af9856c16304c2 | /baysian_neural_decoding/baysian_neural_decoding/latency.py | 09ac7bca84bdf0fa6e966cb73c67fa30fbd56240 | [
"MIT"
] | permissive | badralbanna/Insanally2017 | 671f991df3c70a54ae6b8a4bb47e761d27b19491 | cd77550ec6f25961f236bc31600189d26d05dc19 | refs/heads/master | 2021-01-02T09:10:41.415077 | 2019-02-20T14:44:00 | 2019-02-20T14:44:00 | 99,155,119 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | import numpy
#####################
# Finding Latencies #
#####################
def first_spike_latency(response, log=False, **kwargs):
    """Return the latency of the first spike with a positive time.

    Parameters:
        response: array-like of spike times (converted via numpy.asarray).
        log: when True, the first element of the returned pair is the
            base-10 logarithm of the latency.
        **kwargs: accepted for interface compatibility; ignored.

    Returns:
        (latency, raw_latency): two numpy arrays, each of length 1 when a
        positive spike time exists, otherwise of length 0.

    A response containing only non-positive times yields empty arrays
    (the original indexed into an empty selection and raised IndexError).
    """
    response = numpy.asarray(response)
    positive_times = response[response > 0]
    time = [positive_times[0]] if positive_times.size > 0 else []
    if not log:
        return (numpy.array(time), numpy.array(time))
    else:
        return (numpy.log10(numpy.array(time)), numpy.array(time))
############################
# Estimating Probabilities #
############################
# Use set_bw and KDE wrapper
| [
"[email protected]"
] | |
252f1a6776995a522d5b7f0d6c852580ebd19803 | 2a22fc2d2b05ac1cc871777a1ac97f38fed2aea9 | /utils/focalloss.py | 5895b2bc0e526683a33885b46956fa5d7ba7615c | [] | no_license | tbornt/text_classification_pytorch | a3bcd7e7d3037608dad51105919663ba36a348a0 | d2dd5d948489629b1aacc3cfd6f6201e516175b3 | refs/heads/master | 2023-02-24T20:13:19.143752 | 2018-11-27T02:25:22 | 2018-11-27T02:25:22 | 156,794,961 | 16 | 1 | null | 2023-02-15T20:19:06 | 2018-11-09T01:54:54 | Python | UTF-8 | Python | false | false | 2,199 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    r"""
    This criterion is an implementation of Focal Loss, proposed in
    "Focal Loss for Dense Object Detection" (Lin et al., 2017):

        Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])

    The losses are averaged across observations for each minibatch.

    Args:
        class_num(int): number of classes C.
        alpha(1D Tensor, Variable): per-class scalar factor; defaults to a
            vector of ones (no class re-weighting).
        gamma(float, double): gamma > 0; reduces the relative loss for
            well-classified examples (p > .5), putting more focus on hard,
            misclassified examples.
        size_average(bool): By default, the losses are averaged over
            observations for each minibatch. However, if the field
            size_average is set to False, the losses are instead summed
            for each minibatch.
    """
    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        if alpha is None:
            # No re-weighting: every class gets a factor of 1.
            self.alpha = Variable(torch.ones(class_num, 1))
        else:
            self.alpha = alpha if isinstance(alpha, Variable) else Variable(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        """Compute the focal loss.

        Args:
            inputs: raw (unnormalised) class scores, shape (N, C).
            targets: ground-truth class indices (long), shape (N,).

        Returns:
            Scalar tensor: mean (or sum) focal loss over the batch.
        """
        N = inputs.size(0)
        C = inputs.size(1)
        # Explicit dim: the original relied on the deprecated implicit
        # dimension choice of F.softmax, which warns on modern PyTorch and
        # resolves to dim=1 for 2-D inputs anyway.
        P = F.softmax(inputs, dim=1)

        # One-hot mask of the target class for every sample; new_zeros keeps
        # the dtype/device of `inputs` (same effect as `inputs.data.new`).
        class_mask = inputs.new_zeros(N, C)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.view(-1)]

        # p_t: probability assigned to the correct class, shape (N, 1).
        probs = (P * class_mask).sum(1).view(-1, 1)
        log_p = probs.log()

        # FL = -alpha * (1 - p_t)^gamma * log(p_t)
        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
"[email protected]"
] | |
c456ffedaa816a4036a4f62abdf89c0116d62451 | b24ab5f07fc60de448662ef4b213f5bac499a93e | /Player.py | d4d5eb2cc6b57acb71030d58e6a514c1d479be3a | [] | no_license | RhevanP/HangmanPYQT5 | 67d6bbe05dff8d4e23b1053b46a83e424ca2ceee | a760f6f4217a8ccac2bc47151abf2629cfd1f6a2 | refs/heads/main | 2023-02-05T15:13:37.770544 | 2020-12-28T12:16:13 | 2020-12-28T12:16:13 | 325,003,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 16:23:24 2020
@author: antho
"""
class Player() :
def __init__(self) :
self.lifeMax = 6
self.currentLife = self.lifeMax
self.userInput = []
self.lostGame = False
def loosingLife(self) :
self.currentLife -= 1
self.Lost()
def Lost(self) :
if self.currentLife == 0:
self.lostGame = True | [
"[email protected]"
] | |
f6f969236282077c72f14344524d390376e73ef6 | 0ed5a6f1b3f30c39b93886d6bad3fc202ac48260 | /stock.py | 7bea686dc1f26f47041705845af17755bed5e193 | [] | no_license | Hawkgirl/nse | 4c4b595f93ccd861bceb7f9813d95ae37d12da9d | 72b52c2c3dbbf3906e872ae33e6e24d3eb573b3b | refs/heads/master | 2020-04-05T07:46:03.023140 | 2018-11-12T10:36:37 | 2018-11-12T10:36:37 | 156,687,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | class stock():
def __init__(self, *arg, **kwargs):
if 'name' in kwargs:
self._name=kwargs['name']
if 'ltp' in kwargs:
self._ltp=kwargs['ltp']
if 'high52' in kwargs:
self._high52=kwargs['high52']
if 'tradevalue' in kwargs:
self._tradevalue=kwargs['tradevalue']/10000000
@property
def name(self):
return self._name
@property
def ltp(self):
return self._ltp
@property
def high52(self):
return self._high52
@property
def tradevalue(self):
return self._tradevalue
@property
def high52gap(self):
if self.ltp == 0: return 0
if self.high52 == self.ltp : return 0
val = (((float(self.high52 - self.ltp))/float(self.ltp)) * 100)
return round(val, 2)
| [
"[email protected]"
] | |
64cacb4092c260debe1654e1c4e577df432fe9ac | ccbdc11e15ee36a9a36ee75af8ce7ab41e2ca45f | /reciver.py | c4a7198d9b15c9da38b65b6e242015af536fcf7e | [] | no_license | musk03an22/arth | 36e482328cca01c35cc83ba2f04ab36a0aa71d0f | 22a9be1693fa611c9216b9bc656be28639c30ff7 | refs/heads/main | 2023-05-19T02:34:40.768872 | 2021-06-13T18:20:35 | 2021-06-13T18:20:35 | 376,076,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | import socket,cv2, pickle,struct
# create socket
client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
host_ip = '192.168.56.1' # erver ip
port = 9999
client_socket.connect((host_ip,port))
data = b""
payload_size = struct.calcsize("Q")
while True:
while len(data) < payload_size:
packet = client_socket.recv(4*1024)
if not packet: break
data+=packet
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q",packed_msg_size)[0]
while len(data) < msg_size:
data += client_socket.recv(4*1024)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
cv2.imshow("RECEIVING VIDEO",frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
client_socket.close() | [
"[email protected]"
] | |
addfdf28434227d4c5d4be3ceb9ead15bad88b04 | 27de4db430ebd3264ce4bf148a4a9062f95541d7 | /jedi/long_file/hdf5_functions.py | e50164fc87326fa5fb747225f82d392c8ec76cf8 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | broadinstitute/jedi-public | 1afb499bb9c927b630945bca7f20e3df2baa0842 | bce10d7e1a89e666ec4c77a1e3448f684dc21daf | refs/heads/master | 2023-08-17T15:48:49.325910 | 2021-09-28T22:30:50 | 2021-09-28T22:30:50 | 409,325,595 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,626 | py | import json
import logging
import os
from collections import defaultdict
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Optional, Callable, Any, Iterator, TypeVar, Set, Iterable
import h5py
import numpy as np
import pandas as pd
Instance = h5py.Group
BASELINE = 'baseline'
BASELINE_AGE = '' # NB: replace with field name for baseline age
AGE = 'age'
YEAR_UNIT_PD = 'y'
WEEK_UNIT_PD = 'W'
DAY_UNIT_PD = 'D'
ID = 'id'
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class FieldType(Enum):
string = 'string'
float = 'float'
int = 'int'
age = 'age'
T = TypeVar('T')
DEFAULT_SUFFIX = 'no_filter'
@dataclass(frozen=True)
class Field:
name: str # the name of the field, e.g. Date_Age
type: FieldType
file_type: str
units: str = None
column_suffix: Optional[str] = DEFAULT_SUFFIX # should be provided if there is a filter or function
filter: Optional[Callable[[T], bool]] = None # if filter applied to field's value is False, instance is skipped
function: Optional[Callable[[T], Any]] = None # function applied to field's value
def __str__(self) -> str:
return '.'.join([x for x in [self.file_type, self.name, self.column_suffix] if x is not None])
def __post_init__(self):
if self.filter is not None or self.function is not None:
# If you have a filter or function, you cannot use the default suffix
assert self.column_suffix != DEFAULT_SUFFIX
def str_from_instance(field_name: str, instance) -> Optional[str]:
if field_name in instance:
return instance[field_name][()]
def float_from_instance(field_name: str, instance) -> Optional[float]:
if field_name in instance:
return float(instance[field_name][0])
def int_from_instance(field_name: str, instance) -> Optional[int]:
if field_name in instance:
return int(instance[field_name][0])
def age_from_instance(field_name: str, instance) -> Optional[int]:
if field_name in instance:
return pd.to_timedelta(instance[field_name][()])
def field_from_instance(field: Field, instance) -> Any:
"""If field is not in the instance, will return None"""
if field.type == FieldType.float:
return float_from_instance(field.name, instance)
if field.type == FieldType.string:
return str_from_instance(field.name, instance)
if field.type == FieldType.int:
return int_from_instance(field.name, instance)
if field.type == FieldType.age:
return age_from_instance(field.name, instance)
else:
raise NotImplementedError(f'Field type {field.type} cannot be read from hd5.')
def instances_from_hd5(hd5: h5py.File, file_type: str) -> Iterator:
if file_type not in hd5:
return
for instance in hd5[file_type]:
yield hd5[file_type][instance]
def split_file_types(fields: List[Field]) -> Dict[str, List[Field]]:
file_type_to_field = defaultdict(list)
for field in fields:
file_type_to_field[field.file_type].append(field)
return file_type_to_field
def data_frame_from_hd5(hd5_paths: Iterable[h5py.File], fields: List[Field], id: int,
critical_fields: Optional[Set[Field]] = None) -> Optional[pd.DataFrame]:
if critical_fields is None:
critical_fields = set()
file_type_to_field = split_file_types(fields)
rows = []
for file_type, file_type_fields in file_type_to_field.items():
column_names = list(map(str, file_type_fields))
if len(column_names) > len(set(column_names)):
raise ValueError(
f'For file type {file_type}, there are duplicate field names in {column_names}.'
f'Overlapping columns will overwrite each other.'
)
for hd5_path in hd5_paths:
with h5py.File(hd5_path, 'r') as hd5:
instances = instances_from_hd5(hd5, file_type)
rows += list(
_data_from_instances(instances, file_type_fields, set(file_type_fields) & critical_fields, id))
if not rows:
return
df = pd.DataFrame(rows)
df.index = pd.to_timedelta(df[AGE])
del df[AGE]
df = df.dropna(how='all')
if df.empty:
return
df[ID] = os.path.splitext(os.path.basename(hd5_paths[0]))[0]
for field in fields:
if str(field) not in df:
df[str(field)] = np.nan
return df.sort_index()
def _data_from_instances(instances: Iterator[Instance], fields: List[Field], critical_fields: Set[Field], id: int) \
-> Dict[str, Any]:
"""
Yields dictionaries {AGE: age, field_1: value_1, ..., field_n: value_n}
Assumes all fields have file types that match the instances
"""
critical_fields_empty = len(critical_fields) == 0
age_field_name = FILE_TYPE_TO_AGE_FIELD[fields[0].file_type].name
baseline_field_name = _baseline_field(fields[0].file_type).name
for instance in instances:
# If critical_fields empty, assume any field is sufficient to yield an instance
contains_critical = critical_fields_empty
age = age_from_instance(age_field_name, instance)
if pd.isnull(age): # require age
continue
baseline = age_from_instance(baseline_field_name, instance)
field_to_value = {AGE: age, BASELINE: baseline}
for field in fields:
value = field_from_instance(field, instance)
if value is None or (field.filter is not None and not field.filter(value)):
continue
if field.function is not None:
try:
value = field.function(value)
except Exception as err:
error_dict = {"id": id, "file": field.file_type, "field": field.name, "error": str(err)}
logger.warning(json.dumps(error_dict))
# Once a critical value is found set contains_critical to True and keep it True
contains_critical = contains_critical or (value and field in critical_fields)
field_to_value[str(field)] = value
else: # happens if for loop completes with all fields found
if contains_critical:
yield field_to_value
FILE_TYPE_TO_AGE_FIELD: Dict[str, Field] = {
# mappings from file types to Fields representing age fields
# e.g.
# file_type_name: Field(age_field_name, FieldType.age, file_type=file_type_name)
}
def _baseline_field(file_type):
return Field(BASELINE_AGE, FieldType.age, file_type=file_type)
| [
"[email protected]"
] | |
6933106ff0df2eb00e3c7ef2da963992e0091811 | 3b5b326e0cf9b4e65ed12c104b5ff07f49c9ef52 | /introdução-web/Oficina/servico_carro.py | 411ff0351b8fbb772bd397e2ab46b996bbdb6226 | [] | no_license | lcgandolfi/DevWeb | 641569306bad78b441b86ca0e2dcccd115130682 | 76c7558ade526f9faa39750555458cb67cc03179 | refs/heads/master | 2023-07-14T17:20:53.457506 | 2021-09-02T23:40:21 | 2021-09-02T23:40:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | class ServicosAutomovel:
def __init__(self,servicos ,automovel,data):
self.servicos = servicos
self.automovel = automovel
self.data = data
| [
"[email protected]"
] | |
2785105a509415ffd42a0b2ee22247d42f05b027 | dd5e84730c1e834a542dd41d287000b74d5927d8 | /scripts/20_plot_results.py | e74521b059e0c473945f9c2a9969388af93e3ad3 | [
"MIT"
] | permissive | chrisroadmap/loaded-dice | d464b719a612a199fcebcfa01a95ee3073b13570 | 76c05460c483a745014b46acbf77b776c1c21204 | refs/heads/main | 2023-08-31T19:15:05.970381 | 2023-08-18T10:08:19 | 2023-08-18T10:08:19 | 508,031,466 | 1 | 1 | MIT | 2023-07-17T13:42:09 | 2022-06-27T19:07:42 | Python | UTF-8 | Python | false | false | 9,980 | py | import os
import matplotlib.pyplot as pl
from matplotlib.lines import Line2D
from matplotlib.ticker import ScalarFormatter
from matplotlib.patches import Patch
import numpy as np
import pandas as pd
from fair.energy_balance_model import EnergyBalanceModel
from fair.forcing.ghg import meinshausen2020
pl.rcParams['figure.figsize'] = (11.9/2.54, 11.9/2.54)
pl.rcParams['font.size'] = 7 #20
pl.rcParams['font.family'] = 'Arial'
pl.rcParams['ytick.direction'] = 'in'
pl.rcParams['ytick.minor.visible'] = True
pl.rcParams['ytick.major.right'] = True
pl.rcParams['ytick.right'] = True
pl.rcParams['xtick.direction'] = 'in'
pl.rcParams['xtick.minor.visible'] = True
pl.rcParams['xtick.major.top'] = True
pl.rcParams['xtick.top'] = True
pl.rcParams['axes.spines.top'] = True
pl.rcParams['axes.spines.bottom'] = True
pl.rcParams['figure.dpi'] = 150
here = os.path.dirname(os.path.realpath(__file__))
os.makedirs(os.path.join(here, '..', 'figures'), exist_ok=True)
ensemble_size=1001
df_configs = pd.read_csv(os.path.join(here, '..', 'data_input', 'fair-2.1.0', 'calibrated_constrained_parameters.csv'), index_col=0)
configs = df_configs.index
ecs = np.zeros(1001)
tcr = np.zeros(1001)
for i, config in enumerate(configs):
ebm = EnergyBalanceModel(
ocean_heat_capacity = df_configs.loc[config, 'c1':'c3'],
ocean_heat_transfer = df_configs.loc[config, 'kappa1':'kappa3'],
deep_ocean_efficacy = df_configs.loc[config, 'epsilon'],
gamma_autocorrelation = df_configs.loc[config, 'gamma'],
forcing_4co2 = df_configs.loc[config, 'F_4xCO2'],
timestep=3,
stochastic_run=False,
)
ebm.emergent_parameters()
ecs[i], tcr[i] = (ebm.ecs, ebm.tcr)
yunit = {
'CO2_FFI_emissions': 'GtCO$_2$ yr$^{-1}$',
'CO2_total_emissions': 'GtCO$_2$ yr$^{-1}$',
'CO2_concentration': 'ppm',
'temperature': '°C relative to 1850-1900',
'social_cost_of_carbon': '\$(2020) tCO$_2^{-1}$',
'radiative_forcing': 'W m$^{-2}$'
}
title = {
'CO2_FFI_emissions': '(a) CO$_2$ fossil emissions',
'CO2_total_emissions': '(a) CO$_2$ emissions',
'CO2_concentration': '(b) CO$_2$ concentrations',
'temperature': '(b) Surface temperature',
'social_cost_of_carbon': 'Social cost of carbon',
'radiative_forcing': '(c) Effective radiative forcing'
}
ylim = {
'CO2_FFI_emissions': (-20, 55),
'CO2_total_emissions': (-20, 55),
'CO2_concentration': (300, 750),
'temperature': (0.5, 4),
'social_cost_of_carbon': (0, 4000),
'radiative_forcing': (0, 7)
}
labels = {
'dice': 'DICE-2016R "optimal"',
'dice_disc2pct': "Rennert et al.",
'dice_below2deg': "Well-below 2°C",
'dice_1p5deglowOS': "1.5°C overshoot"
}
colors = {
'dice': "#003f5c",
'dice_disc2pct': "#1b98e0",
'dice_below2deg': "#bc5090",
'dice_1p5deglowOS': "#ffa600"
}
outputs = {}
np.set_printoptions(precision=3)
for scenario in ['dice', 'dice_disc2pct', 'dice_below2deg', 'dice_1p5deglowOS']:
outputs[scenario] = {}
for variable in ['net_zero_year', 'CO2_concentration', 'temperature', 'social_cost_of_carbon', 'CO2_FFI_emissions', 'CO2_total_emissions', 'radiative_forcing']:
df = pd.read_csv(os.path.join(here, '..', 'data_output', 'results', f'{scenario}__{variable}.csv'), index_col=0)
outputs[scenario][variable] = df[:].T.values
print(scenario)
print('CO2 FFI emissions 2101', np.nanpercentile(outputs[scenario]['CO2_FFI_emissions'][26, :], (5, 16, 33, 50, 67, 84, 95))) # CO2 fossil emissions 2100
print('CO2 FFI emissions 2050', np.nanpercentile(outputs[scenario]['CO2_FFI_emissions'][9, :], (5, 16, 33, 50, 67, 84, 95))) # CO2 fossil emissions 2100
print('CO2 total emissions 2101', np.nanpercentile(outputs[scenario]['CO2_total_emissions'][26, :], (5, 16, 33, 50, 67, 84, 95))) # CO2 fossil emissions 2100
print('CO2 total emissions 2050', np.nanpercentile(outputs[scenario]['CO2_total_emissions'][9, :], (5, 16, 33, 50, 67, 84, 95))) # CO2 fossil emissions 2100
print('SCC 2023', np.nanpercentile(outputs[scenario]['social_cost_of_carbon'][0, :], (5, 16, 33, 50, 67, 84, 95))) # social cost of carbon 2020
print('temperature 2101', np.nanpercentile(outputs[scenario]['temperature'][26, :], (5, 16, 33, 50, 67, 84, 95))) # temperature 2100
print('temperature peak', np.nanpercentile(np.max(outputs[scenario]['temperature'], axis=0), (5, 16, 33, 50, 67, 84, 95))) # peak temperature
print('forcing 2101', np.nanpercentile(outputs[scenario]['radiative_forcing'][26, :], (5, 16, 33, 50, 67, 84, 95))) # radiative forcing 2100
print('net zero year ', np.nanpercentile(outputs[scenario]['net_zero_year'][:], (5, 16, 33, 50, 67, 84, 95))) # net zero year
print()
# Headline plot with three scenarios
fig, ax = pl.subplots(2,2)
for i, variable in enumerate(['CO2_total_emissions', 'temperature', 'radiative_forcing']):
for scenario in ['dice', 'dice_below2deg', 'dice_1p5deglowOS']:
ax[i//2,i%2].fill_between(
np.arange(2023, 2134, 3),
np.nanpercentile(outputs[scenario][variable][:37, :], 5, axis=1),
np.nanpercentile(outputs[scenario][variable][:37, :], 95, axis=1),
color=colors[scenario],
alpha=0.2,
lw=0
)
ax[i//2,i%2].fill_between(
np.arange(2023, 2134, 3),
np.nanpercentile(outputs[scenario][variable][:37, :], 16, axis=1),
np.nanpercentile(outputs[scenario][variable][:37, :], 84, axis=1),
color=colors[scenario],
alpha=0.2,
lw=0
)
ax[i//2,i%2].plot(
np.arange(2023, 2134, 3),
np.nanmedian(outputs[scenario][variable][:37, :], axis=1),
color=colors[scenario],
label=labels[scenario],
)
ax[i//2,i%2].set_xlim(2023,2125)
ax[i//2,i%2].set_title(title[variable])
ax[i//2,i%2].set_ylabel(yunit[variable])
ax[i//2,i%2].set_ylim(ylim[variable])
ax[i//2,i%2].set_xticks(np.arange(2025, 2130, 25))
if i==0:
ax[i//2,i%2].axhline(0, ls=':', color='k')
ax[i//2,i%2].axvline(2100, ls=':', color='k')
ax[1,0].legend(fontsize=6, frameon=False)
fig.tight_layout()
for scenario in ['dice', 'dice_below2deg', 'dice_1p5deglowOS']:
ax[1,1].hist(
outputs[scenario]['social_cost_of_carbon'][0, :],
alpha=0.5,
label=labels[scenario],
color=colors[scenario],
density=True,
bins=np.logspace(-1, 4, 101),
log=True
)
ax[1,1].set_xscale('log')
pl.rcParams['xtick.minor.visible'] = True
ax[1,1].set_xlim(6, 10000)
ax[1,1].set_title("(d) Social cost of carbon in 2023")
ax[1,1].set_xlabel("(2020\$)")
ax[1,1].set_ylabel("Density")
ax[1,1].set_yticklabels([])
ax[1,1].xaxis.set_major_formatter(ScalarFormatter())
line_this = Line2D([0], [0], label='Median', color='k')
u68_this = Patch(facecolor='k', lw=0, alpha=0.4, label='16-84% range')
u90_this = Patch(facecolor='k', lw=0, alpha=0.2, label='5-95% range')
ax[0,1].legend(handles=[line_this, u68_this, u90_this], fontsize=6, frameon=False, loc='upper left')
fig.tight_layout()
pl.savefig(os.path.join(here, '..', 'figures', f'projections_scc_ecs.png'))
pl.savefig(os.path.join(here, '..', 'figures', f'projections_scc_ecs.pdf'))
pl.show()
# Add in 2pct discount case (four scenarios)
fig, ax = pl.subplots(2,2)
for i, variable in enumerate(['CO2_total_emissions', 'temperature', 'radiative_forcing']):
for scenario in ['dice', 'dice_disc2pct', 'dice_below2deg', 'dice_1p5deglowOS']:
ax[i//2,i%2].fill_between(
np.arange(2023, 2134, 3),
np.nanpercentile(outputs[scenario][variable][:37, :], 5, axis=1),
np.nanpercentile(outputs[scenario][variable][:37, :], 95, axis=1),
color=colors[scenario],
alpha=0.2,
lw=0
)
# ax[i//2,i%2].fill_between(
# np.arange(2023, 2134, 3),
# np.nanpercentile(outputs[scenario][variable][:37, :], 16, axis=1),
# np.nanpercentile(outputs[scenario][variable][:37, :], 84, axis=1),
# color=colors[scenario],
# alpha=0.2,
# lw=0
# )
ax[i//2,i%2].plot(
np.arange(2023, 2134, 3),
np.nanmedian(outputs[scenario][variable][:37, :], axis=1),
color=colors[scenario],
label=labels[scenario],
)
ax[i//2,i%2].set_xlim(2023,2125)
ax[i//2,i%2].set_title(title[variable])
ax[i//2,i%2].set_ylabel(yunit[variable])
ax[i//2,i%2].set_ylim(ylim[variable])
ax[i//2,i%2].set_xticks(np.arange(2025, 2130, 25))
if i==0:
ax[i//2,i%2].axhline(0, ls=':', color='k')
ax[i//2,i%2].axvline(2100, ls=':', color='k')
ax[1,0].legend(fontsize=6, frameon=False, loc='upper left')
fig.tight_layout()
for scenario in ['dice', 'dice_disc2pct', 'dice_below2deg', 'dice_1p5deglowOS']:
ax[1,1].hist(
outputs[scenario]['social_cost_of_carbon'][0, :],
alpha=0.5,
label=labels[scenario],
color=colors[scenario],
density=True,
bins=np.logspace(-1, 4, 101),
log=True
)
ax[1,1].set_xscale('log')
pl.rcParams['xtick.minor.visible'] = True
ax[1,1].set_xlim(6, 10000)
ax[1,1].set_title("(d) Social cost of carbon in 2023")
ax[1,1].set_xlabel("(2020\$)")
ax[1,1].set_ylabel("Density")
ax[1,1].set_yticklabels([])
ax[1,1].xaxis.set_major_formatter(ScalarFormatter())
line_this = Line2D([0], [0], label='Median', color='k')
# u68_this = Patch(facecolor='k', lw=0, alpha=0.4, label='16-84% range')
u90_this = Patch(facecolor='k', lw=0, alpha=0.2, label='5-95% range')
ax[0,1].legend(handles=[line_this, u90_this], fontsize=6, frameon=False, loc='upper left')
fig.tight_layout()
pl.savefig(os.path.join(here, '..', 'figures', f'projections_scc_ecs_fourscen.png'))
pl.savefig(os.path.join(here, '..', 'figures', f'projections_scc_ecs_fourscen.pdf'))
pl.show()
| [
"[email protected]"
] | |
16fab1befc0dde26d0fd57cc24fb39addc400dd5 | 2b5e1cdb4129558ec959d9af4a07f4febb37b9ef | /launcherByPython | 5dd3716e8fb410734a2f37aea7405764731a2529 | [] | no_license | Madaerpao/tlbbLauncher | c17fad6f4650eb7f2240979a307ff016b9a1f12b | f8293afc309cd31c29341cc8cee40abad0424728 | refs/heads/main | 2023-04-05T00:24:45.785501 | 2021-04-14T06:50:21 | 2021-04-14T06:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | #!bin/bash/python3
# -*- coding=utf-8 -*-
import socket
import threading
import subprocess
def sock_close(conn):
    """Close *conn*, swallowing any error (double close, broken socket, ...).

    `except Exception` instead of the original bare `except:` keeps the
    "never raises" contract without also swallowing KeyboardInterrupt or
    SystemExit.
    """
    try:
        conn.close()
    except Exception:
        pass
# Forwarding: relay bytes between two connected sockets
def transport(src, dst):
    """Pump bytes from *src* to *dst* until EOF or error, then close both.

    Each received chunk is logged in hex before being forwarded.
    """
    # Resolve the endpoint names up front: the original read them after the
    # first recv() inside the try-block, so an immediate socket error made
    # the final log line below raise NameError.
    sname = src.getsockname()[0]
    dname = dst.getsockname()[0]
    try:
        recp = src.recv(1024)
        while recp:
            print(f"[{sname} to {dname}]> " + recp.hex())
            dst.send(recp)
            recp = src.recv(1024)
    except OSError:
        # Connection reset / closed by the peer: treat as end of stream.
        pass
    print(f'[{sname} to {dname}] End...')
    # Close both directions; ignore errors from already-closed sockets.
    for sock in (src, dst):
        try:
            sock.close()
        except OSError:
            pass
# Listening: accept incoming clients and bridge them upstream
def accept_serv(remote_addr, sock_server):
    """Accept clients forever and bridge each one to *remote_addr*.

    Every accepted connection gets a fresh upstream socket plus a pair of
    transport() threads, one per direction.
    """
    while True:
        client_conn, client_addr = sock_server.accept()
        print(f"Accept connection from {client_addr[0]}:{client_addr[1]}")
        upstream = socket.socket()
        upstream.connect(remote_addr)
        for a_sock, b_sock in ((client_conn, upstream), (upstream, client_conn)):
            threading.Thread(target=transport, args=(a_sock, b_sock)).start()
if __name__ == '__main__':
    # Remote login server: tlbb/server/config/serverinfo.ini [server1] port0
    login_addr = ('ip of your game server', 0)
    # Remote game server: tlbb/server/config/serverinfo.ini [server0] port0
    game_addr = ('ip of your game server', 0)
    # Game client install directory (the literal means "game root directory").
    game_path = r'游戏根目录'
    # Set up the two local listeners: socket1 (login proxy) binds port 0 so
    # the OS picks a free port; socket2 (game proxy) binds the same port
    # number as the remote game server.
    socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    socket1.bind(('127.0.0.1', 0))
    socket1.listen()
    lip1, lport1 = socket1.getsockname()
    socket2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    socket2.bind(('127.0.0.1', game_addr[1]))
    socket2.listen()
    lip2, lport2 = socket2.getsockname()
    # Launch the program: write the local login proxy's address into the
    # client's server list, then start the game executable.
    with open(game_path + r"\Patch\LoginServer.txt", 'w') as file:
        text = "VERSION 1\n\nSERVER_BEGIN\n" + \
            "我的天龙,我的天龙,1,201,3,1,0,天龙八部," + f"{lip1}:{lport1},"*4 + "2\nSERVER_END\n"
        file.write(text)
    subprocess.Popen(game_path + r"\Bin\Game.exe -fl C0A80108 3039 12D82B")
    # Start accepting connections: login traffic is bridged to login_addr and
    # game traffic to game_addr (accept_serv loops forever, so join() blocks).
    t_login = threading.Thread(target=accept_serv, args=(login_addr, socket1))
    t_game = threading.Thread(target=accept_serv, args=(game_addr, socket2))
    t_login.start()
    t_game.start()
    t_login.join()
    t_game.join()
    print("End of Program")
| [
"[email protected]"
] | ||
2cb12d91c5ce732999eb89bd21d0e22224d089b4 | 61c9c7c3eadec0a0432dba387e7466df6759d914 | /paws/operations/ARRAYS/ArrayYMean.py | 02299936494860b72b61d12092ff1ee04e701804 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive"
] | permissive | rwalroth/paws | 6f0a397daaeec9805c9223b64d177d010415b23b | bd591b0c0e7e9f0163ef5bbfce53ff51cee95532 | refs/heads/master | 2020-07-05T10:20:44.051138 | 2019-10-01T21:13:35 | 2019-10-01T21:13:35 | 202,622,126 | 1 | 0 | null | 2019-08-15T22:57:31 | 2019-08-15T22:57:31 | null | UTF-8 | Python | false | false | 849 | py | from collections import OrderedDict
import numpy as np
from ..Operation import Operation
# Declared I/O slots for the ArrayYMean operation: the input is a list of
# n-by-2 arrays, the output a single n-by-2 array of x and mean(y).
inputs = OrderedDict(x_y_arrays=[])
outputs = OrderedDict(x_ymean=None)
class ArrayYMean(Operation):
    """Average the second (y) column of one or more n-by-2 arrays."""

    def __init__(self):
        super(ArrayYMean, self).__init__(inputs, outputs)
        self.input_doc['x_y_arrays'] = 'list of n-by-2 arrays'
        self.output_doc['x_ymean'] = 'n-by-2 array of x and mean(y)'

    def run(self):
        """Compute the pointwise mean curve and store it in the outputs."""
        curves = self.inputs['x_y_arrays']
        if len(curves) > 0:
            # x-values come from the first curve; y is the pointwise mean.
            averaged = np.zeros(curves[0].shape)
            averaged[:, 0] = curves[0][:, 0]
            averaged[:, 1] = np.mean([curve[:, 1] for curve in curves], axis=0)
        else:
            # No input curves: the result stays undefined.
            averaged = None
        self.outputs['x_ymean'] = averaged
        return self.outputs
| [
"[email protected]"
] | |
ad1ce5baf8ce68b999f340a1a33a2c306518ee2e | 38549f01416081db6880038608c32b5e23a20be5 | /Day_24/python_files/day24_emotion_analyzer.py | e7d25e5cc1103874b4115f90123c1dc0ab00e860 | [
"MIT"
] | permissive | DuckBoss/VAIL-Training | 51da4d2919ea3eeb477dddf3dd8e017af9d885dc | de219465e9484cee619c915b37d544dc86add0cb | refs/heads/main | 2023-03-23T07:39:17.580327 | 2021-03-05T09:20:22 | 2021-03-05T09:20:22 | 336,456,326 | 0 | 0 | null | 2021-02-19T06:11:47 | 2021-02-06T04:37:39 | Jupyter Notebook | UTF-8 | Python | false | false | 2,513 | py | # -*- coding: utf-8 -*-
"""day24_emotion_analyzer.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16BJQSbiJlWOhNCAcESRbgLLtzCoac2WR
### Import required libraries -
"""
import librosa
import glob
import librosa.display
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import tensorflow.keras
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import RMSprop
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
import os
"""### Import the testing data (wav file) -"""
# Load the recording and plot its waveform.
data, sampling_rate = librosa.load('/content/output10.wav')
plt.figure(figsize=(15, 5))
librosa.display.waveplot(data, sr=sampling_rate)
"""### Load the trained model -"""
# Import the model from the h5 file
# loaded_model = load_model('/content/Emotion_Voice_Detection_Model.h5')
# loaded_model.summary()
# loading json and creating model: the architecture comes from model.json,
# the weights from the .h5 file.
from tensorflow.keras.models import model_from_json
with open('/content/model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("/content/Emotion_Voice_Detection_Model.h5")
# Recompile so the restored model can be used for prediction/evaluation.
opt = RMSprop(lr=0.00001, decay=1e-6)
loaded_model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
print("Loaded model from disk")
"""### Read and prepare audio data from the file -"""
#livedf= pd.DataFrame(columns=['feature'])
# Reload the clip at 44.1 kHz (22050*2), take 13 MFCCs, average them over
# axis 0, and shape the result as a single-row DataFrame.
X, sample_rate = librosa.load('/content/output10.wav', res_type='kaiser_fast', duration=2.5,sr=22050*2,offset=0.5)
sample_rate = np.array(sample_rate)
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13),axis=0)
featurelive = mfccs
livedf2 = pd.DataFrame(data=featurelive)
livedf2 = livedf2.stack().to_frame().T
print(livedf2)
"""### Make a prediction -"""
# Add a trailing axis so the input matches the model's expected rank.
twodim= np.expand_dims(livedf2, axis=2)
livepreds = loaded_model.predict(twodim,
                                 batch_size=32,
                                 verbose=1)
livepreds
"""### Convert the prediction into a readable format -"""
# argmax over the class scores indexes into this label list.
prediction_classes = ["female_angry", "female_calm", "female_fearful", "female_happy", "female_sad", "male_angry", "male_calm", "male_fearful", "male_happy", "male_sad"]
livepreds1=livepreds.argmax(axis=1)
liveabc = livepreds1.astype(int).flatten()
print(f"Predicted emotion: {prediction_classes[liveabc[0]]}") | [
"[email protected]"
] | |
5fb9e7ffdd9447e5cce93082ccc561665d782ba7 | 87966dabac9acf0170e873cf8ce75e81e45374a0 | /Lab8/Hryshchuk-2-1-8/__init__(python_rest).py | bfb005c28dc557e337697b1cf618e14c7cc9c250 | [
"MIT"
] | permissive | triod315/SysProgLabworks | d7fdf75024413c76519950881680617a507ea966 | c9f98e8d3d507b738334f459bb76924fb280196d | refs/heads/master | 2020-05-31T01:35:58.940293 | 2019-06-20T17:54:59 | 2019-06-20T17:54:59 | 190,051,560 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from flask import Flask, request, json
import os
import sys
import smtplib
from email.mime.text import MIMEText
from email.header import Header
from datetime import datetime
import requests
app = Flask(__name__)

# Flat-file user "database": one registered login per line.
DB_FILE_NAME = '/user/src/app/shared_folder/users.txt'
# curl -X GET localhost:5000/users/IsLoginFree?login=admin2
@app.route('/users/IsLoginFree', methods = ['GET'])
def isLoginFree():
    """Return a JSON boolean: True when the requested login is not taken."""
    if not os.path.exists(DB_FILE_NAME):
        # No database file yet, so every login is free.
        return json.dumps(True)
    with open(DB_FILE_NAME) as f:
        taken = [line.strip() for line in f.readlines()]
    requested = request.args.get('login')
    return json.dumps(requested not in taken)
# curl -X POST localhost:5000/users/AddLogin -d "{\"login\": \"admin\"}" -H "Content-Type: application/json; charset=UTF-8"
@app.route('/users/AddLogin', methods = ['POST'])
def addLogin():
    """Append a new login to the user database; duplicates get HTTP 400."""
    login = str(json.loads(request.data)['login'])
    if os.path.exists(DB_FILE_NAME):
        with open(DB_FILE_NAME) as f:
            existing = [line.strip() for line in f.readlines()]
        if login in existing:
            return 'This login already exists', 400
    mode = 'a' if os.path.exists(DB_FILE_NAME) else 'w'
    with open(DB_FILE_NAME, mode) as f:
        f.write(login + '\n')
    return json.dumps(login)
@app.route('/users/checkFile', methods = ['POST'])
def checkFile():
    """Fetch the URL from the POST body and return its HTTP status as JSON.

    Any failure while fetching (bad URL, DNS error, connection refused, ...)
    is reported as status code 0.
    """
    source = str(json.loads(request.data)['url'])
    try:
        page = requests.get(source)
        page.encoding = 'utf-8'
        httpCode = page.status_code
    # The original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch Exception so only real runtime failures
    # map to code 0.
    except Exception:
        httpCode = 0
    return json.dumps(httpCode)
| [
"[email protected]"
] | |
262470f846c7e0deb175e5ed89df29920e2ed718 | 49ee9dacfab0e4bfc7750f3b81bd8ddfa8bd14b3 | /djangoProject/apps/blog/dev_migrations/0030_auto_20201201_1124.py | 0f4759bcb5e2401b4926ec2a84e9da054bd16a6e | [] | no_license | QingChang1204/djangoProject | 97c069d08cb2d4959b862fbb546440e349d21c27 | 559be0cfba1477a3d110d776523c4ccddafb1cf7 | refs/heads/master | 2023-01-19T04:56:18.447895 | 2020-12-02T15:11:42 | 2020-12-02T15:11:42 | 286,429,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | # Generated by Django 3.1.2 on 2020-12-01 11:24
from django.db import migrations
class Migration(migrations.Migration):
    """Enforce that each article can carry a given tag at most once."""

    dependencies = [
        ('blog', '0029_auto_20201201_1120'),
    ]

    operations = [
        # Composite uniqueness constraint on (article, tag) for Tagship.
        migrations.AlterUniqueTogether(
            name='tagship',
            unique_together={('article', 'tag')},
        ),
    ]
| [
"[email protected]"
] | |
0114f117a6d559f858e8d3b6a931cd7f002e046e | 9075d5a2b228de748227a36e1a9c5fa279d1a444 | /Desktop/Desktop/Semester5/AppliedScience/Python Programming/Week 06 Lecture/Lecture 06 Total Code/week06_05_MovieList.py | 3af85012fb33a9884b2493fb844f065d405dff33 | [] | no_license | fatihkgm/mobileApplication-react-fench-id | 986dfd3ddac217a8b0aee0c658c03f3a11af4094 | 78ead5874e15d8c2a6fac803c31ec52fbadc38ec | refs/heads/master | 2023-01-23T04:16:27.109957 | 2020-10-16T04:57:51 | 2020-10-16T04:57:51 | 314,138,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | FILENAME = "movies.txt"
def write_movies(movies):
    """Overwrite the movie data file with one title per line."""
    lines = [title + "\n" for title in movies]
    with open(FILENAME, "w") as file:
        file.writelines(lines)
def read_movies():
    """Load the movie list from disk, one title per line."""
    with open(FILENAME) as file:
        return [line.replace("\n", "") for line in file]
def list_movies(movies):
    """Print the movies as a 1-based numbered list, then a blank line."""
    for number, title in enumerate(movies, start=1):
        print(str(number) + ". " + title)
    print()
def add_movie(movies):
    """Prompt for a title, append it, and persist the updated list."""
    title = input("Movie: ")
    movies.append(title)
    write_movies(movies)
    print(title + " was added.\n")
def delete_movie(movies):
    """Prompt for a 1-based position, remove that movie, and persist."""
    position = int(input("Number: "))
    removed = movies.pop(position - 1)
    write_movies(movies)
    print(removed + " was deleted.\n")
def display_menu():
    """Show the program banner and the available commands."""
    menu_lines = (
        "The Movie List program",
        "",
        "COMMAND MENU",
        "list - List all movies",
        "add - Add a movie",
        "del - Delete a movie",
        "exit - Exit program",
        "",
    )
    for line in menu_lines:
        print(line)
def main():
    """Run the interactive command loop for the movie list program."""
    display_menu()
    movies = read_movies()
    # Map commands to their handlers; 'exit' is handled separately
    # because it breaks the loop.
    actions = {
        "list": list_movies,
        "add": add_movie,
        "del": delete_movie,
    }
    while True:
        command = input("Command: ")
        if command == "exit":
            print("Bye!")
            break
        action = actions.get(command)
        if action is not None:
            action(movies)
        else:
            print("Not a valid command. Please try again.")
# Run the interactive menu only when executed as a script.
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
37ecc6de058f4da68843c1d9b4ecac697bb48148 | 46a468301f12494091fff124d1e4541f9f080c62 | /src/algorithms/DynamicProgramming/lis.py | 1f06338cd6ceee7e041ec3a8d6c13788b90f34a9 | [] | no_license | anuragsarkar97/hailey | 46c9d4328e80e3c99e7d98fa2f89599d4aec79c8 | a6b3dc0576ad8839ce1f4def0e1c1758b8857b2a | refs/heads/master | 2020-04-19T08:59:01.764133 | 2020-03-20T05:43:41 | 2020-03-20T05:43:41 | 168,096,003 | 0 | 1 | null | 2020-02-13T07:40:06 | 2019-01-29T05:37:59 | Python | UTF-8 | Python | false | false | 249 | py |
def lis_dp(a):
    """Return the length of the longest strictly increasing subsequence of a.

    Classic O(n^2) dynamic programme: dp[i] is the LIS length ending at a[i].

    Fixes vs. the original:
    - dp was initialised to 0 (only dp[0] = 1), so subsequences that do not
      start at index 0 were undercounted by one (e.g. [5, 1, 2] gave 1).
    - an empty input crashed with IndexError; it now returns 0.
    - removed the stray debug print of the dp table.
    """
    if not a:
        return 0
    # Every element on its own is an increasing subsequence of length 1.
    dp = [1] * len(a)
    for i in range(1, len(a)):
        for j in range(i):
            if a[i] > a[j] and dp[i] < dp[j] + 1:
                dp[i] = 1 + dp[j]
    return max(dp)
a = [4,3,1,4,2,5,6,7]
print(lis_dp(a)) | [
"[email protected]"
] | |
43d8f41a6cdd21d994e2ceb228e8dbd6514dbf75 | 27c9eb81a68dba24c1f13457025fe9578eb782ca | /app/modeladmins/accounts_transfer.py | 8f8dff0c5a9c681d9d6f12b9ba47200bf8924b4e | [] | no_license | bkm009/Daily-Finance-Django | 39469536ea9e16d86a4c9cba02851e9055696d6c | 60ae25e1d2f1352f3d90919a9f4a90f50e3792aa | refs/heads/master | 2022-12-05T12:25:16.000741 | 2020-08-19T07:14:12 | 2020-08-19T07:14:12 | 288,658,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from django.contrib.admin import ModelAdmin
from app.modelforms.accounts_transfer_form import AccountsTransferForm
class AccountTransferAdmin(ModelAdmin):
    """Admin page used purely as an 'add transfer' form.

    View/change and delete are disabled below, so transfers can only be
    created, never inspected or removed through the admin.
    """

    def has_view_or_change_permission(self, request, obj=None):
        # Hide the changelist/detail pages entirely.
        return False

    def has_delete_permission(self, request, obj=None):
        # Transfers cannot be deleted from the admin.
        return False

    form = AccountsTransferForm
    # Excluded model fields; presumably filled in by the form logic rather
    # than the user - TODO confirm against AccountsTransferForm.
    exclude = ["account", "amount", "txn_date", "txn_reason", "txn_type"]
    fieldsets = (
        ("Transfer Details", {'fields': (
            ("ac_from", "ac_to"),
            ("tr_amount", "tr_remark"),
        )}),
    )
| [
"[email protected]"
] | |
24ab0e23e412856fbf3251c70d415fd78f18a9db | b254f218532fb565ca9a4acb08241dbed34dc474 | /wordplay/longest_palindrome_better.py | fd265659d67af907fe42873c440b329be71439f5 | [] | no_license | BMariscal/intermediate_python | c00420681e611069294709a6588fd551caa1cd1e | f5da095b1097de9495c111640001ab74dfad7dc4 | refs/heads/master | 2020-05-23T18:05:27.948829 | 2017-03-31T12:39:48 | 2017-03-31T12:39:48 | 84,777,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | import scrabble
import time
start = time.time()

# Find the longest palindromic word in the scrabble word list.
longest = ""
for word in scrabble.wordlist:
    # Cheap length check first, then a reversed-slice comparison. The old
    # inner loop compared every index pair and kept running even after the
    # first mismatch (the file's own comment noted it was too slow).
    if len(word) > len(longest) and word == word[::-1]:
        longest = word
print(longest)

stop = time.time()
print("time elapsed: %.1f seconds"%(stop-start))
| [
"[email protected]"
] | |
474cafb91cac2b18a34e05571e47f5101b705bf1 | d4b55377b92c287435b3b42254abb5263a24df5d | /18 Projects/Project_hskinner_attempt_2018-12-12-10-21-35_ProjectTemplate/ProjectTemplate/my_module/test_functions.py | 7542a3ae84be978b63872552629665b1e077e377 | [] | no_license | brn016/cogs18 | 3a7ce413504683dff5396e79aad81f1679469c71 | ee866ef7ed341f5d4f77512887c3a5521ec5ff7f | refs/heads/master | 2020-03-31T20:22:05.213429 | 2018-12-14T04:36:54 | 2018-12-14T04:36:54 | 152,537,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,531 | py | """Test for my functions.
"""
import random
from functions import *
from dictionaries import *
def test_remember_punc():
assert isinstance(remember_punc('hello.'), str)
assert remember_punc ('hello.') == '.'
assert isinstance(remember_punc('hello'), str)
assert remember_punc ('hello') == ''
def test_choose_num():
assert isinstance(choose_num(0,3), str)
assert choose_num(0,3) == '0' or '1' or '2' or '3'
def test_replace_words():
test_genre = {
'plural_noun' : ['plural_noun1', 'plural_noun2'],
'noun' : ['noun1', 'noun2'],
'adjective' : ['adjective1', 'adjective2'],
'name' : ['name1', 'name2'],
'past_verb' : ['past_verb1', 'past_verb2'],
'verb' : ['verb1', 'verb2'],
'adverb': ['adverb1', 'adverb2'],
'location': ['location1', 'location2']}
test_string = 'hello /name!'
assert isinstance(replace_words(test_string,test_genre), str)
assert replace_words('/plural_noun', test_genre) == 'plural_noun1' or 'plural_noun2'
assert replace_words('/noun', test_genre) == 'noun1' or 'noun2'
assert replace_words('/adjective', test_genre) == 'adjective1' or 'adjective2'
assert replace_words('/name', test_genre) == 'name1' or 'name2'
assert replace_words('/past_verb', test_genre) == 'past_verb1' or 'past_verb2'
assert replace_words('/verb', test_genre) == 'verb1' or 'verb2'
assert replace_words('/adverb', test_genre) == 'adverb1' or 'adverb2'
assert replace_words('/location', test_genre) == 'location1' or 'location2'
assert replace_words('/number', test_genre) == '0' or '1' or '2' or '3'
def test_remember_ends():
assert isinstance(remember_ends('hello!'), str)
assert remember_ends('hello!') == '!'
assert remember_ends('hello.') == '.'
assert remember_ends('hello?') == '?'
assert remember_ends('hello') == ''
def test_remove_ends():
assert isinstance(remove_ends('hello!'), str)
assert isinstance(remove_ends('hello'), str)
assert remove_ends('hello!') == 'hello'
assert remove_ends('hello') == 'hello'
def test_cap_sentences():
test_string = 'hi. glad to hear'
assert isinstance(cap_sentences('.',test_string), str)
assert cap_sentences('.', test_string) == 'Hi. Glad to hear'
def test_fix_all_caps():
test_string = 'hi. um, HELLO? swell! goodbye.'
assert isinstance(fix_all_caps(test_string), str)
assert fix_all_caps(test_string) == 'Hi. Um, HELLO? Swell! Goodbye.'
def test_madlib():
test_string1 = '/name1 is a /adjective1 /noun1 from /location1.'
test_string2 = '/number1 /plural_noun1 will /verb1 after they /past_verb1!'
test_string3 = 'Are you /adjective1 for the /noun1?'
output1 = madlib(test_string1, fantasy)
output2 = madlib(test_string2, animal)
output3 = madlib(test_string3, UCSD)
assert isinstance (output1, str)
assert isinstance (output2, str)
assert isinstance (output3, str)
assert output1[0].isupper
assert output2[0].isupper
assert output3[0].isupper
assert output1[-1] == '.'
assert output2 [-1] == '!'
assert output3 [-1] == '?'
for item in output1[0:-1].split():
assert item in fantasy or test_string1
for item in output2[0:-1].split():
assert item in animal or test_string2
for item in output3[0:-1].split():
assert item in UCSD or test_string3 | [
"[email protected]"
] | |
3510745d963b2b35f3bdf88d51838de062837206 | a4dc13112ecc0e510ed9ea5de4d6d62e9b336466 | /stickman_game_moving_platforms.py | 87f0c6539d16156cde3a0ff616c2025dd7350356 | [] | no_license | ethyl2/stickman | 67aa1a460070e32c8fc028385a6f06154e02f60a | 377c644c1c8b07de4aa212586a0cb0153bf52af2 | refs/heads/master | 2021-01-13T10:14:07.814617 | 2016-09-27T15:40:48 | 2016-09-27T15:40:48 | 69,371,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,917 | py | #Stickman Game from Python for Kids
#With moving platforms
from Tkinter import *
import random
import time
class Coords:
    """Axis-aligned bounding box: (x1, y1) top-left, (x2, y2) bottom-right."""

    def __init__(self, x1=0, y1=0, x2=0, y2=0):
        self.x1, self.y1 = x1, y1
        self.x2, self.y2 = x2, y2
def within_x(co1, co2):
    """True when the x-extents of co1 and co2 overlap.

    An endpoint must fall strictly inside the other interval, so boxes that
    merely touch (or have identical extents) do not count as overlapping.
    """
    return (co2.x1 < co1.x1 < co2.x2
            or co2.x1 < co1.x2 < co2.x2
            or co1.x1 < co2.x1 < co1.x2
            or co1.x1 < co2.x2 < co1.x2)
def within_y(co1, co2):
    """True when the y-extents of co1 and co2 overlap.

    Mirrors within_x: an endpoint must fall strictly inside the other
    interval, so touching or identical extents do not count.
    """
    return (co2.y1 < co1.y1 < co2.y2
            or co2.y1 < co1.y2 < co2.y2
            or co1.y1 < co2.y1 < co1.y2
            or co1.y1 < co2.y2 < co1.y2)
def collided_left(co1, co2):
    """True when co1's left edge lies inside co2 (with y-extents overlapping)."""
    return within_y(co1, co2) and co2.x1 <= co1.x1 <= co2.x2
def collided_top(co1, co2):
    """True when co1's top edge lies inside co2 (with x-extents overlapping)."""
    return within_x(co1, co2) and co2.y1 <= co1.y1 <= co2.y2
def collided_bottom(y, co1, co2):
    """True when co1's bottom edge, advanced by y, lands inside co2."""
    if not within_x(co1, co2):
        return False
    projected = co1.y2 + y
    return co2.y1 <= projected <= co2.y2
def collided_right(co1, co2):
    """True when co1's right edge lies inside co2 (with y-extents overlapping)."""
    return within_y(co1, co2) and co2.x1 <= co1.x2 <= co2.x2
class Game:
    """Owns the Tk window, canvas, background, sprite list and main loop."""

    def __init__(self):
        # Fixed-size, always-on-top 500x500 window.
        self.tk = Tk()
        self.tk.title("Mr. Stick Man Races for the Exit")
        self.tk.resizable(0,0)
        self.tk.wm_attributes("-topmost", 1)
        self.canvas = Canvas(self.tk, width=500, height=500, highlightthickness=0)
        self.canvas.pack()
        self.tk.update()
        self.canvas_height = 500
        self.canvas_width = 500
        # Candidate background tiles; one is picked at random per grid cell.
        self.bg = PhotoImage(file="background.gif")
        self.bg2 = PhotoImage(file="background.gif")
        self.bg3 = PhotoImage(file="background_bookshelf.gif")
        self.bg4 = PhotoImage(file="background_lamp.gif")
        self.bg5 = PhotoImage(file="background_window.gif")
        self.backgrounds = [self.bg, self.bg2, self.bg3, self.bg4, self.bg5]
        w = self.bg.width()
        h = self.bg.height()
        # Tile a 5x5 grid of randomly chosen backgrounds across the canvas.
        for x in range(5):
            for y in range(5):
                num = random.randrange(len(self.backgrounds))
                self.canvas.create_image(x*w, y*h, image=self.backgrounds[num], anchor='nw')
        self.sprites = []
        #His code has True here, but I added a click to start instead:
        self.running = False
        # Doubles as the "Click to start" prompt and the "You won!" banner.
        self.game_over_text = self.canvas.create_text(250, 150, \
            text='Click to start', font=("Helvetica", 40), state='hidden')

    def start_game(self, evt):
        """Mouse-click handler: hide the prompt and start the sprite loop."""
        self.running = True
        self.canvas.itemconfig(self.game_over_text, state='hidden')
        self.canvas.itemconfig(self.game_over_text, text="You won!")

    def mainloop(self):
        """Drive the game: move sprites while running, else show the banner."""
        while 1:
            if self.running == True:
                for sprite in self.sprites:
                    sprite.move()
            else:
                # Not running (before start or after the end): show the text.
                time.sleep(0.5)
                self.canvas.itemconfig(self.game_over_text, \
                    state='normal')
            self.tk.update_idletasks()
            self.tk.update()
            time.sleep(0.01)
class Sprite:
    """Common base for everything that lives on the game canvas."""

    def __init__(self, game):
        # Back-reference to the Game, plus defaults: not an endgame trigger,
        # and no bounding box until a subclass assigns one.
        self.game = game
        self.endgame = False
        self.coordinates = None

    def move(self):
        """Do nothing; stationary sprites keep this default behaviour."""
        return None

    def coords(self):
        """Return the sprite's bounding box (None for the plain base sprite)."""
        return self.coordinates
class DoorSprite(Sprite):
    """The exit door; touching it ends the game (endgame is True)."""

    def __init__(self, game, x=45, y=30, width=40, height=35):
        Sprite.__init__(self, game)
        self.closed_door = PhotoImage(file="door1.gif")
        self.open_door = PhotoImage(file="door2.gif")
        self.image = game.canvas.create_image(x, y, image=self.closed_door, \
            anchor = 'nw')
        # Width/2 is used so that the figure will stop in front of the door
        # Instead of before it.
        # NOTE(review): the code actually divides by 3, not 2 as the comment
        # above says - confirm which was intended.
        self.coordinates = Coords(x, y, x + (width/3), y + height)
        self.endgame = True

    def open_door2(self):
        """Swap to the open-door image and force a redraw."""
        print("Now you're in the open_door2 method.")
        self.game.canvas.itemconfig(self.image, image=self.open_door)
        self.game.tk.update_idletasks()

    def close_door(self):
        """Swap back to the closed-door image and force a redraw."""
        print("Now you're in the close_door method.")
        self.game.canvas.itemconfig(self.image, image=self.closed_door)
        self.game.tk.update_idletasks()
class PlatformSprite(Sprite):
    """A static platform drawn once; its bounding box never changes."""

    def __init__(self, game, photo_image, x, y, width, height):
        Sprite.__init__(self, game)
        # Keep a reference to the PhotoImage so Tk does not discard it.
        self.photo_image = photo_image
        self.image = game.canvas.create_image(x, y, image=self.photo_image, \
            anchor='nw')
        self.coordinates = Coords(x, y, x + width, y + height)
class MovingPlatformSprite(PlatformSprite):
    """A platform that slides horizontally, reversing direction periodically.

    NOTE(review): this __init__ does not call PlatformSprite.__init__; it
    re-creates the image itself so it can also set up the movement state.
    """

    def __init__(self, game, photo_image, x, y, width, height):
        self.x = 2  # horizontal step per move; sign flips to reverse
        self.width = width
        self.height = height
        self.last_time = time.time()  # throttles movement (see move())
        self.counter = 0  # moves since the last direction change
        self.game = game
        self.photo_image = photo_image
        self.image = game.canvas.create_image(x, y, image=self.photo_image, \
            anchor='nw')
        self.coordinates = Coords(x, y, x + width, y + height)
        self.endgame = False

    def move(self):
        """Slide the platform at most every 0.03s; reverse after 20 steps."""
        if time.time() - self.last_time > 0.03:
            self.last_time = time.time()
            self.game.canvas.move(self.image, self.x, 0)
            self.counter += 1
            if self.counter > 20:
                self.x *= -1
                self.counter = 0

    def coords(self):
        """Recompute the bounding box from the canvas position (it moves)."""
        xy = self.game.canvas.coords(self.image)
        self.coordinates.x1 = xy[0]
        self.coordinates.y1 = xy[1]
        self.coordinates.x2 = xy[0] + self.width
        self.coordinates.y2 = xy[1] + self.height
        return self.coordinates
class StickFigureSprite(Sprite):
    """The player: handles input, walk animation and all collision logic."""

    def __init__(self, game):
        Sprite.__init__(self, game)
        # Three frames per direction; frame index 2 is used while jumping.
        self.images_left = [
            PhotoImage(file="figure-L1b.gif"),
            PhotoImage(file="figure-L2d.gif"),
            PhotoImage(file="figure-L3b.gif")
        ]
        self.images_right = [
            PhotoImage(file="figure-R1b.gif"),
            PhotoImage(file="figure-R2d.gif"),
            PhotoImage(file="figure-R3b.gif")
        ]
        #To check the exit, use 90 and 30:
        self.image = game.canvas.create_image(200, 470, \
            image=self.images_left[0], anchor='nw')
        # x/y are the per-frame displacement; y != 0 means airborne.
        self.x = -1 #-2
        self.y = 0 #0
        self.current_image = 0
        self.current_image_add = 1
        self.jump_count = 0
        self.last_time = time.time()
        self.coordinates = Coords()
        # Keyboard and mouse bindings.
        game.canvas.bind_all('<KeyPress-Left>', self.turn_left)
        game.canvas.bind_all('<KeyPress-Right>', self.turn_right)
        game.canvas.bind_all('<space>', self.jump)
        #I added the line below to start the game with a mouse click:
        game.canvas.bind_all('<Button-1>', game.start_game)

    def turn_left(self, evt):
        """Walk left, but only when not in the middle of a jump."""
        if self.y == 0:
            self.x = -2

    def turn_right(self, evt):
        """Walk right, but only when not in the middle of a jump."""
        if self.y == 0:
            self.x = 2

    def jump(self, evt):
        """Start a jump (upwards velocity) if currently on the ground."""
        if self.y == 0:
            self.y = -4
            self.jump_count = 0

    def animate(self):
        """Advance the walk cycle and pick the image for the current state."""
        # Cycle the walk frame back and forth (0 -> 1 -> 2 -> 1 -> ...),
        # at most every 0.1s, and only while walking on the ground.
        if self.x != 0 and self.y == 0:
            if time.time() - self.last_time > 0.1:
                self.last_time=time.time()
                self.current_image += self.current_image_add
                if self.current_image >= 2:
                    self.current_image_add = -1
                if self.current_image <= 0:
                    self.current_image_add = 1
        if self.x < 0:
            #Stick figure is moving left.
            if self.y != 0:
                #Figure is jumping.
                self.game.canvas.itemconfig(self.image, image=self.images_left[2])
            else:
                #Not jumping.
                self.game.canvas.itemconfig(self.image, image=self.images_left[self.current_image])
        elif self.x > 0:
            #Stick figure is moving right.
            if self.y != 0:
                self.game.canvas.itemconfig(self.image, image=self.images_right[2])
            else:
                self.game.canvas.itemconfig(self.image, image=self.images_right[self.current_image])

    def coords(self):
        """Refresh and return the bounding box from the canvas position.

        The figure's bounding box is 27x30 pixels.
        """
        xy = self.game.canvas.coords(self.image)
        self.coordinates.x1 = xy[0]
        self.coordinates.y1 = xy[1]
        self.coordinates.x2 = xy[0] + 27
        self.coordinates.y2 = xy[1] + 30
        return self.coordinates

    def end(self, sprite):
        """Finish the game: open the door, hide the figure, close the door."""
        print("Game Over")
        self.game.running = False
        sprite.open_door2()
        time.sleep(1)
        self.game.canvas.itemconfig(self.image, state='hidden')
        sprite.close_door()

    def move(self):
        """Move one frame: apply jumping/gravity, then resolve collisions."""
        self.animate()
        # Rising: after 30 frames of ascent, start falling.
        if self.y < 0:
            self.jump_count += 1
            if self.jump_count > 30: #20
                self.y = 4
        if self.y > 0:
            self.jump_count -= 1
        co = self.coords()
        # Flags record which directions are still unobstructed this frame.
        left = True
        right = True
        top = True
        bottom = True
        falling = True
        # Clamp against the canvas edges first.
        if self.y > 0 and co.y2 >= self.game.canvas_height:
            self.y = 0
            bottom = False
        elif self.y < 0 and co.y1 <= 0:
            self.y = 0
            top = False
        if self.x > 0 and co.x2 >= self.game.canvas_width:
            self.x = 0
            right = False
        elif self.x < 0 and co.x1 <= 0:
            self.x = 0
            left = False
        # Then test against every other sprite.
        for sprite in self.game.sprites:
            if sprite == self:
                continue
            sprite_co = sprite.coords()
            if top and self.y < 0 and collided_top(co, sprite_co):
                # Hitting something from below reverses the vertical velocity.
                self.y = -self.y
                print("I hit my head!")
                top = False
            if bottom and self.y > 0 and collided_bottom(self.y, co, sprite_co):
                # Land exactly on top of the platform.
                self.y = sprite_co.y1 - co.y2
                if self.y < 0:
                    self.y = 0
                print("I landed on something!")
                bottom = False
                top = False
            # Support check: something 1 pixel below means we are standing.
            if bottom and falling and self.y == 0 and co.y2 < self.game.canvas_height and collided_bottom(1, co, sprite_co):
                falling = False
            if left and self.x < 0 and collided_left(co, sprite_co):
                self.x = 0
                print("I hit my left side!")
                left = False
                if sprite.endgame:
                    #self.game.running = False
                    self.end(sprite)
            if right and self.x > 0 and collided_right(co, sprite_co):
                self.x = 0
                print("I hit my right side!")
                right = False
                if sprite.endgame:
                    self.end(sprite)
                    #self.game.running = False
        # Nothing beneath us: start falling.
        if falling and bottom and self.y == 0 and co.y2 < self.game.canvas_height:
            self.y = 2 #4
        self.game.canvas.move(self.image, self.x, self.y)
g = Game()
# Platforms form a staircase from the bottom-left up toward the door;
# platform3/5/9 are the moving ones.
platform1 = PlatformSprite(g, PhotoImage(file="platform3.gif"), 0,480,100,10)
platform2 = PlatformSprite(g, PhotoImage(file="platform3.gif"), 150,440,100, 10)
platform3 = MovingPlatformSprite(g, PhotoImage(file="platform3.gif"), 300,400,100, 10)
#platform3b = PlatformSprite(g, PhotoImage(file="platform3.gif"), 400,400,100,10)
platform4 = PlatformSprite(g, PhotoImage(file="platform3.gif"), 300,160,100, 10)
#Platform 2's: With 66
platform5 = MovingPlatformSprite(g, PhotoImage(file="platform2.gif"), 175,350, 66, 10)
platform6 = PlatformSprite(g, PhotoImage(file="platform2.gif"), 50,300, 66, 10)
platform7 = PlatformSprite(g, PhotoImage(file="platform2.gif"), 170,120,66, 10)
platform8 = PlatformSprite(g, PhotoImage(file="platform2.gif"), 45,60, 66, 10)
#Platform 1's with 32
platform9 = MovingPlatformSprite(g, PhotoImage(file="platform1.gif"), 170,250, 32, 10)
platform10 = PlatformSprite(g, PhotoImage(file="platform1.gif"), 230,200, 32, 10)
man = StickFigureSprite(g)
# Register every sprite so Game.mainloop() moves them each frame.
g.sprites.append(platform1)
g.sprites.append(platform2)
g.sprites.append(platform3)
#g.sprites.append(platform3b)
g.sprites.append(platform4)
g.sprites.append(platform5)
g.sprites.append(platform6)
g.sprites.append(platform7)
g.sprites.append(platform8)
g.sprites.append(platform9)
g.sprites.append(platform10)
g.sprites.append(man)
# The door must be in the sprite list too so collisions with it end the game.
door = DoorSprite(g)
g.sprites.append(door)
g.mainloop()
| [
"[email protected]"
] | |
ab3eab1a23bb4c92b20d6dfe73579aed909130dd | 97194f1f32c2461e8f89f0ece213cedbcdac7010 | /main/ocr/wsgi.py | cc2098ce1ef23c3dd1fec173214b2a2c2ac7ceb8 | [] | no_license | tienhung2812/ocr | eb77125c88c70bd3664c77622fa52d6559d2ecd5 | a14d46426f3ab22296ba03b71c8664b5ff01cf81 | refs/heads/master | 2020-05-01T13:50:16.562865 | 2019-10-02T16:12:40 | 2019-10-02T16:12:40 | 177,503,321 | 3 | 0 | null | 2019-10-02T10:06:52 | 2019-03-25T02:52:47 | Jupyter Notebook | UTF-8 | Python | false | false | 383 | py | """
WSGI config for ocr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Fall back to the project's settings module when none is configured.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ocr.settings')

# Module-level WSGI callable that application servers import.
application = get_wsgi_application()
| [
"[email protected]"
] | |
106755deaf8d41b33c8e305145718e0b2122ac89 | 9e7c4f5e723bb85115aee2198ba84143a0cffc28 | /pipeline.py | 6b84a58d30433d713bf6e775d746aa9d8122d175 | [] | no_license | krzjoa/kaggle-sberbank | 56ba608485c96b42dda98f5edbda5f354f30321b | 10d1b2072ba5642936e2fbebfa4442b981b071a5 | refs/heads/master | 2021-01-20T06:05:06.876381 | 2017-05-20T21:40:55 | 2017-05-20T21:40:55 | 89,840,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | # -*- coding: utf-8 -*-
def without(columns, with_no):
    """Return the items of `columns` that are not in `with_no`.

    The original used list(set(columns).difference(with_no)), which yields
    the surviving columns in arbitrary hash order; that scrambles the
    feature-column order between runs (and between train/test matrices).
    This version preserves the input order.
    """
    exclude = set(with_no)
    return [col for col in columns if col not in exclude]
class TestPipe(object):
    """Turn the raw test DataFrame into an (X, ids) pair for prediction."""

    def transform(self, test):
        prepared = common_pipe(test)
        feature_cols = without(prepared.columns, ['price_doc', 'id'])
        return prepared[feature_cols].values, test.id.values
class TrainPipe(object):
    """Turn the raw training DataFrame into an (X, y) pair."""

    def transform(self, train):
        prepared = common_pipe(train)
        # Extract the feature matrix X and the target vector y (price_doc).
        feature_cols = without(prepared.columns, ['price_doc', 'id'])
        return prepared[feature_cols].values, prepared.price_doc.values
def common_pipe(data):
    """Shared train/test preprocessing (Sberbank housing data).

    - maps yes/no columns to 0/1, product_type to 0/1 and ecology to 0..4
    - derives year (string) and month (int) from the 'YYYY-MM-DD' timestamp
    - drops timestamp/sub_area and fills remaining NaNs with 0

    Changes vs. the original: the unused zero_one/zero_two/strings lists
    (computed but never read) are removed, and the surviving columns keep
    their input order instead of the arbitrary order of list(set(...)).
    Note: the encoding mutates the passed DataFrame in place.
    """
    # Columns holding 'yes'/'no' flags -> 0/1.
    yes_no = [col for col in data if any(data[col].isin(['yes', 'no']))]
    for yn in yes_no:
        data[yn] = data[yn].apply(lambda x: 0 if x == 'no' else 1)
    # Binary-encode product_type and ordinal-encode ecology.
    data.product_type = data.product_type.apply(lambda x: 0 if x == "Investment" else 1)
    data.ecology = data.ecology.apply(lambda x: {'no data': 0, 'poor': 1,
                                                 'satisfactory': 2, 'good': 3,
                                                 'excellent': 4}[x])
    # Split the 'YYYY-MM-DD' timestamp into year and month.
    data['year'] = data.timestamp.str[:4]
    data['month'] = data.timestamp.str[5:7].apply(int)
    # Drop timestamp/sub_area (keeping input column order) and fill NaNs.
    tcols = [col for col in data.columns if col not in ('timestamp', 'sub_area')]
    return data[tcols].fillna(0)
"[email protected]"
] | |
35828fc54e55365946f49f01dff96bba3f4f97c8 | 83f40e01db20bb94e84d3d6e272bd78f338ed951 | /env/bin/viewer.py | 68d8b10a12396b077777426bf8371867844b8db7 | [] | no_license | s-wirth/Snackbar | 26ffa79635b29b7721f99e0774de205a0410d0a9 | 210ec931b95893dd022542bd91bbc527d192a2c6 | refs/heads/master | 2021-01-10T03:54:33.721493 | 2015-11-25T14:46:28 | 2015-11-25T14:46:28 | 46,731,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | #!/Users/sophie/Projects/Snackbar/env/bin/python2.7
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import Tk, Label
except ImportError:
from Tkinter import Tk, Label
from PIL import Image, ImageTk
#
# an image viewer
class UI(Label):
    """A Tk Label that displays a PIL image, handling 1-bit and other modes."""

    def __init__(self, master, im):
        # The ImageTk object is kept on self so it is not garbage-collected
        # while the label is alive.
        if im.mode == "1":
            # bitmap image
            self.image = ImageTk.BitmapImage(im, foreground="white")
            Label.__init__(self, master, image=self.image, bg="black", bd=0)
        else:
            # photo image
            self.image = ImageTk.PhotoImage(im)
            Label.__init__(self, master, image=self.image, bd=0)
#
# script interface
if __name__ == "__main__":
    import sys
    if not sys.argv[1:]:
        print("Syntax: python viewer.py imagefile")
        sys.exit(1)
    filename = sys.argv[1]
    # Show the image in a window titled with its filename.
    root = Tk()
    root.title(filename)
    im = Image.open(filename)
    UI(root, im).pack()
    root.mainloop()
| [
"[email protected]"
] | |
4515d580247c5ba93dec172bf0dcae2163e1b42f | 270e038578f67c05c2c1653b1bc6c8c128a68aa5 | /Object_to_Json/env/bin/pyreverse | e41dd0c844ab8ba9a3abec23d5dc771b6d9cf75b | [] | no_license | VALQUIRIAFABRO/Study_Python | 11c4ef63453f05489841fc3a52a5f0b4ebfd856c | 688de5e5f81f4f041e4ce418c95add258bc380fa | refs/heads/master | 2021-07-17T03:00:45.698147 | 2020-03-01T18:13:10 | 2020-03-01T18:13:10 | 244,197,291 | 0 | 0 | null | 2021-03-20T03:03:16 | 2020-03-01T17:56:54 | Python | UTF-8 | Python | false | false | 251 | #!/mnt/d/projPython/projVal/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_pyreverse
if __name__ == '__main__':
    # Strip setuptools' "-script.py(w)"/".exe" wrapper suffix from argv[0]
    # before handing control to pylint's pyreverse entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_pyreverse())
| [
"[email protected]"
] | ||
1cf7cc93ebcaaf841cd3ce276c85a9655ca80797 | 772242d1354ae3864cfd82c77ce08893a2a3837c | /1-aloparca/0-BRANDS/0-DELETE/aston_martin/myproject/spiders/parts.py | 929208d8beee189572b9bc0148f4a48339be67f4 | [] | no_license | codenotespy/SCRAPY_PROJECTS | 7f1a1839a4461c76b838fb2c3cd70f6b945ee98f | c3c965fca86758510b2980d40929c0da989d4f68 | refs/heads/main | 2023-06-11T08:29:03.505349 | 2021-06-29T03:18:39 | 2021-06-29T03:18:39 | 381,224,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,817 | py | import scrapy
from myproject.items import MyprojectItem
#from scrapy.loader import ItemLoader
class PartsSpider(scrapy.Spider):
    """Scrape ASTON MARTIN spare-part listings from aloparca.com.

    Pagination is driven by a class-level page counter rebuilt into the
    listing URL (pages 2..50; start_urls covers page 1).
    """
    name = 'parts'
    # Class-level counter shared across parse() calls.
    page_number = 2
    start_urls = ['https://www.aloparca.com/oto-yedek-parca/ASTON_MARTIN']

    def parse(self, response):
        # NOTE(review): a single item instance is created once and mutated
        # for every post, so each yield re-emits the same object.
        items = MyprojectItem()
        for post in response.css('.crvvuQ'):
            BRAND = post.css('b::text').extract()
            PART_NO = post.css('b::text')[1].getall()
            DESCRIPTION = post.css('b::text')[3].getall()
            M_PRICE = post.css('.fiyat div::text').extract()
            REMAINDER = post.css('.fiyat div span::text')[1].getall()
            CROSS_REF = post.css('a.title::attr(href)').getall()
            items['BRAND'] = BRAND
            items['PART_NO'] = PART_NO
            items['DESCRIPTION'] = DESCRIPTION
            items['CROSS_REF'] = CROSS_REF
            items['M_PRICE'] = M_PRICE
            items['REMAINDER'] = REMAINDER
            yield items
        # To bring datas from the other pages too.
        next_page = 'https://www.aloparca.com/oto-yedek-parca/ASTON_MARTIN?sayfa=' + str(PartsSpider.page_number) +'/'
        if PartsSpider.page_number < 51:
            PartsSpider.page_number +=1
            yield response.follow(next_page, callback= self.parse)
        if PartsSpider.page_number > 50:
            # NOTE(review): this line is broken - a Response object is not
            # callable, and no `alert` method is defined on this class, so
            # reaching it raises an exception.
            yield response(callback= self.alert)
        '''
        next_page = response.css('a.icon-caret-right::attr(href)').get()
        if next_page is not None: # To make sure there isn't next page.
            next_page = response.urljoin(next_page) # If it is not none.
            yield scrapy.Request(next_page, callback=self.parse) # So if next page is not none parse funtion will be colled back again and the data will be bringed from the next page.
        '''
| [
"[email protected]"
] | |
0118cbde92183909ac35dc3d61941d92afaa2979 | 997a65cffc140b9005ac88f29222e7e632ec518c | /train_w_center.py | 4d6ea9310cee83550c12e2b2998285313feffcd1 | [
"Apache-2.0"
] | permissive | Maeve-D/reid_for_deepsort | 5e900be1f22b09e9079bb8a0f6a36bad3e2fae91 | f82e2280443829339de4d29c7bbede9d992c4f97 | refs/heads/master | 2022-07-10T01:47:54.645557 | 2020-05-19T06:03:42 | 2020-05-19T06:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,077 | py | import argparse
import os
import time
import matplotlib
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torchvision
from torchvision import datasets
from eval import get_result
from models import build_model
from utils.center_loss import CenterLoss
matplotlib.use('Agg')
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
input_size = (128, 128)
parser = argparse.ArgumentParser(description="Train on my own dataset")
parser.add_argument("--data-dir", default='data', type=str)
parser.add_argument("--no-cuda", action="store_true")
parser.add_argument("--gpu-id", default=0, type=int)
parser.add_argument("--lr", default=0.001, type=float)
parser.add_argument("--interval", '-i', default=10, type=int)
parser.add_argument('--resume', '-r', action='store_true')
parser.add_argument('--model', type=str, default="mudeep")
parser.add_argument('--pretrained', action="store_true")
args = parser.parse_args()
# device
device = "cuda:{}".format(
args.gpu_id) if torch.cuda.is_available() and not args.no_cuda else "cpu"
if torch.cuda.is_available() and not args.no_cuda:
cudnn.benchmark = True
# data loading
root = args.data_dir
train_dir = os.path.join(root, "train")
test_dir = os.path.join(root, "val")
transform_train = torchvision.transforms.Compose([
torchvision.transforms.Resize(input_size),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.3568, 0.3141, 0.2781],
[0.1752, 0.1857, 0.1879])
])
transform_test = torchvision.transforms.Compose([
torchvision.transforms.Resize(input_size),
torchvision.transforms.ToTensor(),
torchvision.transforms.Normalize([0.3568, 0.3141, 0.2781],
[0.1752, 0.1857, 0.1879])
])
train_datasets = datasets.ImageFolder(train_dir, transform=transform_train)
test_datasets = datasets.ImageFolder(test_dir, transform=transform_test)
trainloader = torch.utils.data.DataLoader(train_datasets,
batch_size=64,
shuffle=True,
num_workers=4)
testloader = torch.utils.data.DataLoader(test_datasets,
batch_size=64,
shuffle=True,
num_workers=4)
num_classes = len(trainloader.dataset.classes)
##################
# net definition #
##################
start_epoch = 0
net = build_model(name=args.model, num_classes=num_classes,
pretrained=args.pretrained)
if args.resume:
assert os.path.isfile(
"./weights/best.pt"), "Error: no weights file found!"
print('Loading from weights/best.pt')
weights = torch.load("./weights/best.pt")
# import ipdb; ipdb.set_trace()
net_dict = weights['net_dict']
net.load_state_dict(net_dict)
best_acc = weights['acc']
start_epoch = weights['epoch']
net.to(device)
# loss and optimizer
criterion_model = torch.nn.CrossEntropyLoss(
) # CenterLoss(num_classes=num_classes)
optimizer_model = torch.optim.SGD(
net.parameters(), args.lr) # from 3e-4 to 3e-5
criterion_center = CenterLoss(num_classes=num_classes, feat_dim=num_classes)
optimizer_center = optim.Adam(criterion_center.parameters(), lr=0.005)
scheduler = optim.lr_scheduler.StepLR( # best lr 1e-3
optimizer_model, step_size=20, gamma=0.1)
best_acc = 0.
# train function for each epoch
def train(epoch):
    """Run one training epoch over `trainloader`.

    Relies on module-level globals: `net`, `trainloader`, `device`, `args`,
    `criterion_center`, `criterion_model`, `optimizer_center`,
    `optimizer_model`.

    Args:
        epoch: current epoch index (used only for logging).

    Returns:
        (mean combined loss per batch, top-1 error rate) for the epoch.
    """
    print('=' * 30, "Training", "=" * 30)
    net.train()
    # `training_loss` is reset every `interval` batches (log window);
    # `train_loss` accumulates over the whole epoch for the return value.
    training_loss = 0.
    train_loss = 0.
    correct = 0
    total = 0
    interval = args.interval
    start = time.time()
    for idx, (inputs, labels) in enumerate(trainloader):
        # forward
        inputs, labels = inputs.to(device), labels.to(device)
        outputs = net(inputs)
        loss_center = criterion_center(outputs, labels)
        loss_model = criterion_model(outputs, labels)
        # Fixed weighting: 20% center loss + 80% cross-entropy.
        loss = 0.2 * loss_center + 0.8 * loss_model
        # backward
        optimizer_center.zero_grad()
        optimizer_model.zero_grad()
        loss.backward()
        optimizer_model.step()
        optimizer_center.step()
        # accumulating
        training_loss += loss.item()
        train_loss += loss.item()
        correct += outputs.max(dim=1)[1].eq(labels).sum().item()
        total += labels.size(0)
        # periodic progress print
        if (idx + 1) % interval == 0:
            end = time.time()
            print(
                "epoch:{:d}|step:{:03d}|time:{:03.2f}s|Loss:{:03.5f}|center loss:{:03.4f}|model loss:{:03.4f}|Acc:{:02.3f}%"
                .format(epoch, idx, end - start, training_loss / interval,
                        0.2 * loss_center.item(), 0.8 * loss_model.item(),
                        100. * correct / total))
            training_loss = 0.
            start = time.time()
    return train_loss / len(trainloader), 1. - correct / total
def test(epoch):
    """Evaluate on `testloader`, log accuracy, and checkpoint the model.

    Saves `weights/<model>/<model>_last.pt` on every call and additionally
    `<model>_best.pt` when accuracy improves on the global `best_acc`.
    NOTE: module-level code resets `best_acc` to 0. after a resume load,
    so the first evaluation after resuming always writes a new "best".

    Args:
        epoch: current epoch index (stored in the checkpoint and logged).

    Returns:
        (mean test loss per batch, top-1 error rate).
    """
    global best_acc
    # NOTE(review): net.eval() is commented out, so evaluation runs with
    # the network still in training mode (dropout/batch-norm active) —
    # confirm this is deliberate.
    # net.eval()
    # net.train()
    test_loss = 0.
    correct = 0
    total = 0
    start = time.time()
    with torch.no_grad():
        for idx, (inputs, labels) in enumerate(testloader):
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = net(inputs)
            loss = criterion_model(outputs, labels)
            test_loss += loss.item()
            correct += outputs.max(dim=1)[1].eq(labels).sum().item()
            total += labels.size(0)
    print('=' * 30, "Testing", "=" * 30)
    end = time.time()
    print(
        "epoch:{:d}\t time:{:.2f}s\t Loss:{:.5f}\t Correct:{}/{}\t Acc:{:.3f}%"
        .format(epoch, end - start, test_loss / len(testloader), correct,
                total, 100. * correct / total))
    # saving weights
    acc = 100. * correct / total
    if not os.path.isdir('weights'):
        os.mkdir('weights')
    save_path = os.path.join("weights", args.model)
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    if acc > best_acc:
        best_acc = acc
        print("Saving parameters to weights/best.pt")
        weights = {
            'net_dict': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(weights,
                   './weights/%s/%s_best.pt' % (args.model, args.model))
        torch.save(weights,
                   './weights/%s/%s_last.pt' % (args.model, args.model))
    else:
        weights = {
            'net_dict': net.state_dict(),
            'acc': acc,
            'epoch': epoch,
        }
        torch.save(weights,
                   './weights/%s/%s_last.pt' % (args.model, args.model))
    # rank and mAP
    # net.eval()
    # TODO BUG
    # get_result(net, trainloader, testloader, train_datasets, test_datasets)
    return test_loss / len(testloader), 1. - correct / total
# plot figure
x_epoch = []
record = {'train_loss': [], 'train_err': [], 'test_loss': [], 'test_err': []}
fig = plt.figure()
ax0 = fig.add_subplot(121, title="loss")
ax1 = fig.add_subplot(122, title="top1err")
def draw_curve(epoch, train_loss, train_err, test_loss, test_err):
    """Append this epoch's metrics to `record` and re-save the loss/error plot.

    Uses module-level globals `record`, `x_epoch`, `fig`, `ax0`, `ax1`;
    overwrites "train.jpg" on every call.
    """
    global record
    record['train_loss'].append(train_loss)
    record['train_err'].append(train_err)
    record['test_loss'].append(test_loss)
    record['test_err'].append(test_err)
    x_epoch.append(epoch)
    ax0.plot(x_epoch, record['train_loss'], 'bo-', label='train')
    ax0.plot(x_epoch, record['test_loss'], 'ro-', label='val')
    ax1.plot(x_epoch, record['train_err'], 'bo-', label='train')
    ax1.plot(x_epoch, record['test_err'], 'ro-', label='val')
    # Add the legend exactly once; plot() runs every epoch, so calling
    # legend() each time would accumulate duplicate entries.
    # NOTE: when resuming (start_epoch > 0) epoch 0 never occurs, so no
    # legend is ever added.
    if epoch == 0:
        ax0.legend()
        ax1.legend()
    fig.savefig("train.jpg")
if __name__ == '__main__':
    # Train for 200 epochs starting from `start_epoch` (non-zero when
    # resuming from a checkpoint); evaluate, plot, and step the LR
    # scheduler after every epoch.
    for epoch in range(start_epoch, start_epoch + 200):
        train_loss, train_err = train(epoch)
        test_loss, test_err = test(epoch)
        draw_curve(epoch, train_loss, train_err, test_loss, test_err)
        scheduler.step()
        # if epoch % 10 == 0:
        #     os.system("python eval.py")
| [
"[email protected]"
] | |
8711b82efde4f4ea06535e355750e4d054e92d9f | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.3/django/contrib/auth/decorators.py | 90bb7a127cf216433ad50dc3c58dae54e28a51b2 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/django/contrib/auth/decorators.py | [
"[email protected]"
] | |
def isValidSubsequence(array, sequence):
    """Return True if `sequence` appears in `array` in order (not
    necessarily contiguously).

    Walks `array` once, advancing a cursor into `sequence` on each match;
    the sequence is valid when the cursor reaches its end.
    """
    indexSequence = 0
    for a in array:
        if indexSequence == len(sequence):
            # Whole sequence already matched; stop before indexing past the
            # end of `sequence` (the original raised IndexError here).
            break
        if a == sequence[indexSequence]:
            indexSequence += 1
    return indexSequence == len(sequence)
def main():
    """Demo driver: checks a candidate that is one element too long."""
    haystack = [5, 1, 22, 25, 6, -1, 8, 10]
    candidate = [5, 1, 22, 25, 6, -1, 8, 10, 10]
    print(isValidSubsequence(haystack, candidate))
main()
| [
"[email protected]"
] | |
b6aeae9d49474feca35b642020450f59b805f6a1 | 89c4a43a505df8fdf1f0d7386988c4896c2e631b | /examples/billing/get_invoices.py | 2ed69a7c2a8bbecccaf83ad8e70d6bd01cd8c19e | [
"Apache-2.0"
] | permissive | hurricanelennane/google-ads-python | a0a1fed690776a8bb2e81f637eb7eae10fb4992f | 310a488b6fdad9d5beea8fa4b166edce779a2511 | refs/heads/master | 2023-07-04T03:07:53.344466 | 2021-07-16T19:06:36 | 2021-07-16T19:06:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,262 | py | #!/usr/bin/env python
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Retrieves the invoices issued last month for a given billing setup."""
import argparse
from datetime import date, timedelta
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
def main(client, customer_id, billing_setup_id):
    """Retrieve and print the invoices issued last month for a billing setup.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID.
        billing_setup_id: a billing setup ID.
    """
    # The last day of last month.
    last_month = date.today().replace(day=1) - timedelta(days=1)
    # [START get_invoices]
    # Issues a request to list invoices.
    response = client.get_service("InvoiceService").list_invoices(
        customer_id=customer_id,
        billing_setup=client.get_service("GoogleAdsService").billing_setup_path(
            customer_id, billing_setup_id
        ),
        # The year needs to be 2019 or later, per the docs:
        # https://developers.google.com/google-ads/api/docs/billing/invoice?hl=en#retrieving_invoices
        issue_year=str(last_month.year),
        issue_month=last_month.strftime("%B").upper(),
    )
    # [END get_invoices]
    # [START get_invoices_1]
    # NOTE: `replaced_invoices` is a list; the original called
    # `invoice.replaced_invoices.join(", ")`, which raises AttributeError
    # (Python lists have no .join) — fixed to `", ".join(...)` below.
    for invoice in response.invoices:
        print(
            f"""
        - Found the invoice {invoice.resource_name}
            ID (also known as Invoice Number): '{invoice.id}'
            Type: {invoice.type_}
            Billing setup ID: '{invoice.billing_setup}'
            Payments account ID (also known as Billing Account Number): '{invoice.payments_account_id}'
            Payments profile ID (also known as Billing ID): '{invoice.payments_profile_id}'
            Issue date (also known as Invoice Date): {invoice.issue_date}
            Due date: {invoice.due_date}
            Currency code: {invoice.currency_code}
            Service date range (inclusive): from {invoice.service_date_range.start_date} to {invoice.service_date_range.end_date}
            Adjustments:
                subtotal {_micros_to_currency(invoice.adjustments_subtotal_amount_micros)}
                tax {_micros_to_currency(invoice.adjustments_tax_amount_micros)}
                total {_micros_to_currency(invoice.adjustments_total_amount_micros)}
            Regulatory costs:
                subtotal {_micros_to_currency(invoice.regulatory_costs_subtotal_amount_micros)}
                tax {_micros_to_currency(invoice.regulatory_costs_tax_amount_micros)}
                total {_micros_to_currency(invoice.regulatory_costs_total_amount_micros)}
            Replaced invoices: {", ".join(invoice.replaced_invoices) if invoice.replaced_invoices else "none"}
            Amounts:
                subtotal {_micros_to_currency(invoice.subtotal_amount_micros)}
                tax {_micros_to_currency(invoice.tax_amount_micros)}
                total {_micros_to_currency(invoice.total_amount_micros)}
            Corrected invoice: {invoice.corrected_invoice or "none"}
            PDF URL: {invoice.pdf_url}
            Account budgets:
        """
        )
        for account_budget_summary in invoice.account_budget_summaries:
            print(
                f"""
            - Account budget '{account_budget_summary.account_budget}':
                Name (also known as Account Budget): '{account_budget_summary.account_budget_name}'
                Customer (also known as Account ID): '{account_budget_summary.customer}'
                Customer descriptive name (also known as Account): '{account_budget_summary.customer_descriptive_name}'
                Purchase order number (also known as Purchase Order): '{account_budget_summary.purchase_order_number}'
                Billing activity date range (inclusive):
                    from #{account_budget_summary.billable_activity_date_range.start_date}
                    to #{account_budget_summary.billable_activity_date_range.end_date}
                Amounts:
                    subtotal '{_micros_to_currency(account_budget_summary.subtotal_amount_micros)}'
                    tax '{_micros_to_currency(account_budget_summary.tax_amount_micros)}'
                    total '{_micros_to_currency(account_budget_summary.total_amount_micros)}'
            """
            )
    # [END get_invoices_1]
def _micros_to_currency(micros):
return micros / 1000000.0 if micros is not None else None
if __name__ == "__main__":
# GoogleAdsClient will read the google-ads.yaml configuration file in the
# home directory if none is specified.
googleads_client = GoogleAdsClient.load_from_storage(version="v6")
parser = argparse.ArgumentParser(
description="Retrieves the invoices issued last month for a given billing setup."
)
# The following argument(s) should be provided to run the example.
parser.add_argument(
"-c",
"--customer_id",
type=str,
required=True,
help="The Google Ads customer ID.",
)
parser.add_argument(
"-b",
"--billing_setup_id",
type=str,
required=True,
help="The billing setup ID.",
)
args = parser.parse_args()
try:
main(googleads_client, args.customer_id, args.billing_setup_id)
except GoogleAdsException as ex:
print(
f'Request with ID "{ex.request_id}" failed with status '
f'"{ex.error.code().name}" and includes the following errors:'
)
for error in ex.failure.errors:
print(f'\tError with message "{error.message}".')
if error.location:
for field_path_element in error.location.field_path_elements:
print(f"\t\tOn field: {field_path_element.field_name}")
sys.exit(1)
| [
"[email protected]"
] | |
4757bdbb45e9943f57229c66382e143fdb7eedb0 | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/notification-hub/azext_notification_hub/aaz/latest/notification_hub/credential/wns/__cmd_group.py | 5037cd17f20259548803ca3dbaa434de49a782a9 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 604 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
class __CMDGroup(AAZCommandGroup):
    """Commands to manage notification hub credential for Windows(WNS).
    """
    # Auto-generated command-group stub (see the aaz-dev-tools header
    # above); it only carries the help text — no subcommands are defined
    # in this module.
    pass
__all__ = ["__CMDGroup"]
| [
"[email protected]"
] | |
8385b642855ce52a447c4cbb7ee9daef09d08a97 | eccb79e51d0805d5643e4bfb37d8bf771a33d07d | /day1/test2.py | f7758110e95c0427b4246ca452a4e96d9e7d8890 | [] | no_license | kkc272104568/python_test | a05d1f07f240f9f82410a3ac283f1766591013b3 | 75ca1e937c830114a6df84d0340c438ce9a01c91 | refs/heads/master | 2020-03-26T13:45:28.831829 | 2020-02-25T09:48:45 | 2020-02-25T09:48:45 | 144,955,227 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
'''题目:企业发放的奖金根据利润提成。利润(I)低于或等于10万元时,奖金可提10%;利润高于10万元,低于20万元时,
低于10万元的部分按10%提成,高于10万元的部分,可提成7.5%;20万到40万之间时,高于20万元的部分,可提成5%;
40万到60万之间时高于40万元的部分,可提成3%;60万到100万之间时,高于60万元的部分,可提成1.5%,高于100万元时,
超过100万元的部分按1%提成,从键盘输入当月利润I,求应发放奖金总数?
程序分析:请利用数轴来分界,定位。注意定义时需把奖金定义成长整型。
程序源代码:'''
# i= int (raw_input('净利润:'))
#
# if i <=100000 :
# print '净利润为:',i*0.1
# elif 100000<i<=200000:
# print '净利润为:',10000+(i-100000)*0.075
# elif 200000<i<=400000:
# print '净利润为:',10000+7500+(i-200000)*0.05
# elif 400000 < i <= 600000:
# print '净利润为:',10000+7500+10000+(i-400000)*0.03
# elif 600000 < i <= 1000000:
# print '净利润为:',10000+7500+10000+6000+(i-600000)*0.015
# elif 1000000 < i :
# print '净利润为:',10000+7500+10000+6000+6000+(i-1000000)*0.01
# Progressive commission: bracket floors (descending) and the rate applied
# to the slice of profit above each floor.
i = int(raw_input('净利润:'))
arr = [1000000,600000,400000,200000,100000,0]
rat = [0.01,0.015,0.03,0.05,0.075,0.1]
r = 0
# Walk the brackets from highest floor down: pay the slice above the floor
# at this bracket's rate, then clamp the remaining profit to the floor.
for idx in range(0,6):
    if i>arr[idx]:
        r+=(i-arr[idx])*rat[idx]
        print (i-arr[idx])*rat[idx]  # per-bracket bonus (debug output)
        i=arr[idx]
print r
"[email protected]"
] | |
3ef7c170aff0467ec017ff6fa55cfec5011d76af | 7ed8b6fda0bdf14ae4145d913b1cd6675ea1e8dc | /interspeech2020_codes/vaw-gan/convert-vawgan.py | 32d99356060a5fa966244fd2c33bd3efaa2d6242 | [] | no_license | hongwen-sun/Speaker-independent-emotional-voice-conversion-based-on-conditional-VAW-GAN-and-CWT | 9a8198ad10da163704f9711bcf06f2e44f08ec8c | 876fa67c29536afcae05496ca8f8f37abde9b8ee | refs/heads/master | 2023-01-10T11:35:24.935806 | 2020-11-13T12:50:09 | 2020-11-13T12:50:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,229 | py | import json
import os
import sys
import tensorflow as tf
import numpy as np
import soundfile as sf
from util.wrapper import load
from analyzer import read_whole_features, pw2wav
from analyzer import Tanhize
from datetime import datetime
from importlib import import_module
args = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('corpus_name', 'emotion_vc', 'Corpus name')
#tf.app.flags.DEFINE_string('checkpoint', './logdir/train/0314-1843-24-2020/model.ckpt-46860', 'root of log dir')
tf.app.flags.DEFINE_string('checkpoint', './logdir/train/0409-1053-31-2020/model.ckpt-46860', 'root of log dir')
tf.app.flags.DEFINE_string('src', 'Neutral', 'source speaker [SF1 - SM2]')
tf.app.flags.DEFINE_string('trg', 'Angry', 'target speaker [SF1 - TM3]')
tf.app.flags.DEFINE_string('output_dir', './logdir', 'root of output dir')
tf.app.flags.DEFINE_string('module', 'model.vawgan', 'Module')
tf.app.flags.DEFINE_string('model', 'VAWGAN', 'Model')
tf.app.flags.DEFINE_string('file_pattern', '../vaw_original/data_multi/bin/evaluation_set/{}/*.bin', 'file pattern')
tf.app.flags.DEFINE_string(
'speaker_list', './etc/speakers.tsv', 'Speaker list (one speaker per line)'
)
def make_output_wav_name(output_dir, filename):
    """Build the output path "<src>-<trg>-<basename>.wav" inside `output_dir`.

    Args:
        output_dir: directory for converted wav files.
        filename: a bytes object (str(filename, 'utf8') requires bytes);
            directory part and extension are stripped before use.

    Uses the module-level `args.src` / `args.trg` flags for the prefix.
    """
    basename = str(filename, 'utf8')
    basename = os.path.split(basename)[-1]
    basename = os.path.splitext(basename)[0]
    return os.path.join(
        output_dir,
        '{}-{}-{}.wav'.format(args.src, args.trg, basename)
    )
def get_default_output(logdir_root):
    """Return `<logdir_root>/output/<timestamp>` and announce it on stdout."""
    timestamp = datetime.now().strftime('%0m%0d-%0H%0M-%0S-%Y')
    default_logdir = os.path.join(logdir_root, 'output', timestamp)
    print('Using default logdir: {}'.format(default_logdir))
    return default_logdir
def convert_f0(f0, src, trg):
    """Convert F0 from speaker `src` to speaker `trg` in log-F0 space.

    Loads per-speaker (mean, std) of log-F0 from ./etc/<speaker>.npf and
    applies the Gaussian-normalized shift
    lf0_t = (lf0_s - mu_s) / std_s * std_t + mu_t.
    Frames with f0 <= 1 are passed through unchanged by the first `where`.

    NOTE(review): the second and third `where` re-test `lf0 > 1.` AFTER
    normalization, which assumes normalized voiced log-F0 stays above 1 —
    confirm this holds for the statistics in use.

    Args:
        f0: TF tensor of linear-scale F0 values.
    Returns:
        TF tensor of converted linear-scale F0 values.
    """
    mu_s, std_s = np.fromfile(os.path.join('./etc', '{}.npf'.format(src)), np.float32)
    mu_t, std_t = np.fromfile(os.path.join('./etc', '{}.npf'.format(trg)), np.float32)
    lf0 = tf.where(f0 > 1., tf.log(f0), f0)
    lf0 = tf.where(lf0 > 1., (lf0 - mu_s)/std_s * std_t + mu_t, lf0)
    lf0 = tf.where(lf0 > 1., tf.exp(lf0), lf0)
    return lf0
def nh_to_nchw(x):
    """Expand a rank-2 [batch, h] tensor to NCHW layout: [b, 1, h, 1]."""
    with tf.name_scope('NH_to_NCHW'):
        x = tf.expand_dims(x, 1)  # [b, h] => [b, c=1, h]
        return tf.expand_dims(x, -1)  # => [b, c=1, h, w=1]
def nh_to_nhwc(x):
    """Expand a rank-2 [batch, h] tensor to NHWC layout: [b, h, 1, 1]."""
    with tf.name_scope('NH_to_NHWC'):
        return tf.expand_dims(tf.expand_dims(x, -1), -1)
def main(unused_args=None):
    """Convert source-speaker features to the target speaker and write wavs.

    Loads the architecture JSON found next to the checkpoint, rebuilds the
    model class named by --model from --module, converts F0 statistics and
    spectral envelopes for every file matching --file_pattern, and writes
    one 16 kHz wav per input into a timestamped output directory until the
    input queue ends or the user interrupts (Ctrl-C).
    """
    # args(sys.argv)
    if args.model is None:
        raise ValueError(
            '\n  You MUST specify `model`.' +\
            '\n    Use `python convert.py --help` to see applicable options.'
        )
    module = import_module(args.module, package=None)
    MODEL = getattr(module, args.model)
    FS = 16000  # output sample rate (Hz)
    with open(args.speaker_list) as fp:
        SPEAKERS = [l.strip() for l in fp.readlines()]
    logdir, ckpt = os.path.split(args.checkpoint)
    if 'VAE' in logdir:
        _path_to_arch, _ = os.path.split(logdir)
    else:
        _path_to_arch = logdir
    arch = tf.gfile.Glob(os.path.join(_path_to_arch, 'architecture*.json'))
    # NOTE(review): this warning also fires when ZERO files match, and
    # `arch[0]` below would then raise IndexError — confirm intended.
    if len(arch) != 1:
        print('WARNING: found more than 1 architecture files!')
    arch = arch[0]
    with open(arch) as fp:
        arch = json.load(fp)
    normalizer = Tanhize(
        xmax=np.fromfile('./etc/{}_xmax.npf'.format(args.corpus_name)),
        xmin=np.fromfile('./etc/{}_xmin.npf'.format(args.corpus_name)),
    )
    features = read_whole_features(args.file_pattern.format(args.src))
    x = normalizer.forward_process(features['sp'])
    x = nh_to_nhwc(x)
    y_s = features['speaker']
    # Target speaker id is fed at session run time; broadcast to batch size.
    y_t_id = tf.placeholder(dtype=tf.int64, shape=[1,])
    y_t = y_t_id * tf.ones(shape=[tf.shape(x)[0],], dtype=tf.int64)
    f0_s = features['f0']
    f0_t = convert_f0(f0_s, args.src, args.trg)
    f0_s_convert = tf.cast(f0_s,dtype=tf.int64)
    f0_t_convert = tf.cast(f0_t,dtype=tf.int64)
    machine = MODEL(arch, is_training=False)
    z = machine.encode(x)
    x_t = machine.decode(z, y_t, f0_t_convert)  # NOTE: the API yields NHWC format
    x_t = tf.squeeze(x_t)
    x_t = normalizer.backward_process(x_t)
    # For sanity check (validation)
    # x_s = machine.decode(z, y_s, f0_s_convert)
    # x_s = tf.squeeze(x_s)
    # x_s = normalizer.backward_process(x_s)
    # f0_s = features['f0']
    # f0_t = convert_f0(f0_s, args.src, args.trg)
    output_dir = get_default_output(args.output_dir)
    saver = tf.train.Saver()
    sv = tf.train.Supervisor(logdir=output_dir)
    with sv.managed_session() as sess:
        load(saver, sess, logdir, ckpt=ckpt)
        print()
        # Loop until the input queue raises (end of data) or Ctrl-C.
        while True:
            try:
                feat, f0, sp = sess.run(
                    [features, f0_t, x_t],
                    feed_dict={y_t_id: np.asarray([SPEAKERS.index(args.trg)])}
                )
                feat.update({'sp': sp, 'f0': f0})
                y = pw2wav(feat)
                oFilename = make_output_wav_name(output_dir, feat['filename'])
                print('\rProcessing {}'.format(oFilename), end='')
                sf.write(oFilename, y, FS)
            except KeyboardInterrupt:
                break
            finally:
                pass
        print()
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
f1a816139f85ed6a57d84606962de0bae8e26e2b | 431af022b62460f5bf81ff19450e391e22133e0a | /w1/web_app_1/bin/pip | db7ab663bb63410e004d2006c2dae86b3d2fc921 | [] | no_license | marcin-jamroz/pythonLvlUp | a8e3d758a807fa1c1ebcc079b86c90fa2aa505f6 | 1aa966c5e988dd92b7bfff9ccb599332ccbb6f71 | refs/heads/master | 2020-03-11T10:57:36.634715 | 2018-04-17T19:49:32 | 2018-04-17T19:49:45 | 129,956,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | #!/home/marcin/Kod/Python/pythonLvlUp/w1/web_app_1/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
    # Standard console-script shim: strip the "-script.pyw" / ".exe"
    # suffixes Windows launchers append to argv[0], then delegate to
    # pip's main() and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | ||
775bfe54fc558de9f2d13cdfab215fb701858979 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5652388522229760_1/Python/hwmaltby/counting_sheep.py | 28c65f65b3bc6c9b939b87a0d25dfc2ef60af483 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 715 | py | #Henry Maltby
#Code Jam 2016
f = open('A-large.in')
g = open('A-large.out', 'w')
N = int(f.readline())
data = [int(x) for x in f.read().split('\n')]
def count_sheep(n):
    """
    Takes a number and returns the last value Bleatrix would say.
    Does so directly: iterates over each number said and each digit in number.
    """
    if n == 0:
        return 'INSOMNIA'
    unseen = set(range(10))
    multiple = 0
    said = None
    while unseen:
        multiple += 1
        said = str(multiple * n)
        # Cross off every digit that appears in the number just said.
        unseen -= {int(ch) for ch in said}
    return said
for i in range(len(data)):
g.write("Case #" + str(i + 1) + ": " + count_sheep(data[i]) + "\n")
| [
"[email protected]"
] | |
185f4234ea2c1aafdc20a163089442b5a7b72a70 | f8b5e12316bce07b7e1ea568e81e40442fed17bc | /meloshare/api/resources.py | bcdc0cccbe3734cd43f69597e44f68e617e63dd6 | [
"MIT"
] | permissive | xavierfav/meloshare | 56ea9d228c4c7a6d6c0bf031e5222ce15ddff5aa | a52bf321135382b415a453ad29498901c96f8cdb | refs/heads/master | 2022-12-12T21:10:24.055102 | 2018-01-30T03:05:15 | 2018-01-30T03:05:15 | 119,074,174 | 1 | 1 | MIT | 2022-12-08T00:53:21 | 2018-01-26T16:18:24 | Python | UTF-8 | Python | false | false | 15 | py | RESOURCES = []
| [
"[email protected]"
] | |
0e84c6a4d779d67cc768bbeaf77a684bb7a6e521 | ed363858c2a88ed125f7b7dce507a2978201f0ca | /week1/Project1/api/migrations/0001_initial.py | 7cabcca21f272fb82e94bba55ef3f0fcbd7abe4f | [] | no_license | progF/advanced-django2019 | cc058e99c3d3add3eb0ebac773cdee23f49a284a | e3e2cc37c88cf697446ffa8493db811c1f8a8aa8 | refs/heads/master | 2022-11-24T17:34:24.951650 | 2019-12-05T13:28:31 | 2019-12-05T13:28:31 | 206,416,155 | 0 | 0 | null | 2022-11-22T04:52:36 | 2019-09-04T21:20:02 | Python | UTF-8 | Python | false | false | 1,502 | py | # Generated by Django 2.2.5 on 2019-09-04 19:01
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Product and Review
    # tables, each with a 0-5 integer rating.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)])),
                ('name', models.CharField(max_length=200)),
                ('description', models.TextField()),
                ('photo', models.ImageField(upload_to='images/')),
            ],
        ),
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rating', models.IntegerField(default=0, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)])),
                ('title', models.CharField(max_length=200)),
                ('summary', models.TextField()),
                ('date', models.DateTimeField(auto_now_add=True)),
                # NOTE(review): this FK is named 'user' but targets
                # api.Product — it looks like it should point at the
                # reviewed product (or be renamed); confirm before a
                # follow-up migration depends on it.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Product')),
            ],
        ),
    ]
| [
"[email protected]"
] | |
class Counter:
    """Tracks a per-instance count alongside a shared class-level count.

    NOTE: constructing any new Counter() resets the shared `ccount`
    back to 1 for everyone.
    """
    ccount = 1

    def __init__(self):
        Counter.ccount = 1
        self.count = 1

    def incrementCount(self):
        """Bump both the instance count and the shared class count."""
        self.count += 1
        Counter.ccount += 1

    def showCount(self):
        """Print the instance count next to the shared class count."""
        print("count is {} and ccount is {}".format(self.count, Counter.ccount))
c1 = Counter()
c2 = Counter()
c3 = c1
c1.incrementCount()
c2.incrementCount()
c3.incrementCount()
c1.incrementCount()
c2.incrementCount()
c3.incrementCount()
c1.showCount()
c2.showCount()
c3.showCount()
| [
"[email protected]"
] | |
b9152793bd39062b3450e1f61fd161cfbeecde12 | fbd3b8a646091400df5ba5d586eec55eff0b056d | /src/AS/s05_parse_PSI_vals.py | 658051a8e7fa77e17fae73ba111f316bccb5c469 | [] | no_license | mpandeyWU/diurnalCycleCodes | a538fe28190d5c62f848ad2ca9802fc98eb22785 | 14951f768371c1c3ed9e549b6f8863c241a6b7cf | refs/heads/master | 2021-08-06T16:06:18.559827 | 2020-04-16T04:59:15 | 2020-04-16T04:59:15 | 151,189,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,706 | py | #!/usr/bin/python
#########################################################
# Purpose: Parse tsv output files and filter the relevant LSVs
# Author: Manishi Pandey
# Date: October 16, 2018
# Usage: python s05_parse_PSI_vals.py -majiqpath ../output/Majiq_PSI_All/tsvFiles/ -outfile ../output/s05_output/all_LSV_list.txt
###########################################################
import re, sys, os
import argparse
from collections import defaultdict
import numpy as np
import scipy.stats as sp
def parse_majiq_tsv(filename):
    """Parse a MAJIQ PSI .tsv into {"chrom:coord:strand@LSV_ID": psi}.

    Lines starting with '#' are skipped.  Column 3 holds the ';'-separated
    PSI values, column 14 the matching splice-site coordinates, column 12
    the chromosome, column 13 the strand, and column 2 the LSV id.  Each
    PSI value is rounded to 4 decimal places.
    """
    psi_by_junction = defaultdict(float)
    with open(filename, "r") as handle:
        for raw_line in handle:
            if raw_line.startswith("#"):
                continue
            fields = raw_line.rstrip("\n").split("\t")
            psi_values = fields[3].split(";")
            coords = fields[14].split(";")
            chrom, strand, lsv_id = fields[12], fields[13], fields[2]
            for pos, coord in enumerate(coords):
                junction_id = "{}:{}:{}@{}".format(chrom, coord, strand, lsv_id)
                psi_by_junction[junction_id] = round(float(psi_values[pos]), 4)
    return psi_by_junction
if (__name__ == "__main__"):
    parser = argparse.ArgumentParser(prog="")
    parser.add_argument('-majiqpath', dest="majiqpath", help="Path to the output files of Majiq")
    parser.add_argument('-outfile', dest="outcsv", help="Path to the output CSV file")
    args = parser.parse_args()
    tsv_path = args.majiqpath
    csv_out = args.outcsv
    # allSS maps "chrom:coord:strand@LSV_ID" -> list of PSI values, one per
    # time point in tsvOrder; 0.001 is the "absent at this time point"
    # placeholder.
    allSS = defaultdict(list)
    tsvFiles = os.listdir(tsv_path)
    tsvOrder = ["TP_1", "TP_2", "TP_3", "TP_4", "TP_5", "TP_6", "TP_7", "TP_8", "TP_9", "TP_10", "TP_11", "TP_11.5", "TP_12", "TP_12.5", "TP_13.5", "TP_14", "TP_14.5", "TP_15", "TP_16", "TP_17", "TP_18", "TP_19","TP_20", "TP_21", "TP_22", "TP_23", "TP_24"]
    print(sorted(tsvFiles))
    for k,sampleID in enumerate(tsvOrder):
        filename = "{}.psi.tsv".format(sampleID)
        print(filename)
        filepath = "{}/{}".format(tsv_path,filename)
        ssDict = parse_majiq_tsv(filepath)
        for ssid in ssDict:
            if (ssid in allSS):
                allSS[ssid].append(ssDict[ssid]) #allSS dictionary has summary of all ssID with lsvid with a list of psi-values for each time-point.
            else:
                # First sighting of this junction: back-fill the earlier
                # time points with the placeholder value.
                l = [0.001]*(k+1)
                l[k] = ssDict[ssid]
                allSS[ssid] = l
        # Junctions known from earlier time points but absent from this one
        # also get the placeholder.
        otherIDs = set(allSS.keys()) - set(ssDict.keys())
        for oID in otherIDs:
            allSS[oID].append(0.001)
    varArr = list()
    cvArr = list()
    filteredSS = defaultdict(dict)
    for sampleID in allSS:
        arrPSI = allSS[sampleID]
        # Skip junctions that are constitutively excluded (<0.05 at every
        # time point) or constitutively included (>0.95 everywhere).
        # Fixed: the original used a bare `next;` (a no-op expression, not
        # `continue`) and bitwise `|` instead of short-circuit `or`.
        if all(x < 0.05 for x in arrPSI) or all(x > 0.95 for x in arrPSI):
            continue
        varPSI = np.var(arrPSI)
        cvPSI = sp.variation(arrPSI)
        varArr.append(varPSI)
        cvArr.append(cvPSI)
        if (varPSI >= 0.2): #Need to choose a proper cut-off
            ids = sampleID.split("@")
            filteredSS[ids[1]][ids[0]] = arrPSI
    # Copy the variance-filtered junctions, keyed by LSV id then coordinate.
    newFilter = defaultdict(dict)
    for lsvid in filteredSS:
        for ssID in filteredSS[lsvid]:
            newFilter[lsvid][ssID] = filteredSS[lsvid][ssID]
    # Write one CSV row per retained junction (with open ensures the file
    # is closed/flushed even on error).
    with open(csv_out, "w") as fout:
        tsvStr = ",".join(tsvOrder)
        fout.write("LSV_ID,Chr_Coord,{}\n".format(tsvStr))
        for lsvid in newFilter:
            for ssID in newFilter[lsvid]:
                arrPSI = newFilter[lsvid][ssID]
                psistr = ",".join(map(str,arrPSI))
                fout.write("{},{},{}\n".format(lsvid,ssID,psistr))
| [
"[email protected]"
] | |
e2d86aa7be06a9fab29cfa499261d2adc9a00dfe | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Quartz/PyObjCTest/test_ciplugininterface.py | 9dd711b881eaa697be75fbe4494b6429783fdfd1 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
from Quartz import *
class TestCIPluginInterfaceHelper(NSObject):
    # Minimal NSObject subclass whose load_ selector returns a truthy int;
    # used below to exercise assertResultIsBOOL on the bridged method.
    def load_(self, h):
        return 1
class TestCIPlugInInterface(TestCase):
    """Checks the bridge metadata of the CIPlugIn interface."""

    def testMethods(self):
        self.assertResultIsBOOL(TestCIPluginInterfaceHelper.load_)

    # Disabled: the "no_" prefix keeps the test runner from collecting it.
    def no_testProtocol(self):
        p = objc.protocolNamed("CIPlugInRegistration")
        # Fixed typo: was `assertIsInstancE`, which would raise
        # AttributeError if this test were ever re-enabled.
        self.assertIsInstance(p, objc.formal_protocol)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
2159f026e2e91ee4f34ac0e77548ea497fcf9d65 | b05761d771bb5a85d39d370c649567c1ff3eb089 | /venv/lib/python3.10/site-packages/numpy/typing/tests/data/pass/ufunc_config.py | fd64c8eab5a129e2e7cedb231f7ad525b70f0bed | [] | no_license | JawshyJ/Coding_Practice | 88c49cab955eab04609ec1003b6b8c20f103fc06 | eb6b229d41aa49b1545af2120e6bee8e982adb41 | refs/heads/master | 2023-02-19T10:18:04.818542 | 2023-02-06T21:22:58 | 2023-02-06T21:22:58 | 247,788,631 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/6f/49/d6/2f2ab45761f898a657acd334913f8633ec7c8a0f6dbf76a5e74fd80163 | [
"[email protected]"
] | |
841f0308c5117d12505da207be5622d21060d121 | 4ab4f763fec96977dd37ae531c20e66528018287 | /examples/script.py | 5193361b099624704e1fa9b864033643fdd25e69 | [] | no_license | rienafairefr/py-bricklink | 4187de386dec8c128308fc27ca1a5f25d142f7f8 | 0e7ea6dbc02dc9a44042fc70feaf164a0c4e92aa | refs/heads/master | 2020-08-28T15:10:55.075476 | 2019-10-26T18:56:28 | 2019-10-26T18:56:28 | 217,735,256 | 0 | 0 | null | 2019-10-26T16:14:29 | 2019-10-26T16:14:29 | null | UTF-8 | Python | false | false | 164 | py | import json
import bricklink
auth = json.load(open('auth.json', 'r'))
client = bricklink.ApiClient(**auth)
print(client.catalog.getItem('MINIFIG', 'cty0859'))
| [
"[email protected]"
] | |
26f58016bf981e3c9bc9c093f5729428cb70b710 | 0a40a27bdc9ad234d2f3ec8bc68b082881065fc7 | /riko/modules/udf.py | e89eef2ba850ba8862ff29b9ea73cbb05034bc64 | [
"MIT"
] | permissive | anilktechie/riko | 5130ff054e03315e5e47ea99f68ec5099079675f | 9549a1a23153e274069f6d13de0b528488d7ba92 | refs/heads/master | 2022-11-30T21:39:20.328527 | 2020-08-14T16:47:45 | 2020-08-14T16:47:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,288 | py | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.udf
~~~~~~~~~~~~~~~~
Provides functions for performing an arbitrary (user-defined) function on stream
items.
Examples:
basic usage::
>>> from riko.modules.udf import pipe
>>>
>>> items = [{'x': x} for x in range(5)]
>>> func = lambda item: {'y': item['x'] + 3}
>>> next(pipe(items, func=func))
{'y': 3}
"""
from . import operator
import pygogo as gogo
logger = gogo.Gogo(__name__, monolog=True).logger
def parser(stream, objconf, tuples, **kwargs):
""" Parses the pipe content
Args:
stream (Iter[dict]): The source. Note: this shares the `tuples`
iterator, so consuming it will consume `tuples` as well.
objconf (obj): the item independent configuration (an Objectify
instance).
tuples (Iter[(dict, obj)]): Iterable of tuples of (item, objconf)
`item` is an element in the source stream and `objconf` is the item
configuration (an Objectify instance). Note: this shares the
`stream` iterator, so consuming it will consume `stream` as well.
kwargs (dict): Keyword arguments.
Returns:
Iter(dict): The output stream
Examples:
>>> from meza.fntools import Objectify
>>> from itertools import repeat
>>>
>>> func = lambda item: {'y': item['x'] + 3}
>>> stream = ({'x': x} for x in range(5))
>>> tuples = zip(stream, repeat(None))
>>> next(parser(stream, None, tuples, func=func))
{'y': 3}
"""
return map(kwargs['func'], stream)
@operator(isasync=True)
def async_pipe(*args, **kwargs):
"""An aggregator that asynchronously returns a specified number of items
from a stream.
Args:
items (Iter[dict]): The source.
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
func (callable): User defined function to apply to each stream item.
Returns:
Deferred: twisted.internet.defer.Deferred truncated stream
Examples:
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x))
... func = lambda item: {'y': item['x'] + 3}
... items = ({'x': x} for x in range(5))
... d = async_pipe(items, func=func)
... return d.addCallbacks(callback, logger.error)
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
...
{'y': 3}
"""
return parser(*args, **kwargs)
@operator()
def pipe(*args, **kwargs):
"""An operator that returns a specified number of items from a stream.
Args:
items (Iter[dict]): The source.
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
func (callable): User defined function to apply to each stream item.
Yields:
dict: an item
Examples:
>>> items = [{'x': x} for x in range(5)]
>>> func = lambda item: {'y': item['x'] + 3}
>>> next(pipe(items, func=func))
{'y': 3}
"""
return parser(*args, **kwargs)
| [
"[email protected]"
] | |
e7f453b02820dc50a5ea35812ca6076647f21f09 | f4bc9842fce9846fc94d73c4e52a2ad9cb69aa1b | /Workshop 2/2.3 - String operations.py | b39a3d24c878e139b7666568319f472f4ebb9443 | [] | no_license | Honza-m/python-training | df486a160dbc193b5d47a5d54853f4eb6c3a96df | 9c0da5bd37ab2fd8164abe7c8970b31ec6a4c083 | refs/heads/master | 2020-03-08T14:21:02.798713 | 2018-08-14T11:24:29 | 2018-08-14T11:24:29 | 128,182,822 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 844 | py | #definitions
def string_def():
x = 'This is a string'
print(x)
x = "This is also a string"
print(x)
z = """ This
is
a multiline
string - good for documentation!"""
print(z)
def special_chars():
x = "What is I want \"quotes\"?"
print(x)
x = "C:\\Users\\Jan"
print(x)
def string_methods():
""" https://docs.python.org/3.6/library/string.html """
x = " this IS a long sentence "
print(x.upper())
print(x.lower())
print(x.lstrip())
print(x.rstrip())
print(x.strip())
print(x.replace('IS','IS NOT'))
print()
x = "test"
print(x.capitalize())
def format_string(x, y):
result1 = "This is the first arguement: {}".format(x)
print(result1)
result2 = "These are your arguments: {}, {}".format(x, y)
print(result2)
string_methods()
| [
"[email protected]"
] | |
4a14d5356601d80e01b3a25d247ce1190222db4e | 4d993938125a1c54e11359e2baa498a8c22edbd7 | /venv/bin/player.py | b2717055a500daccdc379137dce67b5c515f8519 | [] | no_license | william-index/plantgod | 57e2454dd9718d918e73558f4951af9ea7ad882b | 6aa07ed2694a59659fe56fc95f2c5adf7757c498 | refs/heads/master | 2020-02-26T15:22:18.724558 | 2018-08-16T19:41:50 | 2018-08-16T19:41:50 | 70,263,435 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,211 | py | #!/Users/wanderson/Repos/evolving-plants/venv/bin/python
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
| [
"[email protected]"
] | |
4e341fd28736e9eea2da274a054fc88727cef941 | 6b7e87cc96948178a9e6b7c60cba81e210607ba4 | /com.stonedog/src/com/stonedog/study/metaclass/metaclasstest.py | 22bbc3235654621a4e6d5042d807ef87b66d95cf | [] | no_license | stonedog/stonedog-learn-to-python | 9025161b70ba47c4b3b7aa60d52b28ceeae1c8c5 | a9f90f2285a19b01f91d1e188ae1fbac0e1c43a1 | refs/heads/master | 2016-09-06T07:07:56.616372 | 2013-02-17T09:15:36 | 2013-02-17T09:15:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,025 | py | '''
Created on 2013-2-12
@author: Administrator
'''
class Metaclass(type):
def __new__(typeclass,classname,superclass,classdict):
print(type(typeclass))
return type.__new__(typeclass,classname,superclass,classdict)
class Test():
__metaclass__=Metaclass
def __init__(self):
pass
def aha(self):
print ('hello world')
a=Test()
a.aha()
def TraceDecorator(func):
def wrapper(*args,**kwargs):
print ('%s is Called' %(func.__name__))
return func(*args,**kwargs)
return wrapper
def ConfigTraceDecorator(name,trace):
def realDecorator(func):
def wrapper(*args,**kwargs):
if trace==True:
print ('%s.%s is Called' %(name,func.__name__))
return func(*args,**kwargs)
return wrapper
return realDecorator
from types import FunctionType
class TraceMetaClass(type):
def __new__(typeclass,classname,superclassname,classdict):
for key,value in classdict.items():
if type(value) is FunctionType:
classdict[key]=TraceDecorator(value)
return type.__new__(typeclass,classname,superclassname,classdict)
class ConfigTraceMetaClass(type):
def __new__(typeclass,classname,superclass,classdict):
if __debug__:
for key,value in classdict.items():
if type(value) is FunctionType:
classdict[key]=ConfigTraceDecorator(classname, True)(value)
return type.__new__(typeclass,classname,superclass,classdict)
class Person(object):
__metaclass__=TraceMetaClass
def __init__(self,name):
self.__name=name
def SayHello(self):
print self.__name
class Student(object):
__metaclass__=ConfigTraceMetaClass
def __init__(self,name):
self.__name=name
def SayHello(self):
print 'hello world2'
p=Person('zhang')
p.SayHello()
s=Student('jiao')
s.SayHello() | [
"[email protected]"
] | |
4df520463024b038d97879f2ec416f27e784bbb7 | 1967af25a4e18b053682b2dbe8116f4bd38a0479 | /Basic Scripts/Animal_Quiz.py | 005ce2aae8a062947d70fc972063c90e5e2082b8 | [
"Unlicense"
] | permissive | shubhi13/Python_Scripts | c2a82a550fa26cc8a578b1e1263689c29b87facc | 9bd8fd402c61f24355a4d6b0d0815ad913becac3 | refs/heads/master | 2023-08-22T00:45:21.423803 | 2021-10-29T06:54:16 | 2021-10-29T06:54:16 | 422,703,282 | 0 | 0 | Unlicense | 2021-10-29T20:40:02 | 2021-10-29T20:20:32 | null | UTF-8 | Python | false | false | 818 | py |
def check_guess(guess, answer):
global score
still_guessing = True
attempt = 0
while still_guessing and attempt < 3:
if guess.lower() == answer.lower():
print("Correct Answer")
score = score + 1
still_guessing = False
else:
if attempt < 2:
guess = input("Sorry Wrong Answer, try again")
attempt = attempt + 1
if attempt == 3:
print("The Correct answer is ",answer )
score = 0
print("Guess the Animal")
guess1 = input("Which bear lives at the North Pole? ")
check_guess(guess1, "polar bear")
guess2 = input("Which is the fastest land animal? ")
check_guess(guess2, "Cheetah")
guess3 = input("Which is the larget animal? ")
check_guess(guess3, "Blue Whale")
print("Your Score is "+ str(score))
| [
"[email protected]"
] | |
a4f9ea4b9c476bded45c0146c0fb8c9fc24c872f | d9a22d4dcdfc0c28176c0e8afd784b30d275597e | /lib/geometry/rotations.py | 6bf545f8075e9092d440c71f9612e0e729c5803c | [] | no_license | jlec/relax | fda1b3ff77be0afc21c2e6cc52348ae7635cd07a | c317326ddeacd1a1c608128769676899daeae531 | refs/heads/master | 2016-09-08T00:27:57.256090 | 2015-02-10T12:24:55 | 2015-02-10T12:24:55 | 30,596,131 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 72,517 | py | ###############################################################################
# #
# Copyright (C) 2004-2014 Edward d'Auvergne #
# #
# This file is part of the program relax (http://www.nmr-relax.com). #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
# Python module imports.
from copy import deepcopy
from math import acos, atan2, cos, pi, sin, sqrt
from numpy import array, cross, dot, float64, hypot, transpose, zeros
from numpy.linalg import norm
from random import gauss
# relax module imports.
from lib.geometry.angles import wrap_angles
from lib.geometry.vectors import random_unit_vector
# Global variables.
EULER_NEXT = [1, 2, 0, 1] # Used in the matrix_indices() function.
EULER_TRANS_TABLE = {
'xzx': [0, 1, 1],
'yxy': [1, 1, 1],
'zyz': [2, 1, 1],
'xzy': [0, 1, 0],
'yxz': [1, 1, 0],
'zyx': [2, 1, 0],
'xyx': [0, 0, 1],
'yzy': [1, 0, 1],
'zxz': [2, 0, 1],
'xyz': [0, 0, 0],
'yzx': [1, 0, 0],
'zxy': [2, 0, 0]
}
EULER_EPSILON = 1e-5
def axis_angle_to_euler_xyx(axis, angle):
"""Convert the axis-angle notation to xyx Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_xyx() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the xyx convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_xyx(R)
def axis_angle_to_euler_xyz(axis, angle):
"""Convert the axis-angle notation to xyz Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_xyz() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the xyz convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_xyz(R)
def axis_angle_to_euler_xzx(axis, angle):
"""Convert the axis-angle notation to xzx Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_xzx() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the xzx convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_xzx(R)
def axis_angle_to_euler_xzy(axis, angle):
"""Convert the axis-angle notation to xzy Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_xzy() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the xzy convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_xzy(R)
def axis_angle_to_euler_yxy(axis, angle):
"""Convert the axis-angle notation to yxy Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_yxy() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the yxy convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_yxy(R)
def axis_angle_to_euler_yxz(axis, angle):
"""Convert the axis-angle notation to yxz Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_yxz() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the yxz convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_yxz(R)
def axis_angle_to_euler_yzx(axis, angle):
"""Convert the axis-angle notation to yzx Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_yzx() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the yzx convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_yzx(R)
def axis_angle_to_euler_yzy(axis, angle):
"""Convert the axis-angle notation to yzy Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_yzy() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the yzy convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_yzy(R)
def axis_angle_to_euler_zxy(axis, angle):
"""Convert the axis-angle notation to zxy Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_zxy() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the zxy convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_zxy(R)
def axis_angle_to_euler_zxz(axis, angle):
"""Convert the axis-angle notation to zxz Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_zxz() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the zxz convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_zxz(R)
def axis_angle_to_euler_zyx(axis, angle):
"""Convert the axis-angle notation to zyx Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_zyx() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the zyx convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_zyx(R)
def axis_angle_to_euler_zyz(axis, angle):
"""Convert the axis-angle notation to zyz Euler angles.
This first generates a rotation matrix via axis_angle_to_R() and then used this together with R_to_euler_zyz() to obtain the Euler angles.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@return: The alpha, beta, and gamma Euler angles in the zyz convention.
@rtype: float, float, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
axis_angle_to_R(axis, angle, R)
# Return the Euler angles.
return R_to_euler_zyz(R)
def axis_angle_to_R(axis, angle, R):
"""Generate the rotation matrix from the axis-angle notation.
Conversion equations
====================
From Wikipedia (U{http://en.wikipedia.org/wiki/Rotation_matrix}), the conversion is given by::
c = cos(angle); s = sin(angle); C = 1-c
xs = x*s; ys = y*s; zs = z*s
xC = x*C; yC = y*C; zC = z*C
xyC = x*yC; yzC = y*zC; zxC = z*xC
[ x*xC+c xyC-zs zxC+ys ]
[ xyC+zs y*yC+c yzC-xs ]
[ zxC-ys yzC+xs z*zC+c ]
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@param R: The 3x3 rotation matrix to update.
@type R: 3x3 numpy array
"""
# Trig factors.
ca = cos(angle)
sa = sin(angle)
C = 1 - ca
# Depack the axis.
x, y, z = axis
# Multiplications (to remove duplicate calculations).
xs = x*sa
ys = y*sa
zs = z*sa
xC = x*C
yC = y*C
zC = z*C
xyC = x*yC
yzC = y*zC
zxC = z*xC
# Update the rotation matrix.
R[0, 0] = x*xC + ca
R[0, 1] = xyC - zs
R[0, 2] = zxC + ys
R[1, 0] = xyC + zs
R[1, 1] = y*yC + ca
R[1, 2] = yzC - xs
R[2, 0] = zxC - ys
R[2, 1] = yzC + xs
R[2, 2] = z*zC + ca
def axis_angle_to_quaternion(axis, angle, quat, norm_flag=True):
"""Generate the quaternion from the axis-angle notation.
Conversion equations
====================
From Wolfram MathWorld (U{http://mathworld.wolfram.com/Quaternion.html}), the conversion is given by::
q = (cos(angle/2), n * sin(angle/2)),
where q is the quaternion and n is the unit vector representing the rotation axis.
@param axis: The 3D rotation axis.
@type axis: numpy array, len 3
@param angle: The rotation angle.
@type angle: float
@param quat: The quaternion structure.
@type quat: numpy 4D, rank-1 array
@keyword norm_flag: A flag which if True forces the axis to be converted to a unit vector.
@type norm_flag: bool
"""
# Convert to unit vector.
if norm_flag:
axis = axis / norm(axis)
# The scalar component of q.
quat[0] = cos(angle/2)
# The vector component.
quat[1:] = axis * sin(angle/2)
def copysign(x, y):
"""Return x with the sign of y.
This is defined as::
copysign(x, y) = abs(x) / abs(y) * y
@param x: The value.
@type x: float
@param y: The value.
@type y: float
@return: x with the sign of y.
@rtype: float
"""
# Return the value.
return abs(x) / abs(y) * y
def euler_to_axis_angle_xyx(alpha, beta, gamma):
"""Convert the xyx Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_xyx(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_xyz(alpha, beta, gamma):
"""Convert the xyz Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_xyz(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_xzx(alpha, beta, gamma):
"""Convert the xzx Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_xzx(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_xzy(alpha, beta, gamma):
"""Convert the xzy Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_xzy(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_yxy(alpha, beta, gamma):
"""Convert the yxy Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_yxy(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_yxz(alpha, beta, gamma):
"""Convert the yxz Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_yxz(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_yzx(alpha, beta, gamma):
"""Convert the yzx Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_yzx(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_yzy(alpha, beta, gamma):
"""Convert the yzy Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_yzy(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_zxy(alpha, beta, gamma):
"""Convert the zxy Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_zxy(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_zxz(alpha, beta, gamma):
"""Convert the zxz Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_zxz(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_zyx(alpha, beta, gamma):
"""Convert the zyx Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_zyx(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_axis_angle_zyz(alpha, beta, gamma):
"""Convert the zyz Euler angles to axis-angle notation.
This function first generates a rotation matrix via euler_*_to_R() and then uses R_to_axis_angle() to convert to the axis and angle notation.
@param alpha: The alpha Euler angle in rad.
@type alpha: float
@param beta: The beta Euler angle in rad.
@type beta: float
@param gamma: The gamma Euler angle in rad.
@type gamma: float
@return: The 3D rotation axis and angle.
@rtype: numpy 3D rank-1 array, float
"""
# Init.
R = zeros((3, 3), float64)
# Get the rotation.
euler_to_R_zyz(alpha, beta, gamma, R)
# Return the axis and angle.
return R_to_axis_angle(R)
def euler_to_R_xyx(alpha, beta, gamma, R):
    """Populate R with the x-y-x Euler angle convention rotation matrix.

    This is the standard xyx entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first x-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the y-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second x-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = cb, sb * sg, -sb * cg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = sa * sb, ca * cg - sa * cb * sg, ca * sg + sa * cb * cg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = ca * sb, -sa * cg - ca * cb * sg, -sa * sg + ca * cb * cg
def euler_to_R_xyz(alpha, beta, gamma, R):
    """Populate R with the x-y-z Euler angle convention rotation matrix.

    This is the standard xyz entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the x-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the y-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the z-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = cb * cg, cb * sg, -sb

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -ca * sg + sa * sb * cg, ca * cg + sa * sb * sg, sa * cb

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sa * sg + ca * sb * cg, -sa * cg + ca * sb * sg, ca * cb
def euler_to_R_xzx(alpha, beta, gamma, R):
    """Populate R with the x-z-x Euler angle convention rotation matrix.

    This is the standard xzx entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first x-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the z-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second x-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = cb, sb * cg, sb * sg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -ca * sb, -sa * sg + ca * cb * cg, sa * cg + ca * cb * sg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sa * sb, -ca * sg - sa * cb * cg, ca * cg - sa * cb * sg
def euler_to_R_xzy(alpha, beta, gamma, R):
    """Populate R with the x-z-y Euler angle convention rotation matrix.

    This is the standard xzy entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the x-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the z-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the y-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = cb * cg, sb, -cb * sg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = sa * sg - ca * sb * cg, ca * cb, sa * cg + ca * sb * sg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = ca * sg + sa * sb * cg, -sa * cb, ca * cg - sa * sb * sg
def euler_to_R_yxy(alpha, beta, gamma, R):
    """Populate R with the y-x-y Euler angle convention rotation matrix.

    This is the standard yxy entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first y-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the x-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second y-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cg - sa * cb * sg, sa * sb, -ca * sg - sa * cb * cg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = sb * sg, cb, sb * cg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sa * cg + ca * cb * sg, -ca * sb, -sa * sg + ca * cb * cg
def euler_to_R_yxz(alpha, beta, gamma, R):
    """Populate R with the y-x-z Euler angle convention rotation matrix.

    This is the standard yxz entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the y-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the x-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the z-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cg - sa * sb * sg, ca * sg + sa * sb * cg, -sa * cb

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -cb * sg, cb * cg, sb

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sa * cg + ca * sb * sg, sa * sg - ca * sb * cg, ca * cb
def euler_to_R_yzx(alpha, beta, gamma, R):
    """Populate R with the y-z-x Euler angle convention rotation matrix.

    This is the standard yzx entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the y-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the z-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the x-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cb, sa * sg + ca * sb * cg, -sa * cg + ca * sb * sg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -sb, cb * cg, cb * sg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sa * cb, -ca * sg + sa * sb * cg, ca * cg + sa * sb * sg
def euler_to_R_yzy(alpha, beta, gamma, R):
    """Populate R with the y-z-y Euler angle convention rotation matrix.

    This is the standard yzy entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first y-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the z-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second y-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = -sa * sg + ca * cb * cg, ca * sb, -sa * cg - ca * cb * sg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -sb * cg, cb, sb * sg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = ca * sg + sa * cb * cg, sa * sb, ca * cg - sa * cb * sg
def euler_to_R_zxy(alpha, beta, gamma, R):
    """Populate R with the z-x-y Euler angle convention rotation matrix.

    This is the standard zxy entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the z-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the x-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the y-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cg + sa * sb * sg, sa * cb, -ca * sg + sa * sb * cg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -sa * cg + ca * sb * sg, ca * cb, sa * sg + ca * sb * cg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = cb * sg, -sb, cb * cg
def euler_to_R_zxz(alpha, beta, gamma, R):
    """Populate R with the z-x-z Euler angle convention rotation matrix.

    This is the standard zxz entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first z-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the x-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second z-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cg - sa * cb * sg, ca * sg + sa * cb * cg, sa * sb

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -sa * cg - ca * cb * sg, -sa * sg + ca * cb * cg, ca * sb

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sb * sg, -sb * cg, cb
def euler_to_R_zyx(alpha, beta, gamma, R):
    """Populate R with the z-y-x Euler angle convention rotation matrix.

    This is the standard zyx entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the z-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the y-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the x-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = ca * cb, sa * cg + ca * sb * sg, sa * sg - ca * sb * cg

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -sa * cb, ca * cg - sa * sb * sg, ca * sg + sa * sb * cg

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sb, -cb * sg, cb * cg
def euler_to_R_zyz(alpha, beta, gamma, R):
    """Populate R with the z-y-z Euler angle convention rotation matrix.

    This is the standard zyz entry of the table of Euler-angle rotation matrices (U{http://en.wikipedia.org/wiki/Euler_angles#Table_of_matrices}), with the columns of R being the rotated unit vectors [mux, muy, muz].

    @param alpha:   The alpha Euler angle in rad, for the first z-rotation.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad, for the y-rotation.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad, for the second z-rotation.
    @type gamma:    float
    @param R:       The 3x3 rotation matrix, updated in place.
    @type R:        3x3 numpy array
    """
    # The trigonometric terms, computed once.
    sa, ca = sin(alpha), cos(alpha)
    sb, cb = sin(beta), cos(beta)
    sg, cg = sin(gamma), cos(gamma)

    # Column 0 - the mux unit vector.
    R[0, 0], R[1, 0], R[2, 0] = -sa * sg + ca * cb * cg, sa * cg + ca * cb * sg, -ca * sb

    # Column 1 - the muy unit vector.
    R[0, 1], R[1, 1], R[2, 1] = -ca * sg - sa * cb * cg, ca * cg - sa * cb * sg, sa * sb

    # Column 2 - the muz unit vector.
    R[0, 2], R[1, 2], R[2, 2] = sb * cg, sb * sg, cb
def matrix_indices(i, neg, alt):
    """Calculate the parameteric indices i, j, k, and h.

    This is one of the algorithms of Ken Shoemake in "Euler Angle Conversion. Graphics Gems IV. Paul Heckbert (ed.). Academic Press, 1994, ISBN: 0123361567. pp. 222-229." (U{http://www.graphicsgems.org/}).

    The indices (i, j, k) are a permutation of (x, y, z), and the index h corresponds to the row containing the Givens argument a.

    @param i:   The index i.
    @type i:    int
    @param neg: Zero if (i, j, k) is an even permutation of (x, y, z) or one if odd.
    @type neg:  int
    @param alt: Zero if the first and last system axes are the same, or one if they are different.
    @type alt:  int
    @return:    The values of j, k, and h.
    @rtype:     tuple of int
    """
    # The permuted axis indices, taken from the cyclic successor table.
    j = EULER_NEXT[i + neg]
    k = EULER_NEXT[i + 1 - neg]

    # The Givens rotation row index: k when the first and last axes differ, i otherwise.
    h = k if alt else i

    return j, k, h
def R_random_axis(R, angle=0.0):
    """Generate a random rotation matrix of fixed angle via the axis-angle notation.

    Uniform point sampling on a unit sphere is used to generate a random axis orientation, which together with the fixed rotation angle gives the random rotation matrix.

    @param R:       A 3D matrix to convert to the rotation matrix.
    @type R:        numpy 3D, rank-2 array
    @keyword angle: The fixed rotation angle.
    @type angle:    float
    """
    # Sample a uniformly distributed random rotation axis.
    axis = zeros(3, float64)
    random_unit_vector(axis)

    # Build the rotation matrix from the axis-angle pair.
    axis_angle_to_R(axis, angle, R)
def R_random_hypersphere(R):
    """Generate a random rotation matrix using 4D hypersphere point picking.

    A random unit quaternion is produced by drawing each of the four components from a Gaussian distribution and normalising the vector, which samples rotations uniformly.

    @param R:   A 3D matrix to convert to the rotation matrix.
    @type R:    numpy 3D, rank-2 array
    """
    # Draw a random 4D Gaussian vector and normalise it into a unit quaternion.
    quat = array([gauss(0, 1) for _ in range(4)], float64)
    quat = quat / norm(quat)

    # Expand the quaternion into the rotation matrix.
    quaternion_to_R(quat, R)
def R_to_axis_angle(R):
    """Convert the rotation matrix into the axis-angle notation.

    From Wikipedia (U{http://en.wikipedia.org/wiki/Rotation_matrix}), the conversion is::

        x = Qzy-Qyz
        y = Qxz-Qzx
        z = Qyx-Qxy
        r = hypot(x,hypot(y,z))
        t = Qxx+Qyy+Qzz
        theta = atan2(r,t-1)

    @param R:   The 3x3 rotation matrix.
    @type R:    3x3 numpy array
    @return:    The 3D rotation axis and angle.
    @rtype:     numpy 3D rank-1 array, float
    """
    # The unnormalised axis, from the antisymmetric part of R.
    axis = zeros(3, float64)
    axis[0] = R[2, 1] - R[1, 2]
    axis[1] = R[0, 2] - R[2, 0]
    axis[2] = R[1, 0] - R[0, 1]

    # The rotation angle, from the axis magnitude and the matrix trace.
    r = hypot(axis[0], hypot(axis[1], axis[2]))
    t = R[0, 0] + R[1, 1] + R[2, 2]
    theta = atan2(r, t - 1)

    # Normalise the axis, guarding against the zero-rotation case where r is 0.
    if r != 0.0:
        axis = axis / r

    return axis, theta
def R_to_euler(R, notation, axes_rot='static', second_sol=False):
    """Convert the rotation matrix to the given Euler angles.

    This uses the algorithms of Ken Shoemake in "Euler Angle Conversion. Graphics Gems IV. Paul Heckbert (ed.). Academic Press, 1994, ISBN: 0123361567. pp. 222-229." (U{http://www.graphicsgems.org/}).

    The Euler angle notation can be one of:
        - xyx
        - xyz
        - xzx
        - xzy
        - yxy
        - yxz
        - yzx
        - yzy
        - zxy
        - zxz
        - zyx
        - zyz

    @param R:               The 3x3 rotation matrix to extract the Euler angles from.
    @type R:                3D, rank-2 numpy array
    @param notation:        The Euler angle notation to use.
    @type notation:         str
    @keyword axes_rot:      The axes rotation - either 'static', the static axes or 'rotating', the rotating axes.
    @type axes_rot:         str
    @keyword second_sol:    Return the second solution instead (currently unused).
    @type second_sol:       bool
    @return:                The alpha, beta, and gamma Euler angles in the given convention.
    @rtype:                 tuple of float
    """
    # Duplicate R to avoid its modification.
    R = deepcopy(R)

    # Get the Euler angle info - the first axis index i, the permutation parity neg, and the axis repetition flag alt for this notation.
    i, neg, alt = EULER_TRANS_TABLE[notation]

    # Axis rotations.  Anything other than 'static' is treated as the rotating-axes convention.
    rev = 0
    if axes_rot != 'static':
        rev = 1

    # Find the other indices.
    j, k, h = matrix_indices(i, neg, alt)

    # No axis repetition.
    if alt:
        # Sine of the beta angle.
        sin_beta = sqrt(R[i, j]**2 + R[i, k]**2)

        # Non-zero sin(beta).
        if sin_beta > EULER_EPSILON:
            alpha = atan2( R[i, j],  R[i, k])
            beta  = atan2( sin_beta, R[i, i])
            gamma = atan2( R[j, i], -R[k, i])

        # sin(beta) is zero - a gimbal-lock configuration, so gamma is fixed at 0 and alpha absorbs the full rotation.
        else:
            alpha = atan2(-R[j, k], R[j, j])
            beta  = atan2( sin_beta, R[i, i])
            gamma = 0.0

    # Axis repetition.
    else:
        # Cosine of the beta angle.
        cos_beta = sqrt(R[i, i]**2 + R[j, i]**2)

        # Non-zero cos(beta).
        if cos_beta > EULER_EPSILON:
            alpha = atan2( R[k, j], R[k, k])
            beta  = atan2(-R[k, i], cos_beta)
            gamma = atan2( R[j, i], R[i, i])

        # cos(beta) is zero - the degenerate configuration, gamma is fixed at 0.
        else:
            alpha = atan2(-R[j, k], R[j, j])
            beta  = atan2(-R[k, i], cos_beta)
            gamma = 0.0

    # Remapping.  Odd permutations flip the signs of all three angles.
    if neg:
        alpha, beta, gamma = -alpha, -beta, -gamma

    # For the rotating-axes convention, alpha and gamma swap roles.
    if rev:
        alpha_old = alpha
        alpha = gamma
        gamma = alpha_old

    # Angle wrapping into the [0, 2pi) range, using the (alpha+pi, -beta, gamma+pi) equivalence when beta is negative.
    if alt and -pi < beta < 0.0:
        alpha = alpha + pi
        beta = -beta
        gamma = gamma + pi

    alpha = wrap_angles(alpha, 0.0, 2.0*pi)
    beta = wrap_angles(beta, 0.0, 2.0*pi)
    gamma = wrap_angles(gamma, 0.0, 2.0*pi)

    # Return the Euler angles.
    return alpha, beta, gamma
def R_to_euler_xyx(R):
    """Extract the xyx convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the xyx convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='xyx')
def R_to_euler_xyz(R):
    """Extract the xyz convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the xyz convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='xyz')
def R_to_euler_xzx(R):
    """Extract the xzx convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the xzx convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='xzx')
def R_to_euler_xzy(R):
    """Extract the xzy convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the xzy convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='xzy')
def R_to_euler_yxy(R):
    """Extract the yxy convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the yxy convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='yxy')
def R_to_euler_yxz(R):
    """Extract the yxz convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the yxz convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='yxz')
def R_to_euler_yzx(R):
    """Extract the yzx convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the yzx convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='yzx')
def R_to_euler_yzy(R):
    """Extract the yzy convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the yzy convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='yzy')
def R_to_euler_zxy(R):
    """Extract the zxy convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the zxy convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='zxy')
def R_to_euler_zxz(R):
    """Extract the zxz convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the zxz convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='zxz')
def R_to_euler_zyx(R):
    """Extract the zyx convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the zyx convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='zyx')
def R_to_euler_zyz(R):
    """Extract the zyz convention Euler angles from the rotation matrix.

    @param R:   The 3x3 rotation matrix to extract the Euler angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The alpha, beta, and gamma Euler angles in the zyz convention.
    @rtype:     tuple of float
    """
    # Delegate to the generic Shoemake-based converter.
    return R_to_euler(R, notation='zyz')
def R_to_tilt_torsion(R):
    """Convert the rotation matrix to the tilt and torsion rotation angles.

    This notation is taken from "Bonev, I. A. and Gosselin, C. M. (2006) Analytical determination of the workspace of symmetrical spherical parallel mechanisms. IEEE Transactions on Robotics, 22(5), 1011-1017".

    @param R:   The 3x3 rotation matrix to extract the tilt and torsion angles from.
    @type R:    3D, rank-2 numpy array
    @return:    The phi, theta, and sigma tilt and torsion angles.
    @rtype:     tuple of float
    """
    # Decompose the matrix into zyz Euler angles.
    alpha, beta, gamma = R_to_euler(R, 'zyz')

    # Map the zyz angles onto the tilt (phi, theta) and torsion (sigma) angles.
    return gamma, beta, alpha + gamma
def R_to_quaternion(R, quat):
    """Convert a rotation matrix into quaternion form.

    This is from Wikipedia (U{http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion}), where::

        w = 0.5*sqrt(1+Qxx+Qyy+Qzz),
        x = copysign(0.5*sqrt(1+Qxx-Qyy-Qzz),Qzy-Qyz),
        y = copysign(0.5*sqrt(1-Qxx+Qyy-Qzz),Qxz-Qzx),
        z = copysign(0.5*sqrt(1-Qxx-Qyy+Qzz),Qyx-Qxy),

    with the quaternion defined as q = (w, x, y, z).

    NOTE(review): when an off-diagonal difference is exactly zero the vector component is left at zero - for 180 degree rotations this sign-based method cannot recover the component; confirm callers avoid that case.

    @param R:       The 3D rotation matrix.
    @type R:        numpy 3D, rank-2 array
    @param quat:    The quaternion, updated in place.
    @type quat:     numpy 4D, rank-1 array
    """
    # The scalar component w, from the matrix trace.
    quat[0] = 0.5 * sqrt(1.0 + R[0, 0] + R[1, 1] + R[2, 2])

    # The vector components: magnitude from the diagonal, sign from the off-diagonal difference.
    diffs = (R[2, 1] - R[1, 2], R[0, 2] - R[2, 0], R[1, 0] - R[0, 1])
    mags = (
        1 + R[0, 0] - R[1, 1] - R[2, 2],
        1 - R[0, 0] + R[1, 1] - R[2, 2],
        1 - R[0, 0] - R[1, 1] + R[2, 2],
    )
    for idx in range(3):
        if diffs[idx]:
            quat[idx + 1] = copysign(0.5 * sqrt(mags[idx]), diffs[idx])
        else:
            quat[idx + 1] = diffs[idx]
def reverse_euler_xyx(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the xyx notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_xyx_to_R - the function defined in this module is euler_to_R_xyx).
    R = zeros((3, 3), float64)
    euler_to_R_xyx(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into xyx Euler angles.
    return R_to_euler_xyx(R)
def reverse_euler_xyz(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the xyz notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_xyz_to_R - the function defined in this module is euler_to_R_xyz).
    R = zeros((3, 3), float64)
    euler_to_R_xyz(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into xyz Euler angles.
    return R_to_euler_xyz(R)
def reverse_euler_xzx(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the xzx notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_xzx_to_R - the function defined in this module is euler_to_R_xzx).
    R = zeros((3, 3), float64)
    euler_to_R_xzx(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into xzx Euler angles.
    return R_to_euler_xzx(R)
def reverse_euler_xzy(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the xzy notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_xzy_to_R - the function defined in this module is euler_to_R_xzy).
    R = zeros((3, 3), float64)
    euler_to_R_xzy(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into xzy Euler angles.
    return R_to_euler_xzy(R)
def reverse_euler_yxy(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the yxy notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_yxy_to_R - the function defined in this module is euler_to_R_yxy).
    R = zeros((3, 3), float64)
    euler_to_R_yxy(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into yxy Euler angles.
    return R_to_euler_yxy(R)
def reverse_euler_yxz(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the yxz notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_yxz_to_R - the function defined in this module is euler_to_R_yxz).
    R = zeros((3, 3), float64)
    euler_to_R_yxz(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into yxz Euler angles.
    return R_to_euler_yxz(R)
def reverse_euler_yzx(alpha, beta, gamma):
    """Convert the given forward rotation Euler angles into the equivalent reverse rotation Euler angles.

    This is for the yzx notation.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Build the forward rotation matrix (the original called the undefined name euler_yzx_to_R - the function defined in this module is euler_to_R_yzx).
    R = zeros((3, 3), float64)
    euler_to_R_yzx(alpha, beta, gamma, R)

    # The reverse rotation is the transpose (inverse) of the orthogonal matrix.
    R = transpose(R)

    # Decompose the reversed rotation back into yzx Euler angles.
    return R_to_euler_yzx(R)
def reverse_euler_yzy(alpha, beta, gamma):
    """Determine the yzy Euler angles of the inverse rotation.

    The forward rotation is assembled as a rotation matrix, inverted by taking
    its transpose (rotation matrices are orthogonal), and decomposed back into
    yzy Euler angles.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Assemble the forward rotation matrix.
    rot = zeros((3, 3), float64)
    euler_yzy_to_R(alpha, beta, gamma, rot)

    # Invert via the transpose and decompose back into Euler angles.
    return R_to_euler_yzy(transpose(rot))
def reverse_euler_zxy(alpha, beta, gamma):
    """Determine the zxy Euler angles of the inverse rotation.

    The forward rotation is assembled as a rotation matrix, inverted by taking
    its transpose (rotation matrices are orthogonal), and decomposed back into
    zxy Euler angles.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Assemble the forward rotation matrix.
    rot = zeros((3, 3), float64)
    euler_zxy_to_R(alpha, beta, gamma, rot)

    # Invert via the transpose and decompose back into Euler angles.
    return R_to_euler_zxy(transpose(rot))
def reverse_euler_zxz(alpha, beta, gamma):
    """Determine the zxz Euler angles of the inverse rotation.

    The forward rotation is assembled as a rotation matrix, inverted by taking
    its transpose (rotation matrices are orthogonal), and decomposed back into
    zxz Euler angles.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Assemble the forward rotation matrix.
    rot = zeros((3, 3), float64)
    euler_zxz_to_R(alpha, beta, gamma, rot)

    # Invert via the transpose and decompose back into Euler angles.
    return R_to_euler_zxz(transpose(rot))
def reverse_euler_zyx(alpha, beta, gamma):
    """Determine the zyx Euler angles of the inverse rotation.

    The forward rotation is assembled as a rotation matrix, inverted by taking
    its transpose (rotation matrices are orthogonal), and decomposed back into
    zyx Euler angles.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Assemble the forward rotation matrix.
    rot = zeros((3, 3), float64)
    euler_zyx_to_R(alpha, beta, gamma, rot)

    # Invert via the transpose and decompose back into Euler angles.
    return R_to_euler_zyx(transpose(rot))
def reverse_euler_zyz(alpha, beta, gamma):
    """Determine the zyz Euler angles of the inverse rotation.

    The forward rotation is assembled as a rotation matrix, inverted by taking
    its transpose (rotation matrices are orthogonal), and decomposed back into
    zyz Euler angles.

    @param alpha:   The alpha Euler angle in rad.
    @type alpha:    float
    @param beta:    The beta Euler angle in rad.
    @type beta:     float
    @param gamma:   The gamma Euler angle in rad.
    @type gamma:    float
    @return:        The alpha, beta, and gamma Euler angles for the reverse rotation.
    @rtype:         tuple of float
    """
    # Assemble the forward rotation matrix.
    rot = zeros((3, 3), float64)
    euler_to_R_zyz(alpha, beta, gamma, rot)

    # Invert via the transpose and decompose back into Euler angles.
    return R_to_euler_zyz(transpose(rot))
def quaternion_to_axis_angle(quat):
    """Convert a quaternion into the axis-angle notation.

    Conversion equations
    ====================

    From Wolfram MathWorld (U{http://mathworld.wolfram.com/Quaternion.html}), the quaternion is
    defined as::

        q = (cos(angle/2), n * sin(angle/2)),

    where n is the unit vector of the rotation axis.  Therefore::

        angle = 2*acos(w),
        axis = [x, y, z] / sin(angle/2).

    @param quat:    The quaternion.
    @type quat:     numpy 4D, rank-1 array
    @return:        The 3D rotation axis and angle.
    @rtype:         numpy 3D rank-1 array, float
    """
    # The angle.  The scalar part is clamped to [-1, 1] so that floating point
    # rounding of a normalised quaternion cannot push acos() out of its domain.
    angle = 2 * acos(max(-1.0, min(1.0, quat[0])))

    # The axis.
    if angle:
        axis = quat[1:] / sin(angle/2)
    else:
        # No rotation - the axis is undefined, so return the zero vector.
        axis = quat[1:] * 0.0

    # Return
    return axis, angle
def quaternion_to_R(quat, R):
    """Convert a quaternion into rotation matrix form.

    This is from Wikipedia (U{http://en.wikipedia.org/wiki/Rotation_matrix#Quaternion}), where::

              | 1 - 2y**2 - 2z**2        2xy - 2zw         2xz + 2yw      |
        Q  =  |      2xy + 2zw      1 - 2x**2 - 2z**2      2yz - 2xw      |,
              |      2xz - 2yw           2yz + 2xw    1 - 2x**2 - 2y**2   |

    and where the quaternion is defined as q = (w, x, y, z).  This has been verified using Simo
    Saerkkae's "Notes on Quaternions" at U{http://www.lce.hut.fi/~ssarkka/}.

    @param quat:    The quaternion.
    @type quat:     numpy 4D, rank-1 array
    @param R:       A 3D matrix to convert to the rotation matrix.
    @type R:        numpy 3D, rank-2 array
    """
    # Unpack the quaternion elements.
    w, x, y, z = quat

    # The doubled products appearing in the matrix elements (computed once).
    two_x2 = 2.0 * x**2
    two_y2 = 2.0 * y**2
    two_z2 = 2.0 * z**2
    two_xw = 2.0 * x*w
    two_xy = 2.0 * x*y
    two_xz = 2.0 * x*z
    two_yw = 2.0 * y*w
    two_yz = 2.0 * y*z
    two_zw = 2.0 * z*w

    # Fill the matrix row by row.
    R[0, 0] = 1.0 - two_y2 - two_z2
    R[0, 1] = two_xy - two_zw
    R[0, 2] = two_xz + two_yw
    R[1, 0] = two_xy + two_zw
    R[1, 1] = 1.0 - two_x2 - two_z2
    R[1, 2] = two_yz - two_xw
    R[2, 0] = two_xz - two_yw
    R[2, 1] = two_yz + two_xw
    R[2, 2] = 1.0 - two_x2 - two_y2
def tilt_torsion_to_R(phi, theta, sigma, R):
    """Build a rotation matrix from the tilt and torsion rotation angles.

    This notation is taken from "Bonev, I. A. and Gosselin, C. M. (2006) Analytical determination of the workspace of symmetrical spherical parallel mechanisms. IEEE Transactions on Robotics, 22(5), 1011-1017".

    @param phi:     The angle defining the x-y plane rotation axis.
    @type phi:      float
    @param theta:   The tilt angle - the angle of rotation about the x-y plane rotation axis.
    @type theta:    float
    @param sigma:   The torsion angle - the angle of rotation about the z' axis.
    @type sigma:    float
    @param R:       The 3x3 rotation matrix to update.
    @type R:        3D, rank-2 numpy array
    """
    # The tilt and torsion angles map directly onto the zyz Euler angles
    # (alpha, beta, gamma) = (sigma - phi, theta, phi).
    euler_to_R_zyz(sigma - phi, theta, phi, R)
def two_vect_to_R(vector_orig, vector_fin, R):
    """Calculate the rotation matrix required to rotate from one vector to another.

    For the rotation of one vector to another, there are an infinit series of rotation matrices
    possible.  Due to axially symmetry, the rotation axis can be any vector lying in the symmetry
    plane between the two vectors.  Hence the axis-angle convention will be used to construct the
    matrix with the rotation axis defined as the cross product of the two vectors.  The rotation
    angle is the arccosine of the dot product of the two unit vectors.

    Given a unit vector parallel to the rotation axis, w = [x, y, z] and the rotation angle a,
    the rotation matrix R is::

              |  1 + (1-cos(a))*(x*x-1)   -z*sin(a)+(1-cos(a))*x*y   y*sin(a)+(1-cos(a))*x*z |
        R  =  |  z*sin(a)+(1-cos(a))*x*y   1 + (1-cos(a))*(y*y-1)   -x*sin(a)+(1-cos(a))*y*z |
              | -y*sin(a)+(1-cos(a))*x*z   x*sin(a)+(1-cos(a))*y*z   1 + (1-cos(a))*(z*z-1)  |

    @param vector_orig: The unrotated vector defined in the reference frame.
    @type vector_orig:  numpy array, len 3
    @param vector_fin:  The rotated vector defined in the reference frame.
    @type vector_fin:   numpy array, len 3
    @param R:           The 3x3 rotation matrix to update.
    @type R:            3x3 numpy array
    """
    # Convert the vectors to unit vectors.
    vector_orig = vector_orig / norm(vector_orig)
    vector_fin = vector_fin / norm(vector_fin)

    # The rotation axis (normalised).
    axis = cross(vector_orig, vector_fin)
    axis_len = norm(axis)
    if axis_len != 0.0:
        axis = axis / axis_len

    # The dot product, clamped to [-1, 1] so that rounding errors on unit
    # vectors cannot push acos() outside of its domain.
    orig_fin_dot = max(-1.0, min(1.0, dot(vector_orig, vector_fin)))

    # Degenerate antiparallel case:  the cross product vanishes but the
    # rotation is by pi, so any unit vector perpendicular to vector_orig is a
    # valid axis (the original code left the zero vector, yielding -I which is
    # not a rotation matrix).
    if axis_len == 0.0 and orig_fin_dot < 0.0:
        axis = cross(vector_orig, [1.0, 0.0, 0.0])
        if norm(axis) == 0.0:
            axis = cross(vector_orig, [0.0, 1.0, 0.0])
        axis = axis / norm(axis)

    # Alias the axis coordinates.
    x = axis[0]
    y = axis[1]
    z = axis[2]

    # The rotation angle.
    angle = acos(orig_fin_dot)

    # Trig functions (only need to do this maths once!).
    ca = cos(angle)
    sa = sin(angle)

    # Calculate the rotation matrix elements.
    R[0, 0] = 1.0 + (1.0 - ca)*(x**2 - 1.0)
    R[0, 1] = -z*sa + (1.0 - ca)*x*y
    R[0, 2] = y*sa + (1.0 - ca)*x*z
    R[1, 0] = z*sa+(1.0 - ca)*x*y
    R[1, 1] = 1.0 + (1.0 - ca)*(y**2 - 1.0)
    R[1, 2] = -x*sa+(1.0 - ca)*y*z
    R[2, 0] = -y*sa+(1.0 - ca)*x*z
    R[2, 1] = x*sa+(1.0 - ca)*y*z
    R[2, 2] = 1.0 + (1.0 - ca)*(z**2 - 1.0)
| [
"bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5"
] | bugman@b7916896-f9f9-0310-9fe5-b3996d8957d5 |
2d345456c811079050eed477f26593bbabcee8e7 | 9fd767635ecc37f0650a409f88ccc4f2127f1a73 | /estudios/views.py | 80bf2633acdf63bc046b8b0754f378d43ead7138 | [
"MIT"
] | permissive | JVacca12/FIRST | d3175f12595fc39403d72d436a234b7ba6ac2438 | e3906209cae1198e1fbda4d00bc0a906e8294a69 | refs/heads/main | 2023-09-04T18:11:57.460379 | 2021-09-30T04:40:48 | 2021-09-30T04:40:48 | 410,325,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | # Django REST Framework
from rest_framework import mixins, status, viewsets
from rest_framework.response import Response
# Models
from estudios.models import Estudios
# Permissions
from rest_framework.permissions import IsAuthenticated
from users.permissions import IsStandardUser
# Serializers
from estudios.serializers import (EstudiosModelSerializer, EstudiosSerializer)
class EstudiosViewSet(mixins.UpdateModelMixin,
                      mixins.DestroyModelMixin,
                      mixins.ListModelMixin,
                      mixins.CreateModelMixin,
                      viewsets.GenericViewSet):
    """Create/list/update/destroy endpoints (no retrieve) for Estudios records.

    Every action requires an authenticated standard user, and list/update/
    destroy are scoped to the requesting user's own records via get_queryset().
    """
    serializer_class = EstudiosModelSerializer
    def get_permissions(self):  # Here we specify which permissions the user needs in order to use this VIEW
        permission_classes = [IsAuthenticated, IsStandardUser]
        return [permission() for permission in permission_classes]
    def create(self, request, *args, **kwargs):
        """Validate and save a new Estudios record, returning its serialized form."""
        # The write serializer receives the request in its context -- presumably
        # so it can attach the authenticated user; confirm in EstudiosSerializer.
        serializer = EstudiosSerializer(data=request.data, context={"request": self.request})
        serializer.is_valid(raise_exception=True)
        est = serializer.save()
        # Respond with the read serializer's representation of the new object.
        data = EstudiosModelSerializer(est).data
        return Response(data, status=status.HTTP_201_CREATED)
    def get_queryset(self):
        """Filter to the authenticated user's own records."""
        queryset = Estudios.objects.filter(user=self.request.user)
return queryset | [
"[email protected]"
] | |
f02e9e368f9dbaa8e480bcb7e184e688a9e90469 | ec79881536aa2136a6bde6da632eabc4adf603ed | /src/compute_tfidf.py | d3ee1029552388103f17a1333d0a56a51999eaad | [] | no_license | knkumar/tweet_analysis | 4cebfee436eed16d81fbda8c214ea5ce56ca788b | 1718cd0a864b86379a8a5d0b84486d1d97357b4d | refs/heads/master | 2020-05-09T23:12:00.089244 | 2012-06-19T17:28:54 | 2012-06-19T17:28:54 | 4,524,337 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | import shelve
import numpy as np
import sys
import time
import pickle as pl
import math
from settings import *
class c_tfidf:
    """Container for the shelve stores used in the tf-idf computation.

    Opens the Flickr/Twitter input shelves and the corresponding output
    shelves, and closes all four on destruction.  File names are built from
    the settings constants; ``bin_size`` is presumably also provided by the
    ``settings`` star-import -- TODO confirm.
    """
    def __init__(self):
        # Input shelves (one per data source).
        self.fldict = shelve.open("%s_%s"%(flickr_inp,bin_size))
        self.twitdict = shelve.open("%s_%s"%(twitter_inp,bin_size))
        # Output shelves for the per-user binned data.
        self.fl_ubins = shelve.open("%s_%s.dat"%(flickr_out,bin_size))
        self.tw_ubins = shelve.open("%s_%s.dat"%(twitter_out,bin_size))
    def __del__(self):
        # Flush and close all shelve files.
        self.fldict.close()
        self.twitdict.close()
        self.fl_ubins.close()
        self.tw_ubins.close()
def form_tuples(bin_size):
    """Copy the binned tags of users present in both datasets into output shelves.

    For every user name found in both the Flickr and the Twitter input
    shelves, the tag data (third element of each bin tuple) is written into
    the corresponding output shelf, keyed by user name and bin.

    Fixes with respect to the original implementation:
      - the Twitter branch of make_shelf() tested membership against the
        Flickr output shelf instead of the Twitter one;
      - entries were mutated in place, which shelve does not write back
        (without writeback=True), so updates to existing users were lost;
      - the side-effecting map() calls are now explicit loops (map() is lazy
        under Python 3 and would never execute).

    The two output shelves are returned still open; the caller owns them.
    """
    fldict = shelve.open("%s_%s"%(flickr_inp,bin_size))
    twitdict = shelve.open("%s_%s"%(twitter_inp,bin_size))

    # Users present in both data sources.
    unames = set(fldict.keys()) & set(twitdict.keys())

    fl_ubins = shelve.open("%s_%s"%(flickr_out,bin_size))
    tw_ubins = shelve.open("%s_%s"%(twitter_out,bin_size))

    def make_shelf(shelf, name, bin_id, tags):
        # Select the output store: 'f' -> Flickr, anything else -> Twitter.
        store = fl_ubins if shelf == 'f' else tw_ubins
        # shelve does not persist in-place mutation of stored values, so
        # read-modify-write the whole per-user dict.
        entry = store[name] if name in store else {}
        entry[bin_id] = tags
        store[name] = entry

    for name in unames:
        for bin_id in fldict[name].keys():
            make_shelf('f', name, bin_id, fldict[name][bin_id][2])
        for bin_id in twitdict[name].keys():
            make_shelf('t', name, bin_id, twitdict[name][bin_id][2])

    fldict.close()
    twitdict.close()
    return fl_ubins,tw_ubins
def main():
    # Run form_tuples() for each configured bin size and print the resulting
    # shelves.  NOTE: this module is Python 2 only (print statement below).
    bins = ["4"]
    for bin_size in bins:
        try:
            fl_ubins,tw_ubins = form_tuples(bin_size)
            print fl_ubins, tw_ubins
        except:
            # Report the exception type, then re-raise so the failure is visible.
            print("Unexpected error:", sys.exc_info()[0])
            raise
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit status.
    sys.exit(main())
| [
"[email protected]"
] | |
eb8f8368b4ae1c7f06f5fbb290ed6efb9ffbe3a2 | b3b066a566618f49ae83c81e963543a9b956a00a | /Python Data Science Toolbox (Part 1)/02_Default arguments, variable-length arguments and scope/05_Nested Functions II.py | 0c86dd3ba3ffbdb4f2819c05d637b47bd72b72ed | [] | no_license | ahmed-gharib89/DataCamp_Data_Scientist_with_Python_2020 | 666c4129c3f0b5d759b511529a365dfd36c12f1a | f3d20b788c8ef766e7c86c817e6c2ef7b69520b8 | refs/heads/master | 2022-12-22T21:09:13.955273 | 2020-09-30T01:16:05 | 2020-09-30T01:16:05 | 289,991,534 | 2 | 0 | null | 2020-08-24T17:15:43 | 2020-08-24T17:15:42 | null | UTF-8 | Python | false | false | 1,885 | py | '''
Nested Functions II
100xp
Great job, you've just nested a function within another function. One other pretty cool
reason for nesting functions is the idea of a closure. This means that the nested or inner
function remembers the state of its enclosing scope when called. Thus, anything defined
locally in the enclosing scope is available to the inner function even when the outer
function has finished execution.
Let's move forward then! In this exercise, you will complete the definition of the inner
function inner_echo() and then call echo() a couple of times, each with a different argument.
Complete the exercise and see what the output will be!
Instructions
-Complete the function header of the inner function with the function name inner_echo()
and a single parameter word1.
-Complete the function echo() so that it returns inner_echo.
-We have called echo(), passing 2 as an argument, and assigned the resulting function to
twice. Your job is to call echo(), passing 3 as an argument. Assign the resulting function
to thrice.
-Hit Submit to call twice() and thrice() and print the results.
'''
# Define echo
def echo(n):
    """Return a function that concatenates n copies of its argument."""
    def inner_echo(word1):
        """Concatenate n copies of word1."""
        # The closure remembers n from the enclosing echo() call.
        return word1 * n
    return inner_echo
# Call echo: twice -- a function that repeats its argument two times
twice = echo(2)
# Call echo: thrice -- a function that repeats its argument three times
thrice = echo(3)
# Call twice() and thrice() then print
print(twice('hello'), thrice('hello'))
#========================================================#
# DEVELOPER #
# BasitAminBhatti #
# Github #
# https://github.com/basitaminbhatti #
#========================================================# | [
"Your-Email"
] | Your-Email |
8426db9d315c63d1b58b87fc61bc26cdac2cfdd4 | facb69b5ac8741234546ec3932b776f3f28a965b | /samples/src/main/resources/datasets/python/84.py | c1567b02f9f3629e38c3a57007b11e70996d0da6 | [
"Apache-2.0"
] | permissive | CommanderTvis/kotlingrad | 47bf579d885e893a0214269a117fe810bd5a4ecc | 56a6d4d03544db1bcaa93c31ffc7e075bc564e64 | refs/heads/master | 2023-06-25T06:43:21.644844 | 2021-07-29T04:29:55 | 2021-07-29T04:29:55 | 365,717,366 | 0 | 0 | Apache-2.0 | 2021-05-09T09:43:28 | 2021-05-09T09:43:28 | null | UTF-8 | Python | false | false | 30 | py | def test21(a, b):
a + b.x
| [
"[email protected]"
] | |
bbc86de8d4e07cb290ae05a8ae472beb042f7da0 | 6d2659f3ac85dbcc168835ab841dbfef99ad5b97 | /ctrl.py | ea99b0acc6fad0b8a273a6cc3d024ab8dedbef9a | [
"MIT"
] | permissive | sysid/munggoggo | 348db6282e1e6349321e1dceb81e6abb15706f43 | 6b2f398678c51bedc566be5dda579ffa4ff566c5 | refs/heads/master | 2023-01-28T21:45:03.330412 | 2022-12-13T15:29:58 | 2022-12-13T15:29:58 | 218,945,509 | 24 | 1 | MIT | 2023-01-17T20:59:27 | 2019-11-01T08:31:16 | Python | UTF-8 | Python | false | false | 6,870 | py | #!/usr/bin/env python
import asyncio
import logging
import sys
from datetime import datetime
from pathlib import Path
import click
sys.path.insert(0, str(Path(__file__).parent / "munggoggo"))
from behaviour import Behaviour
from core import Core
from messages import ListBehav, ManageBehav, ListTraceStore
from twpy import coro
from utils import setup_logging
_log = logging.getLogger()
setup_logging(level=logging.WARNING)
logging.getLogger("aio_pika").setLevel(logging.WARNING)
logging.getLogger("asyncio").setLevel(logging.INFO)
LOGGING_LEVEL = logging.WARNING
class Ctrl(Core):
    """Short-lived control agent used by the CLI commands to talk to peers."""
    @property
    def behaviour(self) -> Behaviour:
        # Listen on the "system" binding key and enable the RPC machinery that
        # the call()-based commands rely on.
        return Behaviour(self, binding_keys=["system"], configure_rpc=True)
    async def setup(self) -> None:
        # Register the behaviour so it is started and stopped with the agent.
        await self.add_runtime_dependency(self.behaviour)
@click.group()
@click.option("--debug", "-d", is_flag=True)
@click.pass_context
def cli(ctx, debug):
    # Root command group.  -d/--debug raises the module-level LOGGING_LEVEL,
    # which every subcommand applies to its short-lived Ctrl agent.
    ctx.ensure_object(dict)
    global LOGGING_LEVEL
    if debug:
        LOGGING_LEVEL = logging.DEBUG
@cli.command()
@click.argument("msg")
@click.argument("msg_type")
@click.argument("target")
@click.pass_context
@coro
async def send_message(ctx, msg, msg_type, target):
    # Send a single message directly to one named peer agent.
    async with Ctrl(identity="Ctrl") as a:
        # The control agent only lives for the duration of this command.
        a.logger.setLevel(LOGGING_LEVEL)
        click.echo(f"Sending type: '{msg_type}' msg: {msg} to {target}")
        await a.direct_send(msg=msg, msg_type=msg_type, target=target)
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
@cli.command()
@click.argument("msg")
@click.argument("msg_type")
@coro
async def broadcast(msg, msg_type):
    # Fan out one message to all peers on the bus (no specific target).
    async with Ctrl(identity="Ctrl") as a:
        a.logger.setLevel(LOGGING_LEVEL)
        click.echo(f"Broadcasting type: '{msg_type}' msg: {msg}")
        await a.fanout_send(msg=msg, msg_type=msg_type)
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
@cli.command()
@click.argument("agent")
@click.pass_context
@coro
async def list_behaviour(ctx, agent):
    # Query a remote agent for its behaviours via RPC and print each one.
    async with Ctrl(identity="Ctrl") as a:
        a.logger.setLevel(LOGGING_LEVEL)
        # Bail out early when the named agent is not a known peer.
        if not await target_exists(a, agent):
            return False
        click.echo(f"Listing behaviours of {agent}:")
        obj = ListBehav()
        result = await a.call(obj.to_rpc(), agent)
        for behav in result.to_dict().get("behavs", list()):
            click.secho(behav, fg="cyan")
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
@cli.command()
@click.pass_context
@coro
async def list_peers(ctx):
    # Print the names of all peers currently visible to the control agent.
    async with Ctrl(identity="Ctrl") as a:
        a.logger.setLevel(LOGGING_LEVEL)
        click.echo(f"Listing peers.")
        peers = await a.list_peers()
        for peer in peers:
            click.secho(f"{peer.get('name')}", fg="cyan")
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
async def target_exists(core: Core, target: str) -> bool:
    """Return True when *target* is among the peers currently known to *core*.

    When the target is unknown, an error naming the valid peers is printed in
    red and False is returned.
    """
    peers = [peer.get("name") for peer in await core.list_peers()]
    if target in peers:
        return True
    click.secho(f"Invalid target: {target}. Choose one of: {peers}.", fg="red")
    return False
@cli.command()
@click.argument("command")
@click.argument("target")
@click.argument("behav")
@click.pass_context
@coro
async def call(ctx, command, target, behav):
    # Start or stop a behaviour on a remote agent via RPC.
    # Accepted command spellings, mapped onto the RPC command verbs.
    commands = {"stop": "stop", "Stop": "stop", "start": "start", "Start": "start"}
    async with Ctrl(identity="Ctrl") as a:
        a.logger.setLevel(LOGGING_LEVEL)
        if not await target_exists(a, target):
            return False
        click.echo(f"Sending command: '{command}' to {target}:{behav}")
        if command not in commands:
            click.secho("Invalid command.", fg="red")
            click.secho("Expected one of [start, stop]", fg="red")
            return False
        obj = ManageBehav(behav=behav, command=commands[command])
        result = await a.call(obj.to_rpc(), target=target)
        click.secho(f"rpc result: {result}", fg="cyan")
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
@cli.command()
@click.argument("target")
@click.option("--limit", "-d")
@click.option("--sender", "-s")
@click.pass_context
@coro
async def list_traces(ctx, target, limit, sender):
    # Print the trace store of a remote agent, optionally filtered by sender
    # (--sender) and truncated to the newest records (--limit).
    async with Ctrl(identity="Ctrl") as a:
        a.logger.setLevel(LOGGING_LEVEL)
        if not await target_exists(a, target):
            return False
        if limit is not None:
            # click passes option values as strings; reject non-integer input
            # gracefully instead of crashing with a ValueError.
            try:
                limit = int(limit)
            except ValueError:
                click.secho(f"Invalid limit: {limit}. Expected an integer.", fg="red")
                return False
        obj = ListTraceStore(app_id=sender, limit=limit,)
        result = await a.call(obj.to_rpc(), target=target)
        for entry in result.traces:
            click.echo(entry[1])
        click.echo(f"Total number of records: {len(result.traces)}.")
        await asyncio.sleep(0.1)  # required for context cleanup
    # print(f"Duration: {datetime.now() - start}")
if __name__ == "__main__":
    """
    Examples:
        python ctrl.py broadcast '{"c_type": "DemoData", "c_data": "{\"message\": \"Hallo\", \"date\": 1546300800.0}"}' "CUSTOM"
        python ctrl.py list-behaviour SqlAgent
        python ctrl.py send-message '{"c_type": "DemoData", "c_data": "{\"message\": \"Hallo2\", \"date\": 1546300800.0}"}' "CUSTOM" SqlAgent
        python ctrl.py call start SqlAgent SqlAgent.SqlBehav
        python ctrl.py call start SqlAgent SqlBehav
        python ctrl.py list-traces SqlAgent --sender Ctrl
    """
    start = datetime.now()
    # Commented-out direct invocations below are kept for interactive debugging.
    # cli(['-d', 'list-peers'], obj=dict(start=start))
    # list_peers([])
    # list_behaviour(['SqlAgenttt'])
    # broadcast([r'{"c_type": "DemoData", "c_data": "{\"message\": \"Hallo\", \"date\": 1546300800.0}"}', "CUSTOM"])
    # broadcast(['wrong format', "CUSTOM"])
    # send_message([r'{"c_type": "DemoData", "c_data": "{\"message\": \"Hallo2\", \"date\": 1546300800.0}"}', "CUSTOM", "SqlAgent"])
    # call(['Stop', 'SqlAgent', "SqlAgent.SqlBehav"])
    # call(['Stop', 'SqlAgent', "SqlBehav"])
    # call(['start', 'SqlAgent', "SqlBehav"])
    # list_traces(["SqlAgent"])
    # list_traces(["SqlAgent", "--limit", 1])
    # list_traces(["SqlAgent", "--sender", "Ctrl"])
    ################################################################################
    # activate CLI
    ################################################################################
    cli(obj=dict(start=start))
    print(f"Duration: {datetime.now() - start}")
| [
"[email protected]"
] | |
6b0be7613cc411613219c39a2e460d4b9ff5b3c3 | 08f61d5432b6cf14bb6fc3448259a6c445b26780 | /dapodik/peserta_didik/peserta_didik_baru.py | e973ecbc1b9dba59b799edc8b00f3c1fa4b2a89d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | nainaidaa/dapodik | 9ecfabc6f262c24a4cd0288b3b1a7116b3a09199 | d89c0fb899c89e866527f6b7b57f741abd6444ea | refs/heads/master | 2023-07-22T23:37:16.694675 | 2021-09-08T15:04:23 | 2021-09-08T15:04:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from datetime import datetime, date
from typing import Optional
from uuid import UUID
import attr
@attr.dataclass
class PesertaDidikBaru:
    """New-student (peserta didik baru) registration record."""
    pdb_id: UUID
    sekolah_id: UUID
    nama_pd: str
    jenis_kelamin: str
    nik: str
    tempat_lahir: str
    tanggal_lahir: date
    nama_ibu_kandung: str
    jenis_pendaftaran_id: int
    nisn: str
    sudah_diproses: int
    berhasil_diproses: int
    tahun_ajaran_id: int
    tahun_ajaran_id_str: str
    peserta_didik_id: UUID
    peserta_didik_id_str: UUID
    # Get only -- fields below default to None and are presumably maintained
    # server-side (absent when creating a record); confirm against the API.
    create_date: Optional[datetime] = None
    last_update: Optional[datetime] = None
    soft_delete: Optional[int] = None
    last_sync: Optional[datetime] = None
    updater_id: Optional[UUID] = None
    @attr.dataclass
    class Create:
        """Payload shape used when creating a new record.

        The *_str fields default to empty strings and pdb_id to a client-side
        placeholder -- presumably filled in by the server; TODO confirm.
        """
        sekolah_id: UUID
        tahun_ajaran_id: int
        nama_pd: str
        jenis_kelamin: str
        nisn: str
        nik: str
        tempat_lahir: str
        # NOTE(review): typed datetime here but date on the outer class --
        # looks inconsistent; confirm which the API expects.
        tanggal_lahir: datetime
        nama_ibu_kandung: str
        jenis_pendaftaran_id: int
        sudah_diproses: int = 0
        berhasil_diproses: int = 0
        peserta_didik_id: str = ""
        pdb_id: str = "Admin.model.PesertaDidikBaru-1"
        sekolah_id_str: str = ""
        tahun_ajaran_id_str: str = ""
        jenis_pendaftaran_id_str: str = ""
        peserta_didik_id_str: str = ""
| [
"[email protected]"
] | |
f5d5a6877aa307cae604b2014d304f5a80aeedbe | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/eqptcapacity/bdusagehist5min.py | 99446d6b79141cb2c690a779a3622e853429f447 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 18,985 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class BdUsageHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.eqptcapacity.BdUsageHist5min", "Bridge domain usage count")
counter = CounterMeta("totalCap", CounterCategory.COUNTER, "count", "Bridge domain entries capacity")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "totalCapCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "totalCapPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "totalCapMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "totalCapMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "totalCapAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "totalCapSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "totalCapThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "totalCapTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "totalCapRate"
meta._counters.append(counter)
counter = CounterMeta("total", CounterCategory.COUNTER, "count", "Total Bridge domain entries")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "totalCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "totalPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "totalMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "totalMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "totalAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "totalSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "totalThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "totalTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "totalRate"
meta._counters.append(counter)
meta.moClassName = "eqptcapacityBdUsageHist5min"
meta.rnFormat = "HDeqptcapacityBdUsage5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Bridge domain usage count stats in 5 minute"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.eqptcapacity.Entity")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.eqptcapacity.BdUsageHist")
meta.rnPrefixes = [
('HDeqptcapacityBdUsage5min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 47458, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "totalAvg", "totalAvg", 47520, PropCategory.IMPLICIT_AVG)
prop.label = "Total Bridge domain entries average value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalAvg", prop)
prop = PropMeta("str", "totalCapAvg", "totalCapAvg", 47499, PropCategory.IMPLICIT_AVG)
prop.label = "Bridge domain entries capacity average value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapAvg", prop)
prop = PropMeta("str", "totalCapCum", "totalCapCum", 47495, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Bridge domain entries capacity cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapCum", prop)
prop = PropMeta("str", "totalCapMax", "totalCapMax", 47498, PropCategory.IMPLICIT_MAX)
prop.label = "Bridge domain entries capacity maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapMax", prop)
prop = PropMeta("str", "totalCapMin", "totalCapMin", 47497, PropCategory.IMPLICIT_MIN)
prop.label = "Bridge domain entries capacity minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapMin", prop)
prop = PropMeta("str", "totalCapPer", "totalCapPer", 47496, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Bridge domain entries capacity periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapPer", prop)
prop = PropMeta("str", "totalCapRate", "totalCapRate", 47503, PropCategory.IMPLICIT_RATE)
prop.label = "Bridge domain entries capacity rate"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapRate", prop)
prop = PropMeta("str", "totalCapSpct", "totalCapSpct", 47500, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Bridge domain entries capacity suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapSpct", prop)
prop = PropMeta("str", "totalCapThr", "totalCapThr", 47501, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Bridge domain entries capacity thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("totalCapThr", prop)
prop = PropMeta("str", "totalCapTr", "totalCapTr", 47502, PropCategory.IMPLICIT_TREND)
prop.label = "Bridge domain entries capacity trend"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCapTr", prop)
prop = PropMeta("str", "totalCum", "totalCum", 47516, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Total Bridge domain entries cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("totalCum", prop)
prop = PropMeta("str", "totalMax", "totalMax", 47519, PropCategory.IMPLICIT_MAX)
prop.label = "Total Bridge domain entries maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalMax", prop)
prop = PropMeta("str", "totalMin", "totalMin", 47518, PropCategory.IMPLICIT_MIN)
prop.label = "Total Bridge domain entries minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("totalMin", prop)
prop = PropMeta("str", "totalPer", "totalPer", 47517, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Total Bridge domain entries periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("totalPer", prop)
prop = PropMeta("str", "totalRate", "totalRate", 47524, PropCategory.IMPLICIT_RATE)
prop.label = "Total Bridge domain entries rate"
prop.isOper = True
prop.isStats = True
meta.props.add("totalRate", prop)
prop = PropMeta("str", "totalSpct", "totalSpct", 47521, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Total Bridge domain entries suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("totalSpct", prop)
prop = PropMeta("str", "totalThr", "totalThr", 47522, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Total Bridge domain entries thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("totalThr", prop)
prop = PropMeta("str", "totalTr", "totalTr", 47523, PropCategory.IMPLICIT_TREND)
prop.label = "Total Bridge domain entries trend"
prop.isOper = True
prop.isStats = True
meta.props.add("totalTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
    """Create this managed object under *parentMoOrDn*.

    `index` is the single naming property (registered via
    meta.namingProps above); any extra keyword arguments are passed
    through to the base Mo constructor as creation-time properties.
    """
    namingVals = [index]
    Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
a10765120a7758b9dc943cb42170270d4fb26a28 | a02016f368e9e9615dabe0c30b97d4e3f494ff20 | /Transformer/Model.py | 049fc2085f4d12ebeec365b9d16f86db63f5a069 | [] | no_license | carmelocs/Learning | d46a9c58035fe22204585ebcad2001ffee07d9b4 | f0eea71a689ac4e4f5c75a69fd6bd56a0f606da8 | refs/heads/master | 2023-06-11T03:36:03.521019 | 2021-07-07T04:40:02 | 2021-07-07T04:40:02 | 286,873,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,493 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
from Layer import EncoderLayer, DecoderLayer
def get_pad_mask(seq_q, seq_k):
    """Build the padding mask used by attention.

    Input:
        seq_q: [B, len_q] query token ids
        seq_k: [B, len_k] key token ids (token id 0 is `PAD`)
    Output:
        Bool tensor [B, len_q, len_k]; True wherever the key position is
        padding and must not be attended to.
    """
    # Mark every `PAD` key position once, then replicate that row for
    # each query position.
    key_is_pad = seq_k.eq(0).unsqueeze(1)            # [B, 1, len_k]
    return key_is_pad.repeat(1, seq_q.size(1), 1)    # [B, len_q, len_k]
def get_subsequent_mask(len_q, len_k):
    """Causal mask hiding future (subsequent) positions.

    Input:
        int: len_q, len_k
    Output:
        Bool tensor [1, len_q, len_k]; True where key position j <= query
        position i, i.e. only past and present positions are visible.
    """
    # The lower triangle (diagonal included) is exactly the set of
    # allowed positions.
    return torch.tril(torch.ones(1, len_q, len_k, dtype=torch.bool))
class Encoder(nn.Module):
    """Stack of Transformer encoder layers over an embedded source sequence."""

    def __init__(self,
                 len_src_vocab,
                 d_word_vec,
                 d_model,
                 num_head,
                 num_layer,
                 d_k,
                 d_v,
                 d_ff,
                 dropout=0.1):
        super().__init__()
        self.src_word_emb = nn.Embedding(len_src_vocab, d_word_vec)
        self.dropout = nn.Dropout(p=dropout)
        # num_layer identical encoder layers applied in sequence.
        self.layer_stack = nn.ModuleList([
            EncoderLayer(d_model, num_head, d_k, d_v, d_ff, dropout)
            for _ in range(num_layer)
        ])
        self.layer_norm = nn.LayerNorm(d_model)
        self.d_model = d_model

    def forward(self, src_word, src_slf_mask=None):
        """Encode a batch of source sequences.

        Input:
            src_word: [B, len_src] source token ids
            src_slf_mask: [B, len_src, len_src] self-attention mask
        Output:
            [B, len_src, d_model] encoder representation
        """
        # Embed, regularize, normalize — then run through the layer stack.
        hidden = self.layer_norm(self.dropout(self.src_word_emb(src_word)))
        for layer in self.layer_stack:
            hidden, *_ = layer(hidden, src_slf_mask)
        return hidden
class Decoder(nn.Module):
    """Stack of Transformer decoder layers conditioned on encoder output."""

    def __init__(self,
                 len_tgt_vocab,
                 d_word_vec,
                 d_model,
                 num_head,
                 num_layer,
                 d_k,
                 d_v,
                 d_ff,
                 dropout=0.1):
        super().__init__()
        self.tgt_word_emb = nn.Embedding(len_tgt_vocab, d_word_vec)
        self.dropout = nn.Dropout(p=dropout)
        # num_layer identical decoder layers applied in sequence.
        self.layer_stack = nn.ModuleList([
            DecoderLayer(d_model, num_head, d_k, d_v, d_ff, dropout)
            for _ in range(num_layer)
        ])
        self.layer_norm = nn.LayerNorm(d_model)
        self.d_model = d_model

    def forward(self,
                tgt_word,
                enc_output,
                tgt_slf_mask=None,
                tgt_src_mask=None):
        """Decode a batch of target sequences against the encoder output.

        Input:
            tgt_word: [B, len_tgt] target token ids
            enc_output: [B, len_src, d_model] encoder representation
            tgt_slf_mask: [B, len_tgt, len_tgt] masked (causal) self-attention mask
            tgt_src_mask: [B, len_tgt, len_src] decoder-encoder attention mask
        Output:
            [B, len_tgt, d_model] decoder representation
        """
        hidden = self.layer_norm(self.dropout(self.tgt_word_emb(tgt_word)))
        for layer in self.layer_stack:
            hidden, *_ = layer(hidden,
                               enc_output,
                               dec_slf_mask=tgt_slf_mask,
                               dec_enc_mask=tgt_src_mask)
        return hidden
class Transformer(nn.Module):
    """Full encoder-decoder Transformer with a final vocabulary projection.

    `d_word_vec` must equal `d_model` so embeddings feed the attention
    stack directly (enforced by the assert in __init__).
    """

    def __init__(self,
                 len_src_vocab,
                 len_tgt_vocab,
                 d_word_vec,
                 d_model,
                 num_head,
                 num_layer,
                 d_k,
                 d_v,
                 d_ff,
                 dropout=0.1):
        super().__init__()

        self.len_src_vocab = len_src_vocab
        self.len_tgt_vocab = len_tgt_vocab
        self.d_model = d_model

        self.encoder = Encoder(len_src_vocab=len_src_vocab,
                               d_word_vec=d_word_vec,
                               d_model=d_model,
                               num_head=num_head,
                               num_layer=num_layer,
                               d_k=d_k,
                               d_v=d_v,
                               d_ff=d_ff,
                               dropout=dropout)

        self.decoder = Decoder(len_tgt_vocab=len_tgt_vocab,
                               d_word_vec=d_word_vec,
                               d_model=d_model,
                               num_head=num_head,
                               num_layer=num_layer,
                               d_k=d_k,
                               d_v=d_v,
                               d_ff=d_ff,
                               dropout=dropout)

        self.tgt_word_prj = nn.Linear(d_model, len_tgt_vocab)

        assert d_word_vec == d_model

    def forward(self, src_word, tgt_word):
        """Run a source/target batch through the full model.

        Input:
            src_word: [B, len_src] source token ids
            tgt_word: [B, len_tgt] target token ids
        Output:
            [B, len_tgt] best log-probability per target position
        """
        len_tgt = tgt_word.size(1)

        # Source self-attention may not look at PAD keys.
        src_slf_mask = get_pad_mask(src_word, src_word)
        # Target self-attention hides PAD keys AND future positions.
        # Bug fix: the causal mask must be sized by the target *sequence
        # length*, not by the vocabulary size — the old code only worked
        # when the two happened to coincide and crashed (or silently
        # mis-broadcast) otherwise.
        tgt_slf_mask = get_pad_mask(tgt_word, tgt_word) & get_subsequent_mask(
            len_tgt, len_tgt)
        # Decoder-encoder attention hides PAD positions of the source.
        tgt_src_mask = get_pad_mask(tgt_word, src_word)

        enc_output = self.encoder(src_word=src_word, src_slf_mask=src_slf_mask)
        dec_output = self.decoder(tgt_word=tgt_word,
                                  enc_output=enc_output,
                                  tgt_slf_mask=tgt_slf_mask,
                                  tgt_src_mask=tgt_src_mask)

        seq_pred = self.tgt_word_prj(dec_output)
        # Log-probabilities over the target vocabulary, reduced to the
        # best score per position (interface kept from the original;
        # debug prints removed).
        return F.log_softmax(seq_pred, dim=-1).max(-1)[0]
if __name__ == '__main__':
    # Smoke-test the model on random data.
    # Make some fake data
    torch.manual_seed(0)

    BATCH_SIZE = 16
    MAX_LEN_SEQ = 100
    LEN_SRC = 100
    LEN_TGT = 120
    D_WORD_VEC = 512

    # NOTE(review): torch.rand(...) is in [0, 1), so .long() truncates
    # every entry to 0 — all tokens are the PAD id. torch.randint was
    # probably intended; confirm before relying on this demo's output.
    src_word = torch.rand(BATCH_SIZE, LEN_SRC).long()
    print(f"source word: {src_word.shape}")
    tgt_word = torch.rand(BATCH_SIZE, LEN_TGT).long()
    print(f"target word: {tgt_word.shape}")

    # Standalone embeddings, only used to print example input shapes below.
    src_word_emb = nn.Embedding(LEN_SRC, D_WORD_VEC)
    tgt_word_emb = nn.Embedding(LEN_TGT, D_WORD_VEC)

    # query = src_word_emb(src_word)
    # key = src_word_emb(src_word)
    # value = src_word_emb(src_word)

    enc_input = src_word_emb(src_word)
    print(f"encoder input: {enc_input.shape}")
    dec_input = tgt_word_emb(tgt_word)
    print(f"decoder input: {dec_input.shape}")

    # Hyperparameters
    # number of encoder/decoder layers
    NUM_LAYER = 6
    # The dimensionality of input and output for EncoderDecoder model
    D_MODEL = 512
    # number of heads/parallel attention layers
    NUM_HEAD = 8
    # The dimensionality of qurey and key in each head
    D_K = D_MODEL // NUM_HEAD
    # The dimensionality of value in each head (could be different from d_k)
    D_V = D_K
    # The dimensionality of inner-layer for Position-wise Feed-Forward Network(FFN)
    D_FF = 2048

    # enc_pad_mask = get_pad_mask(src_word, src_word)
    # dec_enc_pad_mask = get_pad_mask(tgt_word, src_word)
    # dec_pad_mask = get_pad_mask(tgt_word, tgt_word)
    # dec_sub_mask = get_subsequent_mask(LEN_TGT, LEN_TGT)
    # dec_slf_att_mask = dec_pad_mask & dec_sub_mask

    # Build the full model and run one forward pass.
    transformer = Transformer(
        len_src_vocab=LEN_SRC,
        len_tgt_vocab=LEN_TGT,
        d_word_vec=D_WORD_VEC,
        d_model=D_MODEL,
        num_head=NUM_HEAD,
        num_layer=NUM_LAYER,
        d_k=D_K,
        d_v=D_V,
        d_ff=D_FF,
    )

    pred = transformer(src_word, tgt_word)
    print(f"pred: {pred.shape}")
| [
"[email protected]"
] | |
d47a7be89ea03b1b9eea03e8ab1f78a3ad026051 | 606c03cdf70fe642d8d0ad1b55bc0a01ade33776 | /twitter/twitter/setup.py | 4f77ed74a85bc45045ceb22a1f1bb76d2a6dd1fd | [] | no_license | pravallika2207/TWITTER-SENTIMENT-ANALYSIS-USING-STREAMING-API | f8353bb9a7ffd341bfccc2820057ade72e47185d | 69e58f6a1d10b7d3a4b1d8aeb4e096a587c7073f | refs/heads/master | 2020-05-23T18:27:28.230447 | 2019-05-15T20:00:24 | 2019-05-15T20:00:24 | 186,888,131 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | from setuptools import setup,find_packages
setup(name='twitter',
version='0.1',
description='Twitter Sentiment Analysis Using Streaming APi',
url='https://github.com/Mrudula09/twitter',
author='Mrudula Nudurupati',
author_email='[email protected]',
packages=find_packages(),
install_requires=[
'emoji==0.5.1',
'nltk==3.4',
'matplotlib==2.1.2',
'tweepy==3.7.0',
'pandas==0.22.0',
'numpy==1.14.0',
'wordcloud==1.5.0',
],
zip_safe=True)
| [
"[email protected]"
] | |
717f01eec18028cda1a7a1fc9cd5684655077da5 | a55fcbe94032f98f8a858cebf7cfe843410aea76 | /hw4/env/lib/python3.6/encodings/palmos.py | 1235c0724933a47f8554a706098c955e9509af68 | [] | no_license | quentintruong/UCLA-ECE239AS-W19 | a3aca9302125ee7b85f8cecf82485782718d8266 | f734aa7a8178b64b309761f4a390ed4689c4caed | refs/heads/master | 2022-12-10T19:01:28.589101 | 2020-02-14T08:00:33 | 2020-02-14T08:00:33 | 166,895,199 | 1 | 0 | null | 2022-12-08T01:43:39 | 2019-01-21T23:34:01 | Jupyter Notebook | UTF-8 | Python | false | false | 64 | py | /Users/quentintruong/anaconda3/lib/python3.6/encodings/palmos.py | [
"[email protected]"
] | |
6fd7f3b2f75c83c5bd806a7fa3e9207ca1a109b0 | 707afa88f15e7bbf7d0d1c89b5076a9d6d17547e | /Python-Selenium unittest/EpilepsyTC_AirCondition.py | abbd1206e995a656f1bc6447c62ade17f053ad00 | [] | no_license | Longmann94/DFI_testing | 769f7211313b6303271b556b97e1038992d927a9 | a42430a25bdea98c5551f51003078a547335f7c6 | refs/heads/master | 2020-03-15T19:11:51.965865 | 2018-06-07T15:40:27 | 2018-06-07T15:40:27 | 132,302,477 | 3 | 0 | null | 2018-05-27T15:04:44 | 2018-05-06T03:11:25 | Python | UTF-8 | Python | false | false | 10,769 | py | import unittest
import time
import HtmlTestRunner
import os
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
cwd = os.getcwd()+'/results'
class Test_setUp_menu_buttons(unittest.TestCase):
    """Selenium UI tests for the Air Conditioning option in the Epilepsy setup menu.

    All tests drive the live DFI beta site in Chrome: dismiss the intro
    dialogs, choose the "Epilepsy" profile, open the setup panel, and then
    toggle / verify the "aircon" checkbox through its different click
    targets (icon, checkbox, label text, surrounding box).
    NOTE(review): element xpaths and the hard-coded sleeps are tied to the
    live site's DOM and load times — confirm against the current page.
    """

    def setUp(self):
        # Fresh maximized Chrome session per test.
        self.driver = webdriver.Chrome()
        self.driver.maximize_window()

    def tearDown(self):
        self.driver.close()

    #@unittest.skip("skip")
    def test_TC_AirCondition_01_iconClick(self):
        # Select the option by clicking its icon image.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        time.sleep(2)
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//img[@alt='Air Conditioning']").click()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_01_iconClick.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_01_checkboxClick(self):
        # Select the option by clicking the checkbox span itself.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        time.sleep(2)
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label/span").click()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_01_checkboxClick.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_01_textClick(self):
        # Select the option by clicking its label text.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        time.sleep(2)
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_01_textClick.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_01_boxAreaClick(self):
        # Select the option by clicking anywhere inside its list-item box.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li").click()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_01_boxAreaClick.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_02a(self):
        # Toggle the option an even number of times: it must end unselected.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        time.sleep(2)
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label/span").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        driver.find_element_by_xpath("//img[@alt='Air Conditioning']").click()
        time.sleep(2)
        self.assertFalse(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_02a_unselect.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_02b(self):
        # Toggle the option an odd number of times: it must end selected.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label/span").click()
        driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label").click()
        driver.find_element_by_xpath("//img[@alt='Air Conditioning']").click()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        driver.save_screenshot('SC_TC_AirCondition_02b_select.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_03(self):
        # Press-and-hold the icon for 6 seconds: it should still toggle on release.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        button = driver.find_element_by_xpath("//img[@alt='Air Conditioning']")
        ActionChains(driver).move_to_element(button).click_and_hold(button).pause(6).release().perform()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        # assertEqual()
        driver.save_screenshot('SC_TC_AirCondition_03.png')

    #@unittest.skip("skip still working on it, drag mouse over button")
    def test_TC_AirCondition_04(self):
        # Drag the mouse across the label: the option should end up selected.
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//img[@alt='Air Conditioning']")
        button = driver.find_element_by_xpath("//div[@id='setup3']/ul/div[4]/li/label")
        ActionChains(driver).drag_and_drop_by_offset(button, 30, 10).perform()
        time.sleep(2)
        self.assertTrue(driver.find_element_by_id("aircon").is_selected())
        # assertEqual()
        driver.save_screenshot('SC_TC_AirCondition_04.png')

    #@unittest.skip("skip")
    def test_TC_AirCondition_05(self):
        # Select the option, then complete the setup flow (name + suburb).
        driver = self.driver
        driver.get("https://www.dfiinfo.com.au/beta-2-0-3d/")
        time.sleep(2)
        driver.find_element_by_xpath("//div[@id='firsttime']/div[2]/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='wrapper']/div[8]/div/button/div[2]").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select").click()
        Select(driver.find_element_by_xpath("//div[@id='setupContainer']/div[2]/select")).select_by_visible_text("Epilepsy")
        driver.find_element_by_xpath("//img[@alt='setup icon']").click()
        driver.find_element_by_xpath("//img[@alt='Air Conditioning']").click()
        driver.find_element_by_xpath("//div[@id='setupContainer']/div/button/div[2]").click()
        time.sleep(2)
        driver.find_element_by_id("inputname").click()
        driver.find_element_by_id("inputname").clear()
        driver.find_element_by_id("inputname").send_keys("TEST")
        driver.find_element_by_id("inputsub").click()
        driver.find_element_by_id("inputsub").clear()
        driver.find_element_by_id("inputsub").send_keys("Mornington")
        driver.find_element_by_id("inputsub").send_keys(Keys.ENTER)
        time.sleep(2)
        # assertEqual()
        driver.save_screenshot('SC_TC_AirCondition_05.png')
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testDFI_Search']
unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output=cwd)) | [
"[email protected]"
] | |
77b06ffff76eaa8fc30bc45f6bbc686ae4474388 | 76f44f087082cc0092485803edacf8bdef9ac513 | /alfabet.py | be7b6a2a9d87cbbe7b58f946292014245e296c9e | [] | no_license | marcinmaslach/Enigma | 6a556fda7cb2351d2b5da41077039ad428490f5e | 68492ba60eb9140764b8429b631bef173f2972fd | refs/heads/master | 2020-04-11T04:40:09.248195 | 2018-12-12T17:46:40 | 2018-12-12T17:46:40 | 161,520,996 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | class Alphabet():
def __init__(self, alphabet):
self.alphabet = [i for i in alphabet.upper()]
def check_len_alphabet(self):
if len(self.alphabet) != 26:
raise ValueError ("Your alphabet have wrong lenght!")
def repeat_alphabet(self):
for i in range(len(self.alphabet)):
if self.alphabet.count(self.alphabet[i])>1:
raise ValueError ("Duplicates appear in the alphabet!")
def take_index(self, sign):
return self.alphabet.index(sign)
def take_sign(self, index):
return self.alphabet[index]
def len_alphabet(self):
return len(self.alphabet)
def __str__(self):
return self.alphabet
| [
"[email protected]"
] | |
47ae4bdf56fb000fd7182d52f0ac862315036afa | 44d1ce981d0a0607e5394f837e555ac7ffe5d8f2 | /Python Machine Learning CookBook/ch04_无监督学习_聚类/P68_k-means_聚类.py | 4f21dd98d25bd1a520a21487764de7ad1277b61a | [] | no_license | sola1121/references_of_machine_learning | 9b6a8dfdc41884a0cedc0c1ce93da2af91686648 | acd6b41646a7f95793b08b8590ce5daa66877b0d | refs/heads/master | 2020-04-24T14:45:22.659428 | 2019-07-31T10:02:29 | 2019-07-31T10:02:29 | 172,034,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,465 | py | import numpy as np
import matplotlib; matplotlib.use("Qt5Agg")
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans # 从聚合中导入kmeans
file_dir = "./dat/data_multivar.txt"

# Load the comma-separated 2-D points directly as a float array
# (replaces the manual readlines/split/astype parsing loop).
data = np.loadtxt(file_dir, delimiter=",")

# Initialize a k-means model and train it on the data.
kmeans = KMeans(init="k-means++", n_clusters=10, n_init=10)
kmeans.fit(data)

# Build a mesh over the (padded) data range so decision regions can be drawn.
x_min, x_max = min(data[:, 0]) - 1.0, max(data[:, 0]) + 1.0
y_min, y_max = min(data[:, 1]) - 1.0, max(data[:, 1]) + 1.0
x_values, y_values = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))

# Predict a cluster label for every mesh point.
predict_labels = kmeans.predict(np.c_[x_values.ravel(), y_values.ravel()])
print(predict_labels)
predict_labels = predict_labels.reshape(x_values.shape)

plt.figure()
# Paint each mesh cell with its cluster colour (the cluster boundaries).
plt.imshow(predict_labels, interpolation="nearest",
           extent=(x_values.min(), x_values.max(), y_values.min(), y_values.max()),
           cmap=plt.cm.Paired,
           aspect="auto",
           origin="lower"
           )

# Mark the cluster centroids.
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1], marker="o", s=150, linewidths=3, color="k", zorder=10, facecolors="blue")

# Overlay the original data points.
plt.scatter(data[:, 0], data[:, 1], marker="o", facecolors="none", edgecolors="black")

plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.show()
| [
"[email protected]"
] | |
fdcf843cbacd5bff6bea2c85dac2f94b4f88c0b9 | a8db7a0be1cf92d1ad8f3106114209dc6518ba8e | /flaskapp.wsgi | 3e1de5ee94885429942c11636c1a041db0b17b6d | [] | no_license | mensurm/registar | b14544df2e703fc5e6ab86e4b628aa607367175e | 9008d58cd4802703d76e0638ae90f87ab1195160 | refs/heads/master | 2021-01-10T21:46:41.924983 | 2015-10-21T17:05:23 | 2015-10-21T17:05:23 | 34,619,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | wsgi | import sys
sys.path.insert(0, '/var/www/flaskapp')
from admin import app as application
| [
"[email protected]"
] | |
cda9c9c57511424f73367de4ffc507d2fc22a9ba | 94f278407a5b4254d94663be768bd9bd604804a8 | /django_boards/urls.py | f927f00b92cb90d742e65a506ab03ef89473319a | [] | no_license | SriNandan33/django-boards | 25fca0192618aae4ed1ccae507880a95340d850c | 3f12f49aef4fd3d018d91a2456ed6d024e3fb63d | refs/heads/master | 2020-04-02T03:54:58.422667 | 2018-10-25T17:58:46 | 2018-10-25T17:58:46 | 153,990,835 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,109 | py | """django_boards URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.contrib.auth.views import LogoutView, LoginView
from django.urls import path, include
from accounts import views as accounts_views
urlpatterns = [
path('', include('boards.urls')),
path('signup/', accounts_views.signup, name='signup'),
path('login/', LoginView.as_view(template_name='accounts/login.html'), name='login'),
path('logout/', LogoutView.as_view(), name='logout'),
path('admin/', admin.site.urls),
]
| [
"="
] | = |
99a3a7aed12047431b1087b08ede9b531b7d3dc8 | 7e48ab347783507edc388887827a60db9b186a12 | /thermal_2p_0salt.py | 739d5be221c81fb75c5a4cbfe39b44bee5a3b1e3 | [] | no_license | laphysique/Protein_RPA | 188b825a66799293a629a25ee214877df7f81b8c | 9668caccea88a85fd1334bc0a64938a98710a24c | refs/heads/master | 2023-04-19T11:36:11.277320 | 2021-04-28T13:45:34 | 2021-04-28T13:45:34 | 255,717,263 | 1 | 1 | null | 2021-04-28T13:45:36 | 2020-04-14T20:22:21 | Python | UTF-8 | Python | false | false | 5,611 | py | # RPA model (no FH) for 2 overall neutral charge sequences
# No salt. No conterions.
# Constant permittivity
# solute sizes are not tunable
# ver Git.1 Apr 14, 2020
# Upload to github
# Rewrite data structure: from class to dict
# Rewrtie the code for calculating S(k): from matrix product to linear summation
# ver 2 May 14, 2017
import numpy as np
import scipy
import time
import scipy.integrate as sci
from numpy import exp
from numpy import log
phi_min_sys = 1e-12
Gamma = 1 # Short-range cutoff factor, 1 for Olvera de la Cruz's electric potential
c_smear = 0 # Short-range Gaussian smearing, 0.5 for Wang/Fredrickson's smearing
intlim = 200
# twoProteins:
# Returns a dict HP, which includes basic information of the proteins and the model parameters
def twoProteins(sigma1, sigma2):
    """Build the model-parameter dict HP for a pair of charge sequences.

    For each sequence sigma, T[n] holds the summed products of charges at
    separation n along the chain (main diagonal counted once), used later
    to evaluate the single-chain structure factor S(k).
    """
    def _charge_correlations(sig):
        # Outer product of the charge sequence with itself; diagonal k of
        # this matrix collects all charge pairs separated by k monomers.
        n = sig.shape[0]
        outer = np.kron(sig, sig).reshape((n, n))
        terms = np.array([np.sum(outer.diagonal(k) + outer.diagonal(-k))
                          for k in range(n)])
        terms[0] /= 2  # the main diagonal was added to itself above
        return n, terms

    sig1 = np.array(sigma1)
    sig2 = np.array(sigma2)
    N1, Tel1 = _charge_correlations(sig1)
    N2, Tel2 = _charge_correlations(sig2)

    return {
        'sig1': sig1,
        'sig2': sig2,
        'N1': N1,
        'N2': N2,
        'T1': Tel1,
        'T2': Tel2,
        'L1': np.arange(N1),
        'L2': np.arange(N2),
    }
#----------------------------------- Entropy -----------------------------------
def s_calc(x):
    # x*ln(x) entropy term; gated to 0 for x <= phi_min_sys. The inner
    # (x < phi_min_sys) shifts the log argument to ~1 so log(0) is never taken.
    return (x > phi_min_sys )*x*np.log(x+(x<phi_min_sys))
def Enp(HP, phi1, phi2):
    # Flory-Huggins mixing entropy of the two chains plus solvent (1-phi1-phi2).
    return s_calc(phi1)/HP['N1'] + s_calc(phi2)/HP['N2'] + s_calc(1-phi1-phi2)
def d_Enp_1(HP, phi1, phi2):
    # d(Enp)/d(phi1).
    return -1 + 1/HP['N1'] + log(phi1)/HP['N1'] - log(1-phi1-phi2)
def d_Enp_2(HP, phi1, phi2):
    # d(Enp)/d(phi2).
    return -1 + 1/HP['N2'] + log(phi2)/HP['N2'] - log(1-phi1-phi2)
def dd_Enp_11(HP, phi1, phi2):
    # d^2(Enp)/d(phi1)^2.
    return 1/phi1/HP['N1'] + 1/(1-phi1-phi2)
def dd_Enp_22(HP, phi1, phi2):
    # d^2(Enp)/d(phi2)^2.
    return 1/phi2/HP['N2'] + 1/(1-phi1-phi2)
def dd_Enp_12(HP, phi1, phi2):
    # Mixed second derivative d^2(Enp)/d(phi1)d(phi2).
    return 1/(1-phi1-phi2)
#------------------------------ RPA f_el function ------------------------------
def Uel(k,u):
    # Fourier-space electrostatic interaction with short-range cutoff Gamma
    # and Gaussian smearing c_smear (both module-level constants).
    return 4*np.pi*u/(k*k*(1+Gamma*k*k))*np.exp(-c_smear*k*k)
def Sk(HP, k):
    """Return the single-chain structure factors (S1(k), S2(k)).

    Uses the precomputed charge-correlation arrays T1/T2 and monomer
    separations L1/L2 from the HP dict, with a Gaussian-chain damping
    factor exp(-k^2 L / 6).

    BUG FIX: the parameter was named ``Hp`` while the body referenced
    ``HP``, so every call raised NameError (or silently picked up a
    global). The parameter is now named ``HP`` to match the body and
    every call site in this file.
    """
    return np.mean(HP['T1'] * np.exp(-k * k * HP['L1'] / 6)), \
           np.mean(HP['T2'] * np.exp(-k * k * HP['L2'] / 6))
def Fel(HP, phi1, phi2, u):
    """RPA electrostatic free-energy density f_el.

    Integrates the two integrand pieces over k and returns their sum;
    Fel_toint1 carries ln(1+G) - G + G^2/2 and Fel_toint2 carries
    G - G^2/2, so together they give the full (1/4pi^2) k^2 ln(1+G)
    contribution, consistent with dFel being its phi-derivative.

    BUG FIXES: the original passed the undefined names ``phi``/``phis``
    to the integrands, and then returned a broken expression referencing
    ``quad``/``self._Fel_to_int``/``err_abs`` (none of which exist here),
    discarding f1 and f2 entirely.
    """
    f1 = sci.quad(Fel_toint1, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    f2 = sci.quad(Fel_toint2, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    return f1 + f2
# f_el
def Fel_toint1(k, HP, phi1, phi2, u):
    """Integrand k^2 [ln(1+G) - G + G^2/2] / (4 pi^2) with G = uk*(phi1*S1 + phi2*S2).

    BUG FIX: the original used the undefined name ``lk``; the screened
    interaction computed on the line above is ``uk``.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    return 1/(4*np.pi*np.pi)*k*k*( log(1 + G) - G + G*G/2 )
def Fel_toint2(k, HP, phi1, phi2, u):
    """Integrand k^2 [G - G^2/2] / (4 pi^2): the counterpart to Fel_toint1.

    BUG FIX: ``lk`` was undefined; replaced with ``uk`` from Uel.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    return 1/(4*np.pi*np.pi)*k*k*( G - G*G/2 )
# 1st-phi1 derivative of f_el
def dFel(HP, phi1, phi2, u):
    """First derivatives (df_el/dphi1, df_el/dphi2) of the RPA term.

    BUG FIX: the quad args tuple used the undefined names ``phi``/``phis``;
    they are now the actual parameters ``phi1``/``phi2``.
    """
    df1 = sci.quad(dFel_1_toint, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    df2 = sci.quad(dFel_2_toint, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    return df1, df2
def dFel_1_toint(k, HP, phi1, phi2, u):
    """Integrand of df_el/dphi1: k^2 * uk*S1/(1+G) / (4 pi^2).

    BUG FIX: ``lk`` was undefined; replaced with ``uk`` from Uel.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    return 1/(4*np.pi*np.pi)*k*k*uk*sk1/( 1 + G )
def dFel_2_toint(k, HP, phi1, phi2, u):
    """Integrand of df_el/dphi2: k^2 * uk*S2/(1+G) / (4 pi^2).

    BUG FIX: ``lk`` was undefined; replaced with ``uk`` from Uel.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    return 1/(4*np.pi*np.pi)*k*k*uk*sk2/( 1 + G )
# 2nd-phi1^2 derivative of f_el
def ddFel(HP, phi1, phi2, u):
    """Second derivatives (d2f/dphi1^2, d2f/dphi2^2, d2f/dphi1 dphi2).

    BUG FIX: the quad args tuples used the undefined names ``phi``/``phis``;
    now the actual parameters ``phi1``/``phi2`` are passed.
    """
    ddf11 = sci.quad(ddFel_11_toint, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    ddf22 = sci.quad(ddFel_22_toint, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    ddf12 = sci.quad(ddFel_12_toint, 0, np.inf, args=(HP, phi1, phi2, u), limit=intlim)[0]
    return ddf11, ddf22, ddf12
def ddFel_11_toint(k, HP, phi1, phi2, u):
    """Integrand of d2f_el/dphi1^2: -k^2 * [uk*S1/(1+G)]^2 / (4 pi^2).

    BUG FIX: ``lk`` was undefined; replaced with ``uk`` from Uel.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    A = uk*sk1/(1+G)
    return -1/(4*np.pi*np.pi)*k*k*A*A
def ddFel_22_toint(k, HP, phi1, phi2, u):
    """Integrand of d2f_el/dphi2^2: -k^2 * [uk*S2/(1+G)]^2 / (4 pi^2).

    BUG FIXES: this function was mistakenly also named ``ddFel_11_toint``
    (shadowing the real phi1-integrand and leaving ``ddFel_22_toint``
    undefined, so ddFel raised NameError). Its body -- which uses sk2 --
    is clearly the phi2 integrand, so it is renamed accordingly. The
    undefined ``lk`` is also replaced with ``uk``.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    B = uk*sk2/(1+G)
    return -1/(4*np.pi*np.pi)*k*k*B*B
def ddFel_12_toint(k, HP, phi1, phi2, u):
    """Integrand of the mixed derivative d2f_el/dphi1 dphi2.

    BUG FIX: ``lk`` was undefined; replaced with ``uk`` from Uel.
    """
    sk1, sk2 = Sk(HP, k)
    uk = Uel(k, u)
    G = uk*( phi1*sk1 + phi2*sk2 )
    A, B = uk*sk1/(1+G), uk*sk2/(1+G)
    return -1/(4*np.pi*np.pi)*k*k*A*B
#---------------------------- free energy functions ----------------------------
def feng(HP, phi1, phi2, u):
    # Total free-energy density: mixing entropy plus RPA electrostatic term.
    return Enp(HP, phi1, phi2) + Fel(HP,phi1,phi2,u)
# 1st derivatives
def dfeng(HP, phi1, phi2, u):
    # First derivatives of the total free energy w.r.t. phi1 and phi2.
    dFel_1, dFel_2 = dFel(HP, phi1,phi2,u)
    df1 = d_Enp_1(HP, phi1, phi2) + dFel_1
    df2 = d_Enp_2(HP, phi1, phi2) + dFel_2
    return df1, df2
# 2nd derivatives
def ddfeng(HP, phi1, phi2, u):
    """Second derivatives of the total free energy.

    Returns (f11, f22, f12, det) where det = f11*f22 - f12^2 is the
    Hessian determinant used for spinodal/stability analysis.

    BUG FIXES: the parameter was named ``P`` while the body used ``HP``
    (NameError on every call); the tuple unpacked ``dFel_12`` but the
    body used ``ddFel_12`` (NameError); and an unreachable trailing
    ``return`` referencing the undefined name ``Ns`` has been removed.
    """
    ddFel_11, ddFel_22, ddFel_12 = ddFel(HP, phi1, phi2, u)
    ddf11 = dd_Enp_11(HP, phi1, phi2) + ddFel_11
    ddf22 = dd_Enp_22(HP, phi1, phi2) + ddFel_22
    ddf12 = dd_Enp_12(HP, phi1, phi2) + ddFel_12
    return ddf11, ddf22, ddf12, ddf11*ddf22 - ddf12*ddf12
| [
"[email protected]"
] | |
4d3f87a0b767ed6f306f99fa37a60b0cde0a70c5 | 5e35f570b2c9c6a507039ff97189c1d679b8510f | /abandon/TypeParser.py | af71f95063a96290d32b0ceac96c9ea0d776c617 | [] | no_license | FMX/qt-gui | 7e48cc21e963a69ef80b70bfa7a2198dbbfe3813 | 5185662fc4b58fcc5ad932ca79650305734cb1d3 | refs/heads/master | 2021-03-19T08:26:45.262904 | 2016-08-30T03:01:55 | 2016-08-30T03:01:55 | 65,461,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # coding:utf-8
class TypeParse:
    """Translates integer type codes into database / OS display names."""

    _DB_NAMES = {1: 'oracle', 2: 'sql server', 3: 'mysql'}
    _OS_NAMES = {1: 'windows', 2: 'linux', 3: 'unix'}

    def __init__(self):
        # Per-instance copies keep the original public attributes intact.
        self.dbtype = dict(self._DB_NAMES)
        self.ostype = dict(self._OS_NAMES)

    def parDBType(self, dbtype):
        """Return the database name for code *dbtype*, or None if unknown."""
        return self.dbtype.get(dbtype)

    def parOSType(self, ostype):
        """Return the OS name for code *ostype*, or None if unknown."""
        return self.ostype.get(ostype)
| [
"[email protected]"
] | |
5b221f2049670502c0c49c8cbd7e7685ee710bd4 | 82805dab063b51dd679a1acb0693ff51cf9ea287 | /aixm_adr.py | 9dc353230b0c71c09a7a0c98196bee5e13620c06 | [] | no_license | fgraciani/connectedbyairm | 33ca73f8d42675fe1c1c19350edabf4028e9d94f | 1eefc81dd377e69fa9975df6a89792de3ceeb13e | refs/heads/master | 2023-01-27T23:33:11.431592 | 2020-12-11T11:19:19 | 2020-12-11T11:19:19 | 297,360,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,725 | py | import pandas as pd
class Aixm_adr:
    """Lookup helpers over the AIXM/ADR semantic-correspondence spreadsheet."""

    # Loaded once when the class is defined; shared by all instances.
    aixm_adr_mapping_dataframe = pd.read_excel (r'data/xlsx/ADR_23.5.0_Semantic_Correspondence_Report.xlsx', sheet_name='semantic_correspondence') #it would be nice to align sheet names

    def __init__(self):
        # Replace NaN cells and normalize the column names used below.
        self.aixm_adr_mapping_dataframe.fillna("missing data", inplace = True)
        self.aixm_adr_mapping_dataframe.columns = ["Information Concept","Data Concept","Basic Type", "Concept Identifier", "Concept Definition", "AIRM Concept Identifier", "Special Case", "CR Number", "Rationale", "Level of semantic correspondence", "Remarks"]

    def is_in_amxm_adr_mapping(self, airm_urn):
        """Return True if *airm_urn* appears in the mapping, else False.

        BUG FIX: this method previously called ``self.get_from_amxm_mapping``,
        which does not exist on this class (AttributeError at runtime); the
        intended method is ``get_from_amxm_adr_mapping``.
        """
        results = self.get_from_amxm_adr_mapping(airm_urn)
        if results is None:
            return False
        else:
            print(airm_urn)
            return True

    def get_from_amxm_adr_mapping(self, airm_urn):
        """Return the mapping rows for *airm_urn* as a list of dicts, or None."""
        amxm_df = self.aixm_adr_mapping_dataframe.copy()
        filter = amxm_df["AIRM Concept Identifier"]==airm_urn
        amxm_df.sort_values("AIRM Concept Identifier", inplace = True)
        amxm_df.where(filter, inplace = True)
        df_results = amxm_df.dropna(how='all')
        if df_results.empty:
            return None
        else:
            results_dict = df_results.to_dict('records')
            return results_dict

    def get_information_concepts(self):
        """Return one record per distinct Information Concept (last occurrence wins)."""
        results_dict = []
        amxm_df = self.aixm_adr_mapping_dataframe.copy()
        amxm_df = amxm_df.drop_duplicates(subset='Information Concept', keep="last")
        amxm_df = amxm_df.drop(["Data Concept", "Basic Type"], axis=1)
        amxm_dict = amxm_df.to_dict('records')
        for entry in amxm_dict:
            info_concept = str(entry["Information Concept"])
            concept_def = str(entry["Concept Definition"])
            concept_id = str(entry["Concept Identifier"])
            results_dict.append({"Information Concept": info_concept, "Concept Definition": concept_def, "Concept Identifier": concept_id,
            "AIRM Concept Identifier": str(entry["AIRM Concept Identifier"]), "Semantic Correspondence": str(entry["Special Case"]), "Rationale": str(entry["Rationale"]), "Level of semantic correspondence": str(entry["Level of semantic correspondence"]), "Remarks": str(entry["Remarks"])})
        return results_dict

    def get_traces_by_info_concept(self, info_concept):
        """Return all mapping rows for *info_concept* as a list of dicts, or None."""
        amxm_df = self.aixm_adr_mapping_dataframe.copy()
        filter = amxm_df["Information Concept"]==info_concept
        amxm_df.sort_values("Information Concept", inplace = True)
        amxm_df.where(filter, inplace = True)
        df_results = amxm_df.dropna(how='all')
        if df_results.empty:
            return None
        else:
            results_dict = df_results.to_dict('records')
            print(results_dict)
            return results_dict
"[email protected]"
] | |
9eefe86c11ef3e2b2f7ef456699a607f1177291e | 5fa445e459693a1fd96d6572b1ed8d3bfababebd | /tp3/testing/plot.py | 7b1a2bf3e25df14208a457b8f30838b509c173f0 | [
"MIT"
] | permissive | gabriel-milan/distributed-systems | 1cb98b091e20414be3f74f4342669db2c8a5ccf6 | 7c016eb5dda7ae7d68c33fc3048397aae9b9bc5f | refs/heads/master | 2023-03-24T20:01:40.871857 | 2020-12-15T17:01:50 | 2020-12-15T17:01:50 | 292,856,988 | 0 | 0 | MIT | 2021-03-14T11:38:29 | 2020-09-04T13:37:11 | C++ | UTF-8 | Python | false | false | 315 | py | import matplotlib.pyplot as plt
# Number of processes tested in each run.
X = [2, 4, 8, 16, 32, 64, 128]
# Measured execution time in seconds for each process count
# (grows roughly linearly, ~100 s per process).
y = [
    200.169,
    400.339,
    800.707,
    1601.553,
    3203.045,
    6405.874,
    12813.326,
]
plt.figure (figsize=(20,20))
plt.title ("Tempo vs. nº processos")  # Portuguese: "Time vs. number of processes"
plt.plot(X, y)
plt.xlabel("Number of processes")
plt.ylabel("Execution time (s)")
plt.show()
| [
"[email protected]"
] | |
c61f5704f188e3c500d22acef254827b5a5aef7f | a8da8667bbf3281cf726f4849f61b091bf8093fe | /number.py | 5ab2ef2eb705e9bce2bb3147fa7723343227f263 | [] | no_license | gaoyanping/myself | db3dda6a8359d733e24123831d32d774807ed111 | 91c855c8266db89e9c4dfc3523fb4ce208aaefae | refs/heads/master | 2020-05-16T23:18:37.133773 | 2014-03-14T05:30:46 | 2014-03-14T05:31:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 230 | py | #-*- coding=utf-8 -*-
# Python can be used directly for arithmetic; print separates multiple
# values with a comma.  (Translated from the original Chinese comment.)
# NOTE: this is Python 2 syntax (print statements, integer division).
print "the number is", 5+3
print "true or false",5<3
print 5+4/2
print 5%2
print 5/2
print 5.00%2.00
print 5.00/2.00
"[email protected]"
] | |
6d1193ea718c006898d0ac5103a9ef072ba31aae | 39dc1d0988b35a3c641d6a47508dcd9cd3e1f9d6 | /Google/Code Jam/NumberGuessing.py | f53306ad52f5e14531947c50c3c05b8b1ec02ba2 | [] | no_license | luisfernado28/PythonProjects | 4934b6bc407584b05fde4fec27fd3c6245e54386 | 49b48376b8b7860eb1bd741bdd52c1356db07860 | refs/heads/master | 2022-12-11T04:25:44.640193 | 2020-08-28T01:47:01 | 2020-08-28T01:47:01 | 286,575,816 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | import sys
def guess(a, b):
    """Binary-search an interactive judge for a hidden number in [a, b].

    Prints the midpoint, flushes, and narrows the range according to the
    judge's reply until "CORRECT" is received.
    """
    while True:
        mid = (a + b) // 2
        print(mid)
        sys.stdout.flush()
        verdict = input()
        if verdict == "CORRECT":
            return
        if verdict == "TOO_SMALL":
            a = mid + 1
        else:
            b = mid - 1
# Interactive driver: one guessing round per test case.
cases = int(input())
for i in range(cases):
    # a, b bound the hidden number; n is the allowed number of guesses
    # (n is read but unused here -- presumably the judge enforces it).
    a, b = map(int, input().split())
    n = int(input())
    # a + 1: the lower bound appears to be exclusive -- TODO confirm
    # against the judge's protocol.
    guess(a + 1, b)
| [
"[email protected]"
] | |
5e6fc921fca826083a87a4ee84ea466f3504d39d | 21d3283b6695a2a93d316858a2b5d43e6a2caeef | /test/test1/myquetsion.py | dd9f68bf08f7967ce1063c11846573fb09cb1938 | [] | no_license | ReCodeLife/tianshoutest | d95f93e121c1d5ca168033af9cd93d0d55fffb0d | 74f701b9b90a9f2dea7c13625ee382ca40c4b120 | refs/heads/master | 2023-02-26T16:03:29.848070 | 2021-02-07T01:14:08 | 2021-02-07T01:14:08 | 336,672,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | from diylib.question.question import DiyQuestion
class DQNQuestion(DiyQuestion):
    """DiyQuestion variant that additionally records a reward threshold.

    NOTE(review): reward_threshold is presumably the mean-reward stopping
    criterion used by the trainer -- confirm against the DiyQuestion consumer.
    """
    def __init__(self,gameclass:type,reward_threshold:float):
        # The environment class is forwarded to the DiyQuestion base.
        super(DQNQuestion, self).__init__(gameclass)
        self.reward_threshold=reward_threshold
"[email protected]"
] | |
d3320481d1a478be0bd4a958ffae11f11f0eb48f | 32106821c8fa1d2de6ad38391909ae3986d2a2c1 | /python/paddle/distributed/auto_parallel/operators/dist_softmax.py | fad11aadf8020f290487874a506c6f2d3384fd99 | [
"Apache-2.0"
] | permissive | wzzju/Paddle | 7a13735b4c7d2c942e8d2b5cbc84229f99f8237a | 1353a5d0b40e7e34b812965ccda08796a1f1e398 | refs/heads/paddle_compiler | 2022-10-26T22:51:52.528161 | 2021-09-17T02:08:38 | 2021-09-17T02:08:38 | 140,524,963 | 0 | 1 | Apache-2.0 | 2021-09-17T06:50:08 | 2018-07-11T05:17:15 | Python | UTF-8 | Python | false | false | 3,199 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperator
from .common import DistributedOperatorImpl
from .common import register_distributed_operator
from .common import register_distributed_operator_impl
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
class DistributedSoftmax(DistributedOperator):
    """Distributed-operator container registered for the 'softmax' op type."""
    def __init__(self, name):
        super(DistributedSoftmax, self).__init__()
        self._name = name
register_distributed_operator("softmax", DistributedSoftmax("softmax"))
class DistributedSoftmaxImpl(DistributedOperatorImpl):
    """Implementation that allows any sharding except along the softmax axis.

    Softmax normalizes along `axis`, so that dimension must be replicated;
    all other dimensions may be sharded freely.
    """
    def __init__(self, name):
        super(DistributedSoftmaxImpl, self).__init__()
        self._name = name

    def is_process_mesh_compatible(self, op_dist_attr):
        """ No restriction for now. """
        return True

    def is_input_compatible(self, op_dist_attr):
        # Input X is compatible iff the softmax axis is the last dim and
        # that dim is not sharded.
        op_desc = op_dist_attr.get_owner_op().desc
        x_name = op_desc.input('X')[0]
        axis = op_desc.attr('axis')
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        # print("softmax axis", axis)

        if axis != -1 and axis != len(x_dims_mapping) - 1:
            return False

        if is_dim_shard(x_dims_mapping[axis]):
            return False

        return True

    def is_output_compatible(self, op_dist_attr):
        # Same rule as the input: softmax axis must be last and replicated.
        op_desc = op_dist_attr.get_owner_op().desc
        out_name = op_desc.output('Out')[0]
        axis = op_desc.attr('axis')
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        if axis != -1 and axis != len(out_dims_mapping) - 1:
            return False

        if is_dim_shard(out_dims_mapping[axis]):
            return False

        return True

    def update_dims_mapping(self, op_dist_attr):
        # Propagate a compatible dims mapping between X and Out,
        # dimension by dimension; returns True if anything changed.
        changed = False
        op_desc = op_dist_attr.get_owner_op().desc
        x_name = op_desc.input('X')[0]
        out_name = op_desc.output('Out')[0]
        x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
        out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)

        for i in range(len(x_dims_mapping)):
            dim_changed = compute_compatible_and_update_dim_mapping(
                [x_dims_mapping, out_dims_mapping], [i, i])
            if dim_changed:
                changed = True

        return changed
register_distributed_operator_impl(
"softmax", DistributedSoftmaxImpl("replicate_last_axis"))
| [
"[email protected]"
] | |
2043efaa3d05b534d4e7ff4a022e8189801833e3 | 52107dcbcb4bf45ae45eb6907c652ca96a05de17 | /MxOnline/extra_apps/xadmin/widgets.py | 801e4c5e2715fe8c6873420030ebc7d6cc9bfc4a | [] | no_license | MH-Blog/Python | 2f4fe4c55adc8766be3e0ffead6debe58d0643d2 | 7662f893120d5ffe417f37801b0135ac9fba3742 | refs/heads/master | 2022-12-08T12:14:35.559723 | 2020-01-19T07:34:14 | 2020-01-19T07:34:14 | 164,229,745 | 17 | 5 | null | 2022-12-08T06:52:38 | 2019-01-05T16:01:18 | Python | UTF-8 | Python | false | false | 8,853 | py | """
Form Widget classes specific to the Django admin site.
"""
from __future__ import absolute_import
from itertools import chain
from django import forms
try:
from django.forms.widgets import ChoiceWidget as RadioChoiceInput
except:
from django.forms.widgets import RadioFieldRenderer, RadioChoiceInput
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.translation import ugettext as _
import util
class AdminDateWidget(forms.DateInput):
    """Date input rendered inside a Bootstrap datepicker input-group."""

    @property
    def media(self):
        return util.vendor('datepicker.js', 'datepicker.css', 'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'date-field form-control', 'size': '10'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=merged, format=format)

    def render(self, name, value, attrs=None, renderer=None):
        inner = super(AdminDateWidget, self).render(name, value, attrs, renderer)
        wrapper = ('<div class="input-group date bootstrap-datepicker"><span class="input-group-addon"><i class="fa fa-calendar"></i></span>%s'
                   '<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>')
        return mark_safe(wrapper % (inner, _(u'Today')))
class AdminTimeWidget(forms.TimeInput):
    """Time input rendered inside a Bootstrap clockpicker input-group."""

    @property
    def media(self):
        return util.vendor('datepicker.js', 'clockpicker.js', 'clockpicker.css', 'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        merged = {'class': 'time-field form-control', 'size': '8'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=merged, format=format)

    def render(self, name, value, attrs=None, renderer=None):
        inner = super(AdminTimeWidget, self).render(name, value, attrs, renderer)
        wrapper = ('<div class="input-group time bootstrap-clockpicker"><span class="input-group-addon"><i class="fa fa-clock-o">'
                   '</i></span>%s<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>')
        return mark_safe(wrapper % (inner, _(u'Now')))
class AdminSelectWidget(forms.Select):
    """Plain select enhanced client-side by the vendored select assets."""

    @property
    def media(self):
        return util.vendor('select.js', 'select.css', 'xadmin.widget.select.js')
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.

    Renders the date and time sub-widgets inside Bootstrap
    datepicker/clockpicker input-groups.
    """

    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)

    def render(self, name, value, attrs=None, renderer=None):
        # Split the combined markup back into the two <input> fragments so
        # each can be wrapped in its own input-group below.
        input_html = [ht for ht in super(AdminSplitDateTime, self).render(name, value, attrs, renderer).replace('><input', '>\n<input').split('\n') if ht != '']
        # return input_html
        return mark_safe('<div class="datetime clearfix"><div class="input-group date bootstrap-datepicker"><span class="input-group-addon"><i class="fa fa-calendar"></i></span>%s'
                         '<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>'
                         '<div class="input-group time bootstrap-clockpicker"><span class="input-group-addon"><i class="fa fa-clock-o">'
                         '</i></span>%s<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div></div>' % (input_html[0], _(u'Today'), input_html[1], _(u'Now')))

    def format_output(self, rendered_widgets):
        # Legacy MultiWidget hook (pre-Django-1.11) combining both fragments.
        return mark_safe(u'<div class="datetime clearfix">%s%s</div>' %
                         (rendered_widgets[0], rendered_widgets[1]))
class AdminRadioInput(RadioChoiceInput):
    """Single radio option rendered with Bootstrap radio markup."""

    def render(self, name=None, value=None, attrs=None, choices=()):
        name = name or self.name
        # NOTE(review): `or` treats falsy values (0, '') as missing and
        # falls back to the stored ones -- looks intentional upstream,
        # but worth confirming.
        value = value or self.value
        attrs = attrs or self.attrs
        # Strip Django's default form-control class; Bootstrap radios
        # style the surrounding label instead.
        attrs['class'] = attrs.get('class', '').replace('form-control', '')
        if 'id' in self.attrs:
            label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
        else:
            label_for = ''
        choice_label = conditional_escape(force_text(self.choice_label))
        if attrs.get('inline', False):
            return mark_safe(u'<label%s class="radio-inline">%s %s</label>' % (label_for, self.tag(), choice_label))
        else:
            return mark_safe(u'<div class="radio"><label%s>%s %s</label></div>' % (label_for, self.tag(), choice_label))
class AdminRadioFieldRenderer(forms.RadioSelect):
    """Renders each choice through AdminRadioInput.

    NOTE(review): despite the "Renderer" name this subclasses
    forms.RadioSelect -- presumably a compatibility shim for Django
    versions that dropped RadioFieldRenderer; confirm before refactoring.
    """

    def __iter__(self):
        for i, choice in enumerate(self.choices):
            yield AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, i)

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propogate
        return AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, idx)

    def render(self):
        # Concatenate every rendered radio option, one per line.
        return mark_safe(u'\n'.join([force_text(w) for w in self]))
class AdminRadioSelect(forms.RadioSelect):
    """RadioSelect wired to the admin-styled renderer above."""
    renderer = AdminRadioFieldRenderer
class AdminCheckboxSelect(forms.CheckboxSelectMultiple):
    """Multi-checkbox widget emitting Bootstrap checkbox markup.

    Supports an 'inline' attr to switch between stacked and inline layout.
    """

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, extra_attrs={'name': name})
        output = []
        # Normalize to strings
        str_values = set([force_text(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = u' for="%s"' % final_attrs['id']
            else:
                label_for = ''

            # NOTE(review): the lambda parameter shadows the outer `value`;
            # it closes over str_values only, which appears intended.
            cb = forms.CheckboxInput(
                final_attrs, check_test=lambda value: value in str_values)
            option_value = force_text(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = conditional_escape(force_text(option_label))
            if final_attrs.get('inline', False):
                output.append(u'<label%s class="checkbox-inline">%s %s</label>' % (label_for, rendered_cb, option_label))
            else:
                output.append(u'<div class="checkbox"><label%s>%s %s</label></div>' % (label_for, rendered_cb, option_label))
        return mark_safe(u'\n'.join(output))
class AdminSelectMultiple(forms.SelectMultiple):
    """Multi-select tagged with the xadmin 'select-multi' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'select-multi'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminSelectMultiple, self).__init__(attrs=merged)
class AdminFileWidget(forms.ClearableFileInput):
    """ClearableFileInput with admin-specific wrapper markup for the
    current-file text and the clear checkbox."""
    template_with_initial = (u'<p class="file-upload">%s</p>'
                             % forms.ClearableFileInput.initial_text)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.clear_checkbox_label)
class AdminTextareaWidget(forms.Textarea):
    """Textarea tagged with the xadmin 'textarea-field' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'textarea-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=merged)
class AdminTextInputWidget(forms.TextInput):
    """Text input tagged with the xadmin 'text-field' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'text-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=merged)
class AdminURLFieldWidget(forms.TextInput):
    """Text input tagged with the xadmin 'url-field' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'url-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=merged)
class AdminIntegerFieldWidget(forms.TextInput):
    """Text input tagged with the xadmin 'int-field' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'int-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=merged)
class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Text input tagged with the xadmin 'sep-int-field' CSS class."""

    def __init__(self, attrs=None):
        merged = {'class': 'sep-int-field'}
        if attrs is not None:
            merged.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget,
              self).__init__(attrs=merged)
| [
"[email protected]"
] | |
ff57f7b9e615dea41c06efec99b6c19479bce080 | 14a04c0f312f2e8bc25a528f11afc350a878dfe9 | /MembraneDiffusion/2P_DiffusionModel-units-micron-min.py | 19661251a68a9379179396c4c6f4f64cb0b56492 | [] | no_license | anujchaudhri/python-examples | f4862016feafdbddf952777184f88b78813cfc9e | fa026bb777487e084e632ed8f6800123cfce7447 | refs/heads/master | 2020-09-25T06:58:34.789573 | 2019-12-04T19:48:49 | 2019-12-04T19:48:49 | 225,943,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 11:14:04 2019
@author: anujchaudhri
"""
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
Vo = 5.24e5 # microm^3, initial cell volume Vc
Vso = 0.0 # microm^3, initial solute volume in cell Vs
vbf = 0.25
Vb = vbf*Vo
# Vc = Vw + Vs + Vb, total volume of cell at time t
# Vo = Vwo + Vso + Vbo, total volume of cell at time 0
# Vbo = Vb, since cell solids volume does not change
Vwo = Vo - (Vso + Vb)# microm^3, initial water volume in cell Vw
rhoWater = 997.0e-18 # Kg/microm^3
Vsbar = 0.071e15 # microm^3/mol
Mino = 0.3*rhoWater # osmoles/Kg * Kg/m^3
Lp = 1.0 # microm/(min*atm)
A = 3.14e4 # microm^2
R = 8.2057338e13 # microm^3*atm/(K*mol)
T = 293.0 # K
Mes = 2.0*rhoWater # osmoles/Kg * Kg/m^3
Men = 0.3*rhoWater # osmoles/Kg * Kg/m^3
Psarray = np.array([0.0e4,0.001e4,0.01e4,0.03e4]) # microm/min
# function that returns dz/dt = [dx/dt dy/dt]
def model(t,z,Ps):
    """ODE right-hand side for the two-parameter membrane-transport model.

    z[0] is the intracellular water volume Vw, z[1] the solute volume Vs;
    Ps is the solute permeability. Uses the module-level constants
    (Lp, A, R, T, Vsbar, Mino, Vwo, Mes, Men). Returns [dVw/dt, dVs/dt].
    """
    # Water flux: osmotic driving force (internal vs external osmolality).
    dxdt = ((((Lp*A*R*T)/z[0])*((z[1]/Vsbar)+(Mino*Vwo)))
            - ((Lp*A*R*T)*(Mes+Men)))
    # Solute flux: concentration difference across the membrane.
    dydt = -(Ps*A*z[1]/z[0]) + (Ps*A*Vsbar*Mes)
    dzdt = [dxdt,dydt]
    return dzdt
def jacobian(t,y,Ps):
    """Analytic 2x2 Jacobian of `model` w.r.t. y = [Vw, Vs].

    Supplied to the stiff BDF solver so it does not have to approximate
    the Jacobian by finite differences.
    """
    J11 = -(Lp*A*R*T/(y[0]*y[0]))*((y[1]/Vsbar)+(Mino*Vwo))
    J12 = (Lp*A*R*T)/(y[0]*Vsbar)
    J21 = (y[1]*Ps*A)/(y[0]*y[0])
    J22 = -(Ps*A)/y[0]
    Jacob = np.array([[J11,J12],[J21,J22]])
    return Jacob
# initial conditions
z0 = [Vwo,Vso]
# time points in mins
tf = 4
N = 50
t = np.linspace(0,tf,N,endpoint=True,dtype=np.double)
#print(t)
Vn = np.zeros(shape=(t.size,Psarray.size))
# solve ODE
for i in range(Psarray.size):
sol = (solve_ivp(fun=lambda t, z: model(t, z, Psarray[i]),t_span=[0,tf],y0=z0,
method='BDF',t_eval=t,vectorized=True,
jac=lambda t, y: jacobian(t, y, Psarray[i])))
# Note that z[:,0] is solution at all time points of dxdt i.e. Vw
# z[:,1] is solution at all time points of dydt i.e. Vs
# Vc = Vw+Vs+Vb and Vn = (Vw+Vs+Vb)/Vo or Vn = (z[:,0]+z[:,1]+Vb)/Vo
#print("status: explanations")
#print("-1: Integration step failed.")
#print("0: The solver successfully reached the end of tspan.")
#print("1: A termination event occurred. ")
#print("status: ", sol.status)
##print(sol.y)
#print("Number of function evaluations: ", sol.nfev)
#print("Number of Jacobian evaluations: ", sol.njev)
Vc = np.add(sol.y[0,:],sol.y[1,:])
Vbarray = np.full_like(Vc,Vb,dtype=np.double)
Vc = np.add(Vc,Vbarray)
Vn[:,i] = Vc/Vo
print(Vn)
#plt.axis([-0.2,tf,0.0,1.2])
#plt.plot(t,Vn[:,0],'r-',t,Vn[:,1],'g-',t,Vn[:,2],'b-',t,Vn[:,3],'k-')
#plt.show()
| [
"[email protected]"
] | |
6b96866b678951b2a15ac2f00edfecee8e197416 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_populists.py | 20dbebab5468cb5697405f5e98e33b4c882d36c8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py |
from xai.brain.wordbase.adjectives._populist import _POPULIST
#calss header
class _POPULISTS(_POPULIST, ):
	# Plural word entry: inherits all behavior from the singular
	# _POPULIST and only overrides the identifying metadata fields.
	def __init__(self,):
		_POPULIST.__init__(self)
		self.name = "POPULISTS"
		self.specie = 'adjectives'
		self.basic = "populist"
		self.jsondata = {}
| [
"[email protected]"
] | |
d7af4c9fc83f3a065982703e42e47fd3721b0412 | eb9b8ed6d24b21a9fd6d768d6c0baab2ce88c2fd | /DQUANT/dquant/urls.py | 44738500ada52a9b2789019e8e8019a1c3509175 | [] | no_license | SoonMinKwun/QUANT | 40df4b354abd3bb6d97fa770c1bd1e1ede188881 | 5fe7e67840455d59c86447a29a792080b78f98fa | refs/heads/master | 2022-12-05T16:11:07.822076 | 2020-08-28T07:41:39 | 2020-08-28T07:41:39 | 289,828,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 852 | py | """DQUANT URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
path('admin/', admin.site.urls),
path('blog/' ,include('QuantApp.urls')),
path('magazine/' ,include('QuantApp.urls')),
]
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.