blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a12f16bca3314acc2311443f9ac38090380edb1 | 250a1f14955de0e23766a7b30a7296a187a69cf5 | /seguros.py | 9a8f6ab922b7d10b5277ba5ca66dbdc248889857 | [] | no_license | dxviidmg/seguros | 98d2a6716be61e867a5670760d496643486c6171 | 12cfbecc0be141a03d24f997c9a97d7574f3e6a5 | refs/heads/master | 2021-01-12T17:58:30.522598 | 2016-10-19T03:57:12 | 2016-10-19T03:57:12 | 71,317,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | #!/usr/bin/env python3
def preguntas():
pregunta = input("")
return pregunta
def calculo(plan, alcohol, lentes, enfermedad, edad):
if (plan == "A" or plan == "B") and edad >= 0:
if plan == "A":
cuotaBase = 950
else:
cuotaBase = 1200
if alcohol == "n" and lentes == "n" and enfermedad == "n" and edad < 40:
cuotaTotal = cuotaBase * 1.1
else:
if alcohol == "s":
cAlcohol = cuotaBase * 0.1
else:
cAlcohol = 0
if lentes == "s":
cLentes = cuotaBase * 0.05
else:
cLentes = 0
if enfermedad == "s":
cEnfermedad = cuotaBase * 0.05
else:
cEnfermedad = 0
if edad > 40:
cEdad = cuotaBase * 0.2
else:
cEdad = 0
cuotaTotal = cuotaBase + cAlcohol + cLentes + cEnfermedad + cEdad
print(cuotaTotal)
else:
print("Error al insertar datos")
def main():
print("Plan: A. Cuota base: 950")
print("Plan: B. Cuota base: 1200")
print("¿Indique el plan de seguro que desea?")
plan = preguntas()
print("¿Ingiere alcohol? s/n")
alcohol = preguntas()
print("¿Usa lentes? s/n")
lentes = preguntas()
print("¿Padece de alguna enfermedad degenerativa? s/n")
enfermedad = preguntas()
print("Edad: ")
edad = int(preguntas())
calculo(plan, alcohol, lentes, enfermedad, edad)
main() | [
"david@david-Lenovo-U310"
] | david@david-Lenovo-U310 |
d60a814c28143be0a5c7d1e0fc2c8e5f1ed37443 | 4bc87e8f3801ff8c14e3abdbc215744e29cf075e | /notebooks/test.py | 86c78557d9fa7678e0c6c08daf520b56bac05cc9 | [
"MIT"
] | permissive | brotatos/growth | 9718974ee0ea491298874985c3c75ca9087ccb8f | 27856415c769b2852d931a092d5a6846abe5ad20 | refs/heads/master | 2021-01-11T16:57:01.529713 | 2017-02-28T00:25:11 | 2017-02-28T00:25:11 | 79,701,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | import cv2
import matplotlib.pyplot as plt
import numpy as np
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
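# Example usage (a sketch; 'photo.png' is an illustrative filename):
#   gray = cv2.cvtColor(read_image('photo.png'), cv2.COLOR_RGB2GRAY)
#   edges = auto_canny(gray)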
def read_image(filename):
img = cv2.imread(filename)
b, g, r = cv2.split(img)
return cv2.merge([r, g, b])
def sg(img, title=''):
    plt.figure()
    if title:
        plt.title(title)
    plt.imshow(img, cmap='gray', interpolation='none')
rgb_img_orig = read_image('Lid UP cropped plus.png')
#rgb_img_orig = read_image('cropped 2.png')
rgb_img = rgb_img_orig.copy()
#canny = cv2.Canny(rgb_img, 55, 255)
#im2, contours, hierarchy = cv2.findContours(canny,
# cv2.RETR_TREE,
# cv2.CHAIN_APPROX_SIMPLE)
#cv2.drawContours(rgb_img_orig, contours, -1, (0, 255, 0), 3)
#plt.figure()
#plt.imshow(rgb_img_orig)
#print contours[0]
#plt.figure()
#plt.imshow(rgb_img_orig)
rgb_img = cv2.medianBlur(rgb_img, 9)
plt.figure()
plt.imshow(rgb_img)
# grayscale
gimg = cv2.cvtColor(rgb_img, cv2.COLOR_RGB2GRAY)
# binary thresh
res, thresh = cv2.threshold(gimg, 165, 255, cv2.THRESH_BINARY)
sg(thresh, title='thresh')
# morphology - removing super small holes
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2)
sg(opening, title='morph')
# sure background
sure_bg = cv2.dilate(opening, kernel, iterations=3)
sg(sure_bg, title='dilate')
#finding sure fg area
dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
sg(dist_transform, title='distance transform')
## sure_fg threshold
ret, sure_fg = cv2.threshold(dist_transform,
0.1 * dist_transform.max(),
#0.7 * dist_transform.max(),
255,
cv2.THRESH_BINARY)
#print dist_transform.max()
sg(sure_fg, title='threshold')
# Find unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
# Marker labelling
#ret, markers = cv2.connectedComponents(sure_fg)
output = cv2.connectedComponentsWithStats(sure_fg)
num_labels, markers, stats, centroids = output
# get areas of contours
values = [0] * num_labels
for row in range(markers.shape[0]):
for col in range(markers.shape[1]):
values[markers[row, col]] += 1
del values[0]
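# Note (an equivalent shortcut, sketched): connectedComponentsWithStats
# already returns per-label pixel counts, so the loop above could be
# replaced with: values = stats[1:, cv2.CC_STAT_AREA].tolist()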
print(values)
plt.figure()
#plt.hist(range(len(values)), values)
plt.hist(values)
print "mean", np.mean(values)
print "median", np.median(values)
print "max", np.max(values)
print "min", np.min(values)
print "std dev", np.std(values)
#print np.index(np.min(values))
# Add one to all labels so sure background is not 0, but 1
markers = markers + 1
markers[unknown==255] = 0
## watershed
markers = cv2.watershed(rgb_img_orig, markers)
rgb_img_orig[markers == -1] = [0, 0, 255]
plt.figure()
plt.imshow(rgb_img_orig, interpolation='none')
plt.show()
| [
"[email protected]"
] | |
12417d01dc0f8a4f9aee523f9ec745a1d1940548 | 77c965bde6202614ba4a45649efde432b67b5264 | /api/utils.py | 7f0f213da8768f09de4a9bccea2689af3d7f77d7 | [] | no_license | Munkhtur/Image-Colorizer | 1d2eb1ce5841bdc8fa1c3adf26e4f24bebbf10ee | 39b4e6913ab1a8eadd49ce5710de9a6b8482864b | refs/heads/master | 2023-07-30T18:53:18.889392 | 2021-10-04T08:25:12 | 2021-10-04T08:25:12 | 413,312,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 846 | py | import secrets
import os
from PIL import Image
from flask import current_app
import time
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(
current_app.root_path, "../public/uploads", picture_fn)
# output_size = (125, 125)
i = Image.open(form_picture)
# i.thumbnail(output_size)
i.save(picture_path)
return picture_fn
def delete_pictures():
    # Compute the reference time at call time; the original module-level
    # timestamp was captured once at import, which made the age check stale.
    now = time.time()
results_folder = "./frontend/build/results"
results = [
os.path.join(results_folder, filename)
for filename in os.listdir(results_folder)
]
if results:
for file in results:
if (now - os.stat(file).st_mtime) > 450:
os.remove(file)
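# Note: delete_pictures() removes generated results older than ~450 seconds;
# it is presumably invoked periodically by a caller not shown in this file.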
| [
"[email protected]"
] | |
2e294f451957743c51d7c14b22be1f2f924f3dd9 | de42126083dcbdbd431a9827e3e5c8987720d4c7 | /bin/django-admin.py | 4cd14231c8cd2768c0222df94c3f9573f6104398 | [] | no_license | MicztheMike/redis-servertest | 73222230ee96ad66e99da71dc224563c5b4f2c7b | c8e5f7b3f3cebd8ce5c8fe87108fbd2bb653813f | refs/heads/master | 2020-03-23T08:52:38.063267 | 2018-07-17T23:46:09 | 2018-07-17T23:46:09 | 141,353,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | #!/home/noc/miketestimg/mike/bin/python3
# EASY-INSTALL-DEV-SCRIPT: 'Django==2.2.dev20180716182847','django-admin.py'
__requires__ = 'Django==2.2.dev20180716182847'
__import__('pkg_resources').require('Django==2.2.dev20180716182847')
__file__ = '/home/noc/miketestimg/mike/bin/django/django/bin/django-admin.py'
with open(__file__) as f:
exec(compile(f.read(), __file__, 'exec'))
| [
"[email protected]"
] | |
b4bd0d04e9435209f15707975d7bcdfc818e3abf | bcabc33ab2e052485f9609143dfdb8eaa847f47f | /Exercise33.py | de08ecd75524608b4c3d4726198f4765529ae443 | [] | no_license | ErenBtrk/Python-Exercises-2 | 3b82a92d33fce661cd003a9096cdfbf29e8c9f27 | 42f40fc26fcef7c83e19313aea1fab4396c48f6a | refs/heads/master | 2023-04-27T08:56:07.879353 | 2021-05-15T19:05:46 | 2021-05-15T19:05:46 | 367,335,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | '''
33. Write a Python program to compute the sum of three given integers. However, if two of the values are equal, the sum will be zero.
'''
my_list = []
isEqual = False
total = 0  # renamed from "sum" to avoid shadowing the built-in
i = 0
while i < 3:
    number = int(input("Please enter a number : "))
    total += number
    if number in my_list:
        isEqual = True
    my_list.append(number)
    i += 1
if isEqual:
    total = 0
print(total)
| [
"[email protected]"
] | |
d0d38f9ad8bfa595c858d3c508118ae75370fdd9 | 9a7e9c96ff32362064d2b3fffe13a770c054cc4a | /transpositionDecrypt.py | 881ca23bad452039a6837136d83b47ca3e73980b | [] | no_license | jlgrosch/PythonTraining | fcebe52688215b4e982ea83a052db9a6c8be9f6d | ae1e212a63f3907b24cf1e5788c9a10dafb9535c | refs/heads/master | 2020-03-28T13:27:48.866095 | 2018-11-12T00:28:40 | 2018-11-12T00:28:40 | 148,397,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,463 | py | import math, pyperclip
def main():
myMessage = 'Cenoonommstmme oo snnio. s s c'
myKey = 8
plaintext = decryptMessage(myKey, myMessage)
# Print the encrypted string in ciphertext to the screen, with
# a | after it in case there are spaces at the end of the message
print(plaintext + '|')
pyperclip.copy(plaintext)
def decryptMessage(key, message):
# The number of columns in transposition grid
numOfColumns = int(math.ceil(len(message) / float(key)))
# The number of rows in grid
numOfRows = key
# The number of empty boxes in last golumn of the grid
numOfShadedBoxes = (numOfColumns * numOfRows) - len(message)
# Each string in plaintext represents a column in the grid
plaintext = [''] * numOfColumns
# The column and row variables point to where in the grid
# the next character in the encrypted message will go
column = 0
row = 0
for symbol in message:
plaintext[column] += symbol
column += 1 # point to the next column
# If there are no more columns OR we're at a shaded box
# go back to the first column and the next row
if (column == numOfColumns) or (column == numOfColumns -1 and row >= numOfRows - numOfShadedBoxes):
column = 0
row += 1
return ''.join(plaintext)
# If transpositionDecrypt.py is run instead of imported as a module
# call the main() function
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
006b607bc7b2de98ec06c9d44f27e98caf7b211b | 4a4f1eb77d0247d3f68f41f48bd42485787c61a7 | /100q/question_041/exercise041.py | 4a9c8a76b3c126479164cc2c333b16f58bf116d2 | [] | no_license | crawsome/Python-programming-exercises | 1a14e35749dfe3049d88535638d88d3a4c06ecaa | 11f45ad7f5beb79382903ad568b96c168793fb6c | refs/heads/master | 2020-05-23T10:49:59.987977 | 2019-08-18T21:24:21 | 2019-08-18T21:24:21 | 186,725,479 | 0 | 0 | null | 2019-05-15T01:16:30 | 2019-05-15T01:16:30 | null | UTF-8 | Python | false | false | 150 | py | """2.10
Question:
Define a function which can generate and print a tuple where the values are the squares of the numbers between 1 and 20 (both included).
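"""
# A minimal sample solution (an assumption -- the original file contains only
# the question, so this sketch is ours, not the repository author's):
def print_square_tuple():
    squares = tuple(n ** 2 for n in range(1, 21))  # squares of 1..20 inclusive
    print(squares)
# print_square_tuple()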
""" | [
"[email protected]"
] | |
8f9b9d0b24a4d0cdabeeed1209d50d12b85a9d00 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/BAS-TCP-MIB.py | c800063426c18db4269a44ab81ea8c50dacdd298 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 17,633 | py | #
# PySNMP MIB module BAS-TCP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BAS-TCP-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:34:18 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint")
BasSlotId, basAliasTcp, BasChassisId, BasInterfaceId, BasLogicalPortId = mibBuilder.importSymbols("BAS-MIB", "BasSlotId", "basAliasTcp", "BasChassisId", "BasInterfaceId", "BasLogicalPortId")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Integer32, IpAddress, NotificationType, Bits, Counter64, Unsigned32, ModuleIdentity, TimeTicks, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Gauge32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "IpAddress", "NotificationType", "Bits", "Counter64", "Unsigned32", "ModuleIdentity", "TimeTicks", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Gauge32", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
basAliasTcpMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1))
if mibBuilder.loadTexts: basAliasTcpMib.setLastUpdated('9810071415Z')
if mibBuilder.loadTexts: basAliasTcpMib.setOrganization('Broadband Access Systems')
if mibBuilder.loadTexts: basAliasTcpMib.setContactInfo(' Tech Support Broadband Access Systems 201 Forest Street Marlboro, MA 01752 U.S.A. 508-485-8200 [email protected]')
if mibBuilder.loadTexts: basAliasTcpMib.setDescription('This module defines the MIB objects for a Broadband Access System IP Forwarding component.')
basTcpObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1))
basTcpTable = MibTable((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1), )
if mibBuilder.loadTexts: basTcpTable.setStatus('current')
if mibBuilder.loadTexts: basTcpTable.setDescription('A list of TCP stats')
basTcpEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1), ).setIndexNames((0, "BAS-TCP-MIB", "basTcpChassis"), (0, "BAS-TCP-MIB", "basTcpSlot"), (0, "BAS-TCP-MIB", "basTcpIf"), (0, "BAS-TCP-MIB", "basTcpLPort"))
if mibBuilder.loadTexts: basTcpEntry.setStatus('current')
if mibBuilder.loadTexts: basTcpEntry.setDescription('Objects for the TCP mib.')
basTcpRtoAlgorithm = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("constant", 2), ("rsre", 3), ("vanj", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpRtoAlgorithm.setStatus('current')
if mibBuilder.loadTexts: basTcpRtoAlgorithm.setDescription('The algorithm used to determine the timeout value used for retransmitting unacknowledged octets.')
basTcpRtoMin = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 2), Integer32()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpRtoMin.setStatus('current')
if mibBuilder.loadTexts: basTcpRtoMin.setDescription('The minimum value permitted by a TCP implementation for the retransmission timeout, measured in milliseconds. More refined semantics for objects of this type depend upon the algorithm used to determine the retransmission timeout. In particular, when the timeout algorithm is rsre(3), an object of this type has the semantics of the LBOUND quantity described in RFC 793.')
basTcpRtoMax = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 3), Integer32()).setUnits('milliseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpRtoMax.setStatus('current')
if mibBuilder.loadTexts: basTcpRtoMax.setDescription('The maximum value permitted by a TCP implementation for the retransmission timeout, measured in milliseconds. More refined semantics for objects of this type depend upon the algorithm used to determine the retransmission timeout. In particular, when the timeout algorithm is rsre(3), an object of this type has the semantics of the UBOUND quantity described in RFC 793.')
basTcpMaxConn = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpMaxConn.setStatus('current')
if mibBuilder.loadTexts: basTcpMaxConn.setDescription('The limit on the total number of TCP connections the entity can support. In entities where the maximum number of connections is dynamic, this object should contain the value -1.')
basTcpActiveOpens = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpActiveOpens.setStatus('current')
if mibBuilder.loadTexts: basTcpActiveOpens.setDescription('The number of times TCP connections have made a direct transition to the SYN-SENT state from the CLOSED state.')
basTcpPassiveOpens = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpPassiveOpens.setStatus('current')
if mibBuilder.loadTexts: basTcpPassiveOpens.setDescription('The number of times TCP connections have made a direct transition to the SYN-RCVD state from the LISTEN state.')
basTcpAttemptFails = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpAttemptFails.setStatus('current')
if mibBuilder.loadTexts: basTcpAttemptFails.setDescription('The number of times TCP connections have made a direct transition to the CLOSED state from either the SYN-SENT state or the SYN-RCVD state, plus the number of times TCP connections have made a direct transition to the LISTEN state from the SYN-RCVD state.')
basTcpEstabResets = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpEstabResets.setStatus('current')
if mibBuilder.loadTexts: basTcpEstabResets.setDescription('The number of times TCP connections have made a direct transition to the CLOSED state from either the ESTABLISHED state or the CLOSE-WAIT state.')
basTcpCurrEstab = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 9), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpCurrEstab.setStatus('current')
if mibBuilder.loadTexts: basTcpCurrEstab.setDescription('The number of TCP connections for which the current state is either ESTABLISHED or CLOSE- WAIT.')
basTcpInSegs = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpInSegs.setStatus('current')
if mibBuilder.loadTexts: basTcpInSegs.setDescription('The total number of segments received, including those received in error. This count includes segments received on currently established connections.')
basTcpOutSegs = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpOutSegs.setStatus('current')
if mibBuilder.loadTexts: basTcpOutSegs.setDescription('The total number of segments sent, including those on current connections but excluding those containing only retransmitted octets.')
basTcpRetransSegs = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpRetransSegs.setStatus('current')
if mibBuilder.loadTexts: basTcpRetransSegs.setDescription('The total number of segments retransmitted - that is, the number of TCP segments transmitted containing one or more previously transmitted octets.')
basTcpChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 13), BasChassisId())
if mibBuilder.loadTexts: basTcpChassis.setStatus('current')
if mibBuilder.loadTexts: basTcpChassis.setDescription('The BAS Chassis ID of the Route Server.')
basTcpSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 14), BasSlotId())
if mibBuilder.loadTexts: basTcpSlot.setStatus('current')
if mibBuilder.loadTexts: basTcpSlot.setDescription('The BAS Slot ID of the Route Server.')
basTcpIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 15), BasInterfaceId())
if mibBuilder.loadTexts: basTcpIf.setStatus('current')
if mibBuilder.loadTexts: basTcpIf.setDescription('The BAS interface ID of the Route Server.')
basTcpLPort = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 1, 1, 16), BasLogicalPortId())
if mibBuilder.loadTexts: basTcpLPort.setStatus('current')
if mibBuilder.loadTexts: basTcpLPort.setDescription('The BAS logical port ID of the Route Server.')
basTcpConnTable = MibTable((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2), )
if mibBuilder.loadTexts: basTcpConnTable.setStatus('current')
if mibBuilder.loadTexts: basTcpConnTable.setDescription('A table containing TCP connection-specific information.')
basTcpConnEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1), ).setIndexNames((0, "BAS-TCP-MIB", "basTcpConnChassis"), (0, "BAS-TCP-MIB", "basTcpConnSlot"), (0, "BAS-TCP-MIB", "basTcpConnIf"), (0, "BAS-TCP-MIB", "basTcpConnLPort"), (0, "BAS-TCP-MIB", "basTcpConnLocalAddress"), (0, "BAS-TCP-MIB", "basTcpConnLocalPort"), (0, "BAS-TCP-MIB", "basTcpConnRemAddress"), (0, "BAS-TCP-MIB", "basTcpConnRemPort"))
if mibBuilder.loadTexts: basTcpConnEntry.setStatus('current')
if mibBuilder.loadTexts: basTcpConnEntry.setDescription('A conceptual row of the tcpConnTable containing information about a particular current TCP connection. Each row of this table is transient, in that it ceases to exist when (or soon after) the connection makes the transition to the CLOSED state.')
basTcpConnState = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("closed", 1), ("listen", 2), ("synSent", 3), ("synReceived", 4), ("established", 5), ("finWait1", 6), ("finWait2", 7), ("closeWait", 8), ("lastAck", 9), ("closing", 10), ("timeWait", 11), ("deleteTCB", 12)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: basTcpConnState.setStatus('current')
if mibBuilder.loadTexts: basTcpConnState.setDescription("The state of this TCP connection. The only value which may be set by a management station is deleteTCB(12). Accordingly, it is appropriate for an agent to return a `badValue' response if a management station attempts to set this object to any other value. If a management station sets this object to the value deleteTCB(12), then this has the effect of deleting the TCB (as defined in RFC 793) of the corresponding connection on the managed node, resulting in immediate termination of the connection. As an implementation-specific option, a RST segment may be sent from the managed node to the other TCP endpoint (note however that RST segments are not sent reliably).")
basTcpConnLocalAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 2), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpConnLocalAddress.setStatus('current')
if mibBuilder.loadTexts: basTcpConnLocalAddress.setDescription('The local IP address for this TCP connection. In the case of a connection in the listen state which is willing to accept connections for any IP interface associated with the node, the value 0.0.0.0 is used.')
basTcpConnLocalPort = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpConnLocalPort.setStatus('current')
if mibBuilder.loadTexts: basTcpConnLocalPort.setDescription('The local port number for this TCP connection.')
basTcpConnRemAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 4), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpConnRemAddress.setStatus('current')
if mibBuilder.loadTexts: basTcpConnRemAddress.setDescription('The remote IP address for this TCP connection.')
basTcpConnRemPort = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpConnRemPort.setStatus('current')
if mibBuilder.loadTexts: basTcpConnRemPort.setDescription('The remote port number for this TCP connection.')
basTcpConnChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 6), BasChassisId())
if mibBuilder.loadTexts: basTcpConnChassis.setStatus('current')
if mibBuilder.loadTexts: basTcpConnChassis.setDescription('The BAS Chassis ID of the Route Server.')
basTcpConnSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 7), BasSlotId())
if mibBuilder.loadTexts: basTcpConnSlot.setStatus('current')
if mibBuilder.loadTexts: basTcpConnSlot.setDescription('The BAS Slot ID of the Route Server.')
basTcpConnIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 8), BasInterfaceId())
if mibBuilder.loadTexts: basTcpConnIf.setStatus('current')
if mibBuilder.loadTexts: basTcpConnIf.setDescription('The BAS interface ID of the Route Server.')
basTcpConnLPort = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 2, 1, 9), BasLogicalPortId())
if mibBuilder.loadTexts: basTcpConnLPort.setStatus('current')
if mibBuilder.loadTexts: basTcpConnLPort.setDescription('The BAS logical port ID of the Route Server.')
basTcpStatsTable = MibTable((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3), )
if mibBuilder.loadTexts: basTcpStatsTable.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsTable.setDescription('A list of Route Server forwarding table distribution mechanism statistics.')
basTcpStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1), ).setIndexNames((0, "BAS-TCP-MIB", "basTcpStatsChassis"), (0, "BAS-TCP-MIB", "basTcpStatsSlot"), (0, "BAS-TCP-MIB", "basTcpStatsIf"), (0, "BAS-TCP-MIB", "basTcpStatsLPort"))
if mibBuilder.loadTexts: basTcpStatsEntry.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsEntry.setDescription('Objects for the TCP mib.')
basTcpInErrs = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpInErrs.setStatus('current')
if mibBuilder.loadTexts: basTcpInErrs.setDescription('The total number of segments received in error (e.g., bad TCP checksums).')
basTcpOutRsts = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: basTcpOutRsts.setStatus('current')
if mibBuilder.loadTexts: basTcpOutRsts.setDescription('The number of TCP segments sent containing the RST flag.')
basTcpStatsChassis = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 3), BasChassisId())
if mibBuilder.loadTexts: basTcpStatsChassis.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsChassis.setDescription('The BAS Chassis ID of the Route Server.')
basTcpStatsSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 4), BasSlotId())
if mibBuilder.loadTexts: basTcpStatsSlot.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsSlot.setDescription('The BAS Slot ID of the Route Server.')
basTcpStatsIf = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 5), BasInterfaceId())
if mibBuilder.loadTexts: basTcpStatsIf.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsIf.setDescription('The BAS interface ID of the Route Server.')
basTcpStatsLPort = MibTableColumn((1, 3, 6, 1, 4, 1, 3493, 2, 7, 3, 1, 1, 3, 1, 6), BasLogicalPortId())
if mibBuilder.loadTexts: basTcpStatsLPort.setStatus('current')
if mibBuilder.loadTexts: basTcpStatsLPort.setDescription('The BAS logical port ID of the Route Server.')
mibBuilder.exportSymbols("BAS-TCP-MIB", basTcpStatsTable=basTcpStatsTable, basTcpChassis=basTcpChassis, basTcpConnLocalAddress=basTcpConnLocalAddress, basAliasTcpMib=basAliasTcpMib, basTcpObjects=basTcpObjects, basTcpEstabResets=basTcpEstabResets, basTcpConnRemAddress=basTcpConnRemAddress, basTcpSlot=basTcpSlot, basTcpConnRemPort=basTcpConnRemPort, basTcpConnChassis=basTcpConnChassis, basTcpConnEntry=basTcpConnEntry, basTcpRetransSegs=basTcpRetransSegs, basTcpConnTable=basTcpConnTable, basTcpConnLPort=basTcpConnLPort, basTcpInErrs=basTcpInErrs, basTcpRtoMax=basTcpRtoMax, basTcpConnState=basTcpConnState, basTcpCurrEstab=basTcpCurrEstab, basTcpEntry=basTcpEntry, basTcpTable=basTcpTable, basTcpAttemptFails=basTcpAttemptFails, basTcpIf=basTcpIf, basTcpActiveOpens=basTcpActiveOpens, basTcpStatsLPort=basTcpStatsLPort, basTcpConnIf=basTcpConnIf, basTcpOutSegs=basTcpOutSegs, PYSNMP_MODULE_ID=basAliasTcpMib, basTcpInSegs=basTcpInSegs, basTcpStatsSlot=basTcpStatsSlot, basTcpConnLocalPort=basTcpConnLocalPort, basTcpRtoAlgorithm=basTcpRtoAlgorithm, basTcpRtoMin=basTcpRtoMin, basTcpPassiveOpens=basTcpPassiveOpens, basTcpStatsChassis=basTcpStatsChassis, basTcpMaxConn=basTcpMaxConn, basTcpStatsEntry=basTcpStatsEntry, basTcpLPort=basTcpLPort, basTcpStatsIf=basTcpStatsIf, basTcpConnSlot=basTcpConnSlot, basTcpOutRsts=basTcpOutRsts)
| [
"[email protected]"
] | |
a03022e69efb6d0ca7a4bb1f9420747bb30f37d9 | 9c3bb98eb9d0a587a302bdfa811f7b5c6a5a0a37 | /Week 1/id_050/LeetCode_70_050.py | 07a2820dbb45ced18ddecff9df6516a9993540fd | [] | permissive | chenlei65368/algorithm004-05 | 842db9d9017556656aef0eeb6611eec3991f6c90 | 60e9ef1051a1d0441ab1c5484a51ab77a306bf5b | refs/heads/master | 2020-08-07T23:09:30.548805 | 2019-12-17T10:48:22 | 2019-12-17T10:48:22 | 213,617,423 | 1 | 0 | Apache-2.0 | 2019-12-17T10:48:24 | 2019-10-08T10:50:41 | Java | UTF-8 | Python | false | false | 284 | py | class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
if n <= 2: return n
f1, f2 = 1, 2
for i in range(3, n+1):
f3 = f1 + f2
f1 = f2
f2 = f3
return f3
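# Usage sketch (hypothetical driver, not part of the original submission):
# climbStairs reduces to a Fibonacci-style recurrence, since the number of
# ways to reach step n is the ways to reach n-1 plus the ways to reach n-2.
#   print(Solution().climbStairs(5))  # -> 8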
| [
"[email protected]"
] | |
453292745d5696c32db20ed256af1e42152f0252 | 0bd7685c419f54fbcbb36129ee61a1ad5a79c44a | /NewsWebsite/urls.py | 5726b9035718cd68f13c36f4a0976309f0667543 | [] | no_license | AS-AmanSinghal/NewsWebsite | d3c3aeeb08248d51ade5ad1c5003e1cca73db535 | 42db57a64cfeb5b810d4ff0da113522540f60b94 | refs/heads/master | 2023-04-30T23:51:46.511751 | 2021-05-22T10:34:51 | 2021-05-22T10:34:51 | 367,278,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | """NewsWebsite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from NewsWebsite.settings import STATIC_ROOT, MEDIA_ROOT
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('newsApp.urls')),
url(r'', include('LatestNews.urls')),
url(r'', include('category.urls')),
url(r'', include('subcategory.urls')),
url(r'', include('contactUs.urls')),
url(r'', include('manageUsers.urls'))
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=MEDIA_ROOT)
| [
"[email protected]"
] | |
0bdf2c06f437b597d430d1218c0994b998651b0f | 8351de4523dccf6f7dc515e0d9e6f8aef3465d38 | /arsmagica/arsmagica/sqllite_settings.py | 66fcf3df085064c9c6f23323a9f04ff4502db82b | [] | no_license | DaveTheTroll/ArsMagica | 1db5089bfa54dd472af1ee47dfa7d6fb4328e785 | b6323cf94f6dde15ba97b770d778fe6c58bbb058 | refs/heads/master | 2022-11-16T01:45:56.303057 | 2020-07-15T16:59:59 | 2020-07-15T16:59:59 | 271,326,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,135 | py | """
Django settings for arsmagica project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-hg9o9p^h=s_=^r7tvfus!%s@&tqkh!tg!wen2*k3sn57_120$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'character.apps.CharacterConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'arsmagica.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'arsmagica.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
e63438f8d0fb4edfc07e2ca115759d3f1e683380 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch78_2020_06_21_00_43_30_170269.py | ccb49ce8e0e8810c449896d168656b21ea4add01 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | def calcula_tempo(dicio):
nome_tempo = {}
for e,a in dicio.items():
t = (200/a)**(1/2)
nome_tempo[e] = t
return nome_tempo
nome_acel = {}
nome = input("Digite o nome: ")
while nome != "sair":
aceleracao = input("Digite a aceleração: ")
nome_acel[nome] = aceleracao
nome = input("Digite o nome: ")
menor_tempo = float('inf')
chama_funcao = calcula_tempo(nome_acel)
for tempo in chama_funcao:
if tempo < menor_tempo:
menor_tempo = tempo
| [
"[email protected]"
] | |
5f7446f489a68650bf5413ecc2062f9a325e93f6 | 2f5c38b8c7c6f2285f44b72c686875ada094f89e | /webim_oauth_vk/main/models.py | 17a73cb676f9512201f6bdd88ea12fbc9fa0858b | [] | no_license | Pegorino82/VK_OAuth | 956166de516b17ecf19e24e21ef1d29406f5052c | 8c92a501147a46af8a2f89a453dcc8ff9c51c089 | refs/heads/master | 2020-04-09T19:45:14.550287 | 2018-12-05T17:21:56 | 2018-12-05T17:21:56 | 160,552,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | from django.db import models
from django.contrib.auth.models import User
class Friend(models.Model):
friend_id = models.IntegerField()
first_name = models.CharField(
max_length=150,
null=True
)
last_name = models.CharField(
max_length=150,
null=True
)
nickname = models.CharField(
max_length=150,
null=True
)
photo_50 = models.CharField(
max_length=500,
null=True
)
class Friends(models.Model):
user = models.ForeignKey(
User,
on_delete=models.CASCADE
)
friend = models.TextField()
| [
"[email protected]"
] | |
55f1675ef1c6713370c088eaa413f7f0540bc987 | 08b0fa07fb13392784dd1948d4b8da5fff96a650 | /DisplacedJetMCProd/test/DisplacedJetProd/makeCfgFiles.py | 562be028a33ea4f64fbdc874164ebc64ce062b3b | [] | no_license | Cristian42/UsercodeCMS | 5b59078c4914b8156a44afbb086137eb8e85be30 | dad91a5760bb35711b61c358aa360df09fa4e3c8 | refs/heads/master | 2021-01-17T23:01:33.838580 | 2013-09-16T19:44:26 | 2013-09-16T19:44:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,474 | py | import os
os.mkdir('cfgFiles')
for f in os.listdir('../../python'):
if f.find("HTo2Long")==-1 : continue
if f.endswith('pyc') : continue
command = "cmsDriver.py MyAnalysis/DisplacedJetMCProd/python/"+f+" \
--step GEN:ProductionFilterSequence,SIM,DIGI,L1,DIGI2RAW,HLT:GRun\
--beamspot Realistic7TeVCollision\
--fileout GEN-SIM-RAWDEBUG.root\
--conditions START50_V10::All\
--pileup E7TeV_Ave23_50ns\
--datamix NODATAMIXER\
--eventcontent RAWDEBUG\
--datatier GEN-SIM-RAWDEBUG\
-n 500\
--no_exec "
os.system(command)
os.system('mv HTo2* cfgFiles/')
os.mkdir('crab/crabjobs')
for f in os.listdir('cfgFiles/'):
if f.endswith('pyc') : continue
	file = open('cfgFiles/' + f)
tmp = open('tmp','write')
for line in file:
if line.strip == "" : continue
if line.find('input = cms.untracked.int32(500)')>-1:
line = line.replace('input','output')
if line.find("6, 11, 13, 15),")>-1:
line = line.replace(', 11, 13, 15','')
if line.find("6000111, 6000112, 6000113")>-1:
line = line.replace(', 6000113','')
tmp.write(line)
os.system('mv tmp cfgFiles/'+f)
name=f[f.find('MH'):f.find('pythia')-1]
os.mkdir('crab/crabjobs/'+name)
template = open('crab.cfg')
tmp = open('tmp','write')
for line in template:
if line.find('pset=') > -1:
line='pset=../../cfgFiles/'+f+"\n"
if line.find('publish_data_name=') > -1:
line='publish_data_name='+name+'GEN_SIM_RAWDEBUG\n'
tmp.write(line)
os.system('mv tmp crab/crabjobs/'+name+'/crab.cfg')
| [
""
] | |
5e8deb881009950cdc20313fbf6fd802f964a34f | 8f35599e264e57e87916381cd03eb9f17e82be74 | /stepik_tasks/python_programming/unit3.7/task3-7-3.py | ba9ef3c13e56d46b763daca27e72ff8180e20d95 | [] | no_license | DorogAD/stepik | 6c1b91d68dc09ce2255501724b0b2ee22624467c | 80203de57fa9f226cb23d35c7f08cc43eb91cf56 | refs/heads/master | 2023-02-22T19:53:45.145799 | 2021-01-24T10:16:48 | 2021-01-24T10:16:48 | 310,315,653 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,637 | py | """
https://stepik.org/lesson/3380/step/3?unit=963
The simplest spell-checking system can be based on a list of known words.
If an entered word is not found in that list, it is flagged as an "error".
Let's try to write such a system.
The first line of input gives the number d of words known to us, followed by
those words on d lines. Then the number l of lines of text to check is given,
followed by the l lines of text.
Print the unique "errors" in any order. Work without regard to case.
4
champions
we
are
Stepik
3
We are the champignons
We Are The Champions
Stepic
Sample Output:
stepic
champignons
the
"""
# count_words = int(input())
# test_words = [input().lower() for i in range(count_words)]
# count_strings = int(input())
# check_strings = [input() for i in range(count_strings)]
test_words = ['champions', 'we', 'are', 'stepik']
check_strings = ['We are the champignons', 'We Are The Champions', 'Stepic']
errors = []
for i in check_strings:
for j in i.split():
if j.lower() not in test_words:
errors.append(j.lower())
errors = list(set(errors))
for i in errors:
print(i)
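# A more compact alternative (a sketch, equivalent for this task): collect
# every lowercased word and subtract the set of known words in one step:
#   errors = {w.lower() for line in check_strings for w in line.split()} - set(test_words)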
| [
"[email protected]"
] | |
68f03066fd5fa39a29476db1b2d3a557829e1395 | 4a85a4048212a796c4b4893d97ce189f95d984b4 | /document/views.py | 650f0f82f106f7b012764280c4d3be52cd279459 | [
"MIT"
] | permissive | harshit212705/MyPen-Backend | 6169254b30195feaeea5bb73973e2b47f5c80013 | 3d1b496c7c64c455a167e59db820556893ff3ece | refs/heads/master | 2023-02-22T07:16:12.150932 | 2021-01-28T03:51:53 | 2021-01-28T03:51:53 | 286,424,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,064 | py | from rest_framework import viewsets
from rest_framework import permissions
from django.http import HttpResponse, JsonResponse,FileResponse
from django.views.decorators.csrf import csrf_exempt
from django.core.files.storage import default_storage
from shutil import copyfile, rmtree
from fpdf import FPDF
from PyPDF2 import PdfFileWriter, PdfFileReader
import uuid
import os
import base64
from django.conf import settings
BASE_PATH_DOCUMENT_FILES_FOLDER = os.path.join(settings.BASE_DIR, 'document_files')
@csrf_exempt
def generate_document(request):
if request.method == 'POST':
# print(request.POST)
document_name = request.POST['document_name'][1:-1]
font_file_content64 = request.POST['font_file_content64'][1:-1]
font_file_content64 = font_file_content64.replace("\\n", "")
font_size = request.POST['font_size'][1:-1]
font_ink_color = request.POST['font_ink_color'][1:-1]
paper_margin = request.POST['paper_margin'][1:-1]
paper_lines = request.POST['paper_lines'][1:-1]
text_file_content64 = request.POST['text_file_content64'][1:-1]
text_file_content64 = text_file_content64.replace("\\n", "\n")
unique_foldername = str(uuid.uuid4())
# print(unique_foldername)
folder_path = os.path.join(BASE_PATH_DOCUMENT_FILES_FOLDER, unique_foldername)
try:
os.mkdir(folder_path)
except OSError as error:
print(error)
with default_storage.open('document_files/' + unique_foldername + '/' + unique_foldername + '.ttf', "wb") as fh:
fh.write(base64.b64decode(font_file_content64))
with default_storage.open('document_files/' + unique_foldername + '/' + unique_foldername + '.txt', "w+") as fh:
# fh.write(base64.b64decode(text_file_content64))
fh.write(text_file_content64)
pdf = PDF('P', 'mm', 'A4')
pdf.add_font('MyCustomFont', '', BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.ttf', uni=True)
pdf.set_font('MyCustomFont', '', int(font_size))
if font_ink_color == 'Blue':
pdf.set_text_color(0, 15, 85) # blue
elif font_ink_color == 'Black':
pdf.set_text_color(51, 51, 51) # black
elif font_ink_color == 'Red':
pdf.set_text_color(247, 2, 15) # red
if paper_margin == 'true':
pdf.set_left_margin(29.0)
pdf.set_top_margin(29.0)
pdf.print_content_to_pdf(BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.txt')
pdf.output(BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.pdf', 'F')
watermark_filename = ''
if paper_margin == 'true' and paper_lines == 'true':
watermark_filename = 'watermark_paper_margin_lines.pdf'
elif paper_margin == 'true':
watermark_filename = 'watermark_paper_margin.pdf'
elif paper_lines == 'true':
watermark_filename = 'watermark_paper_lines.pdf'
if watermark_filename != '':
create_watermark(input_pdf=BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.pdf', output=BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.pdf', watermark=BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + watermark_filename)
with open(BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/' + unique_foldername + '.pdf', 'rb') as binary_file:
binary_file_data = binary_file.read()
base64_encoded_data = base64.b64encode(binary_file_data)
base64_message = base64_encoded_data.decode('utf-8')
# rmtree(BASE_PATH_DOCUMENT_FILES_FOLDER + '/' + unique_foldername + '/')
return JsonResponse({
"document_name" : document_name + ".pdf",
"content64" : base64_message
})
# my_data = {'received': 'yes'}
# response = HttpResponse(my_data, content_type='application/json')
# return JsonResponse(my_data)
else:
return HttpResponse("Invalid Request")
class PDF(FPDF):
'''def __init__(self, paper_margin, paper_lines, orientation = 'P', unit = 'mm', format = 'A4'):
FPDF.__init__(self, orientation, unit, format)
self.paper_margin = paper_margin
self.paper_lines = paper_lines
def footer(self):
if self.paper_margin == 'true':
pdf.set_draw_color(255, 192, 203)
pdf.set_line_width(0.4)
pdf.line(0, 25, 210, 25)
pdf.line(25, 0, 25, 297)
if self.paper_lines == 'true':
if self.paper_margin == 'true':
self.add_lines_to_page(33)
else:
self.add_lines_to_page(0)
def add_lines_to_page(self, y):
pdf.set_draw_color(174, 181, 176)
while y < 297:
pdf.set_line_width(0.2)
pdf.line(0, y, 210, y)
y += 8
'''
def print_content_to_pdf(self, name):
self.add_page()
# Read text file
with open(name, 'rb') as fh:
txt = fh.read().decode('latin-1')
# Output justified text
self.multi_cell(0, 8, txt)
# Line break
self.ln()
def create_watermark(input_pdf, output, watermark):
watermark_obj = PdfFileReader(watermark)
watermark_page = watermark_obj.getPage(0)
pdf_reader = PdfFileReader(input_pdf)
pdf_writer = PdfFileWriter()
# Watermark all the pages
for page in range(pdf_reader.getNumPages()):
page = pdf_reader.getPage(page)
page.mergePage(watermark_page)
pdf_writer.addPage(page)
with open(output, 'wb') as out:
pdf_writer.write(out)
| [
"[email protected]"
] | |
bb1df2a73f76642867e6419de64f363b50a7fdf1 | b2b8da5f84433be8aa0358b50f8f92c1a33c1ad4 | /frontiers.py | bd29d3f1d2bd3451544190180ab296e702983cf0 | [] | no_license | HUIYINGLEE/Artificial-Intelligence-Search | 754655ea1fa48f251218327ba8399ca25a28d2f4 | 31d39055b2f57e16cb9bbc274ebf73083b2c903e | refs/heads/master | 2021-01-23T04:23:48.799642 | 2017-09-05T07:30:39 | 2017-09-05T07:30:39 | 102,445,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,675 | py |
""" This file contains data structures useful for representing search frontiers
for your depth-first, breadth-first, and a-star search algorithms (Q1-3).
You do not have to use these, but it is strongly recommended.
"""
import heapq, collections
class Stack(object):
""" A container with a last-in-first-out (LIFO) queuing policy."""
def __init__(self):
""" Make a new empty Stack.
(Stack) -> None
"""
self.contents = []
def push(self, item):
""" Push item onto the stack.
(Stack, object) -> None
"""
self.contents.append(item)
def pop(self):
""" Pop and return the most recently pushed item from the stack.
(Stack) -> object
"""
return self.contents.pop()
def peek(self):
""" Return the most recently pushed item from the stack.
(Stack) -> object
"""
return self.contents[-1]
def is_empty(self):
""" Returns True if the stack is empty and False otherwise.
(Stack) -> bool
"""
return not self.contents
def find(self, f):
""" Returns some item n from the queue such that f(n) == True and None
if there is no such item.
(Stack, (object) -> object/None) -> object
"""
for elem in self.contents:
if f(elem):
return elem
return None
def __str__(self):
""" Return a string representation of the Stack.
(Stack) -> str
"""
return str(self.contents)
class Queue(object):
""" A container with a first-in-first-out (FIFO) queuing policy.
Its contents are stored in a collections.deque. This allows constant
time insertion and removal of elements at both ends -- whereas a list
is constant time to add or remove elements at the end, but linear
time at the head.
"""
def __init__(self):
""" Make a new empty Queue.
(Queue) -> None
"""
self.contents = collections.deque()
def push(self, item):
""" Enqueue the item into the queue
(Queue, object) -> None
"""
self.contents.append(item)
def pop(self):
""" Dequeue and return the earliest enqueued item still in the queue.
(Queue) -> object
"""
return self.contents.popleft()
def peek(self):
""" Return the earliest enqueued item still in the queue.
(Queue) -> object
"""
return self.contents[0]
def is_empty(self):
""" Returns True if the queue is empty and False otherwise.
(Queue) -> bool
"""
return not self.contents
def find(self, f):
""" Returns some item n from the queue such that f(n) == True and None
if there is no such item.
(Queue, (object) -> object/None) -> object
"""
for elem in self.contents:
if f(elem):
return elem
return None
def __str__(self):
""" Return a string representation of the queue.
(Queue) -> str
"""
return str(list(self.contents))
class PriorityQueue(object):
""" This class implements a priority queue data structure. Each inserted item
has a priority associated with it and we are usually interested in quick
retrieval of the lowest-priority item in the queue. This data structure
allows O(1) access to the lowest-priority item.
"""
def __init__(self):
""" Make a new empty priority queue.
(PriorityQueue) -> None
"""
self.heap = []
self.count = 0
def push(self, item, priority):
""" Enqueue an item to the priority queue with a given priority.
(PriorityQueue, object, number) -> None
"""
heapq.heappush(self.heap, (priority, self.count, item))
self.count += 1
def pop(self):
""" Dequeue and return the item with the lowest priority, breaking ties
in a FIFO order.
(PriorityQueue) -> object
"""
return heapq.heappop(self.heap)[2]
def peek(self):
""" Return the item with the lowest priority, breaking ties in a FIFO order.
(PriorityQueue) -> object
"""
return self.heap[0][2]
def is_empty(self):
""" Returns True if the queue is empty and False otherwise.
(PriorityQueue) -> bool
"""
return not self.heap
def find(self, f):
""" Returns some item n from the queue such that f(n) == True and None
if there is no such item.
(PriorityQueue, (object) -> object/None) -> object
"""
for elem in self.heap:
if f(elem[2]):
return elem[2]
return None
def change_priority(self, item, priority):
""" Change the priority of the given item to the specified value. If
the item is not in the queue, a ValueError is raised.
(PriorityQueue, object, int) -> None
"""
for eid, elem in enumerate(self.heap):
if elem[2] == item:
self.heap[eid] = (priority, self.count, item)
self.count += 1
heapq.heapify(self.heap)
return
raise ValueError("Error: " + str(item) + " is not in the PriorityQueue.")
def __str__(self):
""" Return a string representation of the queue. This will not be in
order.
(PriorityQueue) -> str
"""
return str([x[2] for x in self.heap])
class PriorityQueueWithFunction(PriorityQueue):
""" Implements a priority queue with the same push/pop signature of the
Queue and the Stack classes. This is designed for drop-in replacement for
those two classes. The caller has to provide a priority function, which
extracts each item's priority.
"""
def __init__(self, priority_function):
""" Make a new priority queue with the given priority function.
(PriorityQueueWithFunction, (object) -> number) -> None
"""
super(PriorityQueueWithFunction, self).__init__()
self.priority_function = priority_function
def push(self, item):
"""" Adds an item to the queue with priority from the priority function.
(PriorityQueueWithFunction, object) -> None
"""
heapq.heappush(self.heap, (self.priority_function(item), self.count, item))
self.count += 1
| [
"[email protected]"
] | |
22d5ffcad123fb4eadc95116c69093d02e661bc4 | cc1ea36872e95190582813dc8235c8d4afbf4d78 | /python/ray/experimental/data/tests/test_dataset.py | 10680d1de1c7895d8d9b5e1321216782aba92526 | [
"MIT",
"Apache-2.0"
] | permissive | tmct/ray | dfde69624742b6c24b27f0b5c6be5fd77d07c135 | 53206dd4401665ec599118241c236ac9e6f4852a | refs/heads/master | 2023-05-30T18:25:23.838922 | 2021-06-30T10:32:11 | 2021-06-30T10:32:11 | 377,907,576 | 0 | 0 | Apache-2.0 | 2021-06-17T17:14:18 | 2021-06-17T17:14:18 | null | UTF-8 | Python | false | false | 3,618 | py | import os
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import ray
from ray.tests.conftest import * # noqa
def test_basic(ray_start_regular_shared):
ds = ray.experimental.data.range(5)
assert sorted(ds.map(lambda x: x + 1).take()) == [1, 2, 3, 4, 5]
assert ds.count() == 5
assert sorted(ds.to_local_iterator()) == [0, 1, 2, 3, 4]
def test_convert_types(ray_start_regular_shared):
plain_ds = ray.experimental.data.range(1)
arrow_ds = plain_ds.map(lambda x: {"a": x})
assert arrow_ds.take() == [{"a": 0}]
assert "ArrowRow" in arrow_ds.map(lambda x: str(x)).take()[0]
arrow_ds = ray.experimental.data.range_arrow(1)
assert arrow_ds.map(lambda x: "plain_{}".format(x["value"])).take() \
== ["plain_0"]
assert arrow_ds.map(lambda x: {"a": (x["value"],)}).take() == \
[{"a": (0,)}]
def test_from_items(ray_start_regular_shared):
ds = ray.experimental.data.from_items(["hello", "world"])
assert ds.take() == ["hello", "world"]
def test_repartition(ray_start_regular_shared):
ds = ray.experimental.data.range(20, parallelism=10)
assert ds.num_blocks() == 10
assert ds.sum() == 190
assert ds._block_sizes() == [2] * 10
ds2 = ds.repartition(5)
assert ds2.num_blocks() == 5
assert ds2.sum() == 190
# TODO: would be nice to re-distribute these more evenly
    assert ds2._block_sizes() == [10, 10, 0, 0, 0]
ds3 = ds2.repartition(20)
assert ds3.num_blocks() == 20
assert ds3.sum() == 190
    assert ds3._block_sizes() == [2] * 10 + [0] * 10  # presumably ds3, not ds2, was intended
large = ray.experimental.data.range(10000, parallelism=10)
large = large.repartition(20)
assert large._block_sizes() == [500] * 20
def test_repartition_arrow(ray_start_regular_shared):
ds = ray.experimental.data.range_arrow(20, parallelism=10)
assert ds.num_blocks() == 10
assert ds.count() == 20
assert ds._block_sizes() == [2] * 10
ds2 = ds.repartition(5)
assert ds2.num_blocks() == 5
assert ds2.count() == 20
    assert ds2._block_sizes() == [10, 10, 0, 0, 0]
ds3 = ds2.repartition(20)
assert ds3.num_blocks() == 20
assert ds3.count() == 20
    assert ds3._block_sizes() == [2] * 10 + [0] * 10  # presumably ds3, not ds2, was intended
large = ray.experimental.data.range_arrow(10000, parallelism=10)
large = large.repartition(20)
assert large._block_sizes() == [500] * 20
def test_parquet(ray_start_regular_shared, tmp_path):
df1 = pd.DataFrame({"one": [1, 2, 3], "two": ["a", "b", "c"]})
table = pa.Table.from_pandas(df1)
pq.write_table(table, os.path.join(tmp_path, "test1.parquet"))
df2 = pd.DataFrame({"one": [4, 5, 6], "two": ["e", "f", "g"]})
table = pa.Table.from_pandas(df2)
pq.write_table(table, os.path.join(tmp_path, "test2.parquet"))
ds = ray.experimental.data.read_parquet(tmp_path)
values = [[s["one"], s["two"]] for s in ds.take()]
assert sorted(values) == [[4, "e"], [4, "e"], [5, "f"], [5, "f"], [6, "g"],
[6, "g"]]
def test_pyarrow(ray_start_regular_shared):
ds = ray.experimental.data.range_arrow(5)
assert ds.map(lambda x: {"b": x["value"] + 2}).take() == \
[{"b": 2}, {"b": 3}, {"b": 4}, {"b": 5}, {"b": 6}]
assert ds.map(lambda x: {"b": x["value"] + 2}) \
.filter(lambda x: x["b"] % 2 == 0).take() == \
[{"b": 2}, {"b": 4}, {"b": 6}]
assert ds.filter(lambda x: x["value"] == 0) \
.flat_map(lambda x: [{"b": x["value"] + 2}, {"b": x["value"] + 20}]) \
.take() == [{"b": 2}, {"b": 20}]
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| [
"[email protected]"
] | |
696e39ff3ce938c57d77bbd1d19720114a429717 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fractions_20200802120344.py | 295d66f8b4e038a18999b672b169036dff3785bd | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,426 | py | def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator,denominator):
res = ""
if numerator == 0:
return "0"
if denominator == 0:
return "undefined"
if (numerator < 0 and denominator > 0) or (numerator > 0 and denominator <0):
res += "-"
numerator = abs(numerator)
denominator = abs(denominator)
if numerator % denominator == 0:
return str(numerator // denominator)
else:
# this means its has a remainder
res += str(numerator // denominator)
res += "."
newDict = {}
rem = numerator % denominator
while rem != 0:
if rem in newDict:
                position = newDict[rem]
res = res[:position] + "(" + res[position:] + ')'
break
newDict[rem] = len(res)
rem *=10
res_part = rem // denominator
res += str(res_part)
rem = rem % denominator
return res
print(frac(1,6)) | [
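# Expected output above: 1/6 -> "0.1(6)" under standard long division.
# A couple more sanity checks:
print(frac(1, 3))    # "0.(3)"
print(frac(-50, 8))  # "-6.25"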
"[email protected]"
] | |
d8d1fdfd39c67f49a3452b03040757c0ece60164 | b34017f6b6a6ba2579d31ba147201efd49a1318a | /View/client_view.py | 3b42d8abd082098d5aa3610a15b3ed24de57413a | [] | no_license | RichardLukacs/MyStore | ca207ab670abb88e30c6f76e28fe59258abc53ab | 11b82b3f0afe67f1ae96f7143c4f033662f177fd | refs/heads/main | 2023-06-30T16:46:08.384988 | 2021-07-30T10:30:09 | 2021-07-30T10:30:09 | 391,020,504 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,364 | py | class ClientConsoleView:
@staticmethod
def display_one_client(client):
"""
Display one client in the console
:param client: Client
"""
if client:
print(client)
else:
print('No such client')
@staticmethod
def display_all_clients(clients):
"""
Display all clients in the console
:param clients: List(Client)
"""
for client in clients:
print(client)
@staticmethod
def get_client_cnp():
"""
Read a client cnp from the console
:return: cnp: str
"""
        return input('Please provide a client cnp: ')
@staticmethod
def get_client_fields():
"""
Read all client fields from the console: cnp, last_name, first_name, age_group, email
:return: cnp:str, last_name:str, first_name:str, email:str
"""
cnp = input('Please provide the client cnp: ') # TODO Validate cnp
last_name = input('Please provide the client last name: ')
first_name = input('Please provide the client first name: ')
email = input('Please provide the client email: ') # TODO Validate email
return cnp, last_name, first_name, email
@staticmethod
def display_delete_message():
"""
Display delete message to console
"""
print('You are about to delete a client')
@staticmethod
def display_update_message():
"""
Display update message to console
"""
print('You are about to update a client, leave the fields you want the same empty')
@staticmethod
def client_menu():
"""
Display client menu to console
:return: Selected option from the menu
"""
print('1 Add')
print('2 Update')
print('3 Remove')
print('4 Display One')
print('5 Display All')
print('x Exit')
option = input('Select Option: ')
return option
@staticmethod
def display_fields_not_empty():
"""
Display message that fields cannot be empty
"""
print('Fields cannot be empty')
@staticmethod
def display_duplicate(field):
print(f'Field {field} is duplicated')
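
# Minimal driver sketch (not part of the original module): loop the static
# menu until the user chooses 'x'.
if __name__ == '__main__':
    selected = ClientConsoleView.client_menu()
    while selected != 'x':
        selected = ClientConsoleView.client_menu()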
| [
"[email protected]"
] | |
91304baf41353160c2dc62465e514ad735ed0613 | 1001a92bb42809d73b2320b94fc9a6239122afd8 | /social-website/bookmarks/account/migrations/0001_initial.py | 48c07afa3dbe90d9aaceea49c8c53b48129390d9 | [] | no_license | iluxonchik/django-by-example | 566924bb702eadf1c4fd2c760fa170e13048fc02 | b752b20dd5d01fc443f2d0b6d00dfaf804a4a453 | refs/heads/master | 2021-01-17T17:37:50.725295 | 2016-07-22T16:38:09 | 2016-07-22T16:38:09 | 60,127,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-28 22:49
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_of_birth', models.DateField(blank=True, null=True)),
('photo', models.ImageField(upload_to='users/%Y/%m/%d')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
6a1b21ebf2ba5d540449901d367ac42258853d0f | be6daee5e78faf9d501b4c4d9eee696eb20dc787 | /SignalWire/outboundsms.py | ca406390346ff0be24dcdab66c082ed16d90b232 | [] | no_license | goodmorningsms/website | 58bb39d3e78748e23e7ea2a58ea59da610b73a92 | 43ca53b3e0add9f3b345d9501f91e4a4396b6766 | refs/heads/master | 2020-05-16T07:29:10.922346 | 2019-04-25T21:30:54 | 2019-04-25T21:30:54 | 182,879,424 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | print("Hello world")
__package__ = None
from signalwire.rest import Client as signalwire_client
import private_key as key
txt_service = signalwire_client(key.project_key, key.api_key,
signalwire_space_url = key.signalwire_url)
"""
import gspread
from oauth2client.service_account import ServiceAccountCredentials
# use creds to create a client to interact with the Google Drive API
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
creds = ServiceAccountCredentials.from_json_keyfile_name('GM_Test.json', scope)
client = gspread.authorize(creds)
print(scope)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open("GM Test").sheet1
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()
# rint(list_of_hashes)
numberlist = []
# Parse Phone Numbers
for i in list_of_hashes:
numberlist.append(i['Phone Number'])
"""
numberlist = ['9018286567']  # , '9012860576', '4043172600'
api = '23LPQNOOOJSFP5T2'
from alpha_vantage.timeseries import TimeSeries
ts = TimeSeries(key=api)
data = ts.get_batch_stock_quotes(symbols=['TSLA'])
print(data[0][0]['2. price'])
for i in numberlist:
i = '+1' + i
print(i)
message = txt_service.messages.create(
from_='+19014259501',
body='Hey Mr. Doug ( ͡° ͜ʖ ͡°)The current TSLA price is: ' + data[0][0]['2. price'] ,
to=i
)
print(message.status) | [
"[email protected]"
] | |
a0c798b8f6efc1d95512a72dedd752659655ec9d | cf7c33cd8def8d9e55bcd23c76f174b2d22312fa | /Python/test_b.py | b4de07ffcc9b78d7993b8e80231c23b76ed39de2 | [] | no_license | tinchodipalma/winclap-fullstack | f4ffee440844827caac9391c1c0c131f3079efea | cb82a525108cabe0d73620b4f842b8c966fb664e | refs/heads/master | 2021-04-30T16:43:23.910727 | 2017-01-26T13:12:42 | 2017-01-26T13:12:42 | 80,114,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import re
def annograms(word):
words = [w.rstrip() for w in open('WORD.LST')]
# Get same length words
words = list(filter(lambda x: len(x) == len(word), words))
# Check for each letter in word with regex
regex_pattern_list = ['(?!.*{}.*{})'.format(s, s) for s in list(word)]
regex_pattern_str = ''.join(regex_pattern_list)
regex_pattern = '^{}[{}]*$'.format(regex_pattern_str, word)
pattern = re.compile(r'{}'.format(regex_pattern))
# Get matches for the regex
annograms = [i for i in words if pattern.match(i)]
# Return list of results
return annograms
if __name__ == "__main__":
print(annograms("train"))
print('--')
print(annograms('drive'))
print('--')
print(annograms('python'))
| [
"[email protected]"
] | |
1ced2d5c55106a19d0fa414dd17ea6b1564230fd | 9d35caf28fa2e4cb0ec56491fed240c6a0da2f02 | /Acunetix12-Scan-xray.py | b94dda80043a31635ce64489e2f1b2d61c5dfe18 | [] | no_license | Jadore147258369/Acunetix12-Scan-xray | b49eb9838756a6ccdeaa51c62aa33ce37e8cc036 | 5be491d1c408374a7d5a0637eaa9cc01f323b0fc | refs/heads/master | 2022-12-08T12:02:37.702308 | 2020-09-03T07:51:11 | 2020-09-03T07:51:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,750 | py | import requests
import json
import tableprint
import os
import sys
import openpyxl as ws
from config import *
requests.packages.urllib3.disable_warnings()
def get_scans():
    create_xlsx()
c = 0
while True:
result = requests.get(define.host + "api/v1/scans?c=" + str(c), headers=define.api_header, timeout=30,
verify=False)
results = json.loads(result.content)
c = c + 100
if results['scans'] == []:
            return print(define.RED + "[*]All tasks finished. Results saved to: %s" % define.filename)
for s in results["scans"]:
get_vulnerabilities(s['scan_id'], s['current_session']['scan_session_id'], s['target']['address'])
def get_vulnerabilities(scan_id, scan_session_id, host):
c = 0
while True:
result = requests.get(
url=define.host + "api/v1/scans/" + scan_id + "/results/" + scan_session_id + "/vulnerabilities?c=%s" % str(
c), headers=define.api_header, timeout=30, verify=False)
results = json.loads(result.text)
c = c + 100
if results['vulnerabilities'] == []:
            return print(define.BLUE + "[-]Finished fetching current scan, scan_id: %s" % scan_id)
for s in results['vulnerabilities']:
get_details(scan_id, scan_session_id, host, s['vuln_id'])
def get_details(scan_id, scan_session_id, host, vuln_id):
vulnerabilities = {}
result = requests.get(
url=define.host + "api/v1/scans/" + scan_id + "/results/" + scan_session_id + "/vulnerabilities/" + vuln_id,
headers=define.api_header, timeout=30, verify=False)
results = json.loads(result.text)
vulnerabilities['details'] = results['details']
vulnerabilities['request'] = results['request']
vulnerabilities['affects_url'] = results['affects_url']
vulnerabilities['description'] = results['description']
vulnerabilities['vt_name'] = results['vt_name']
vulnerabilities['recommendation'] = results['recommendation']
vulnerabilities['severity'] = results['severity']
vulnerabilities['host'] = host
vulnerabilities['affects_detail'] = results['affects_detail']
write_xlsx(vulnerabilities)
def write_xlsx(vulnerabilities):
    print(define.GREEN + "[*]Writing entry, vu_name: %s" % vulnerabilities['vt_name'])
wb = ws.load_workbook(define.filename)
sheet1 = wb['Sheet']
num = sheet1.max_row
sheet1.cell(row=num + 1, column=1, value=vulnerabilities['host'])
sheet1.cell(row=num + 1, column=2, value=vulnerabilities['vt_name'])
sheet1.cell(row=num + 1, column=3, value=vulnerabilities['severity'])
sheet1.cell(row=num + 1, column=4, value=vulnerabilities['affects_detail'])
sheet1.cell(row=num + 1, column=5, value=vulnerabilities['affects_url'])
sheet1.cell(row=num + 1, column=6, value=vulnerabilities['request'])
sheet1.cell(row=num + 1, column=7, value=vulnerabilities['recommendation'])
sheet1.cell(row=num + 1, column=8, value=vulnerabilities['description'])
sheet1.cell(row=num + 1, column=9, value=vulnerabilities['details'])
wb.save(define.filename)
def create_xlsx():
if os.path.exists(define.filename) == False:
s = 0
wb = ws.Workbook()
ws1 = wb.active
if os.path.exists('out/') == False:
os.mkdir('out')
        word = ['Target', 'Vulnerability name', 'Severity (3-0, high to informational)', 'Affected parameter', 'Affected URL', 'Request', 'Recommendation', 'Description', 'Details']
for i in word:
s = s + 1
ws1.cell(row=1, column=s, value=i)
wb.save(define.filename)
        print(define.RED + "[*]File created successfully: %s" % define.filename)
else:
        print(define.RED + "[*]File already exists: %s" % define.filename)
x = []
def task(files):
s = open('%s' % files, 'r')
for i in s.readlines():
i = i.strip()
x.append(i)
s.close
# module that configures the target's scan proxy
def set_proxy(target_url,locationone):
url = define.host + '/api/v1/targets/'+locationone+'/configuration'
datajson = {
"enabled": "true",
"address": define.xray_address,
"protocol": "http",
"port": define.xray_port
}
datajsontwo = {
"proxy": datajson
}
try:
res = requests.patch(url, headers=define.api_header, verify=False,data=json.dumps(datajsontwo))
if(res.status_code == 204):
print("[+] " + target_url + " 代理设置成功")
except:
print("[-] " + target_url + " 代理设置失败")
pass
def add_crawlonly(url):  # for crawl-only tasks, use this module to hand traffic off to xray
# 添加任务
data = {"address": url, "description": url, "criticality": "10"}
try:
response = requests.post(define.host + "api/v1/targets", data=json.dumps(data), headers=define.api_header,
timeout=30, verify=False)
result = json.loads(response.content)
        respa = ''
        try:
            respa = response.headers['Location']
        except KeyError:
            pass
if "/api/v1/targets/" in respa:
respa = respa.replace('/api/v1/targets/', '')
set_proxy(url, respa)
else:
pass
return result['target_id']
except Exception as e:
print(str(e))
return
def add(url):  # used for adding targets in bulk during normal scans
#添加任务
data = {"address":url,"description":url,"criticality":"10"}
try:
response = requests.post(define.host+"api/v1/targets",data=json.dumps(data),headers=define.api_header,timeout=30,verify=False)
result = json.loads(response.content)
return result['target_id']
except Exception as e:
print(str(e))
return
def single_scan(url,scan):
if scan == define.awvs_scan_rule['crawlonly']:
target_id = add_crawlonly(url)
else:
target_id = add(url)
data = {'target_id': target_id, 'profile_id': scan,
'schedule': {'disable': False, 'start_date': None, 'time_sensitive': False}}
try:
r = requests.post(url=define.host + 'api/v1/scans', timeout=10, verify=False, headers=define.api_header,
data=json.dumps(data))
if r.status_code == 201:
            print(define.BLUE + '[-] OK, scan started, now scanning: %s...' % url)
except Exception as e:
print(e)
def delete_all():
c = 0
print(define.RED + "[*]开始清除任务")
while True:
result = requests.get(define.host + "api/v1/targets?c=" + str(c), headers=define.api_header, timeout=30,
verify=False)
results = json.loads(result.content)
c = c + 100
if results['targets'] == []:
            return print(define.RED + "[*]All targets cleared")
for s in results["targets"]:
r = requests.delete(url=define.host + 'api/v1/targets/' + s['target_id'], timeout=10, verify=False,
headers=define.api_header)
print(define.BLUE + "[-]当前删除 target_id:%s" % s['target_id'])
if __name__ == '__main__':
print(define.ORANGE + define.banner)
if len(sys.argv) < 2:
print(define.ORANGE + define.usage)
elif sys.argv[1] == '-f':
scan_list = [define.awvs_scan_rule['full'],define.awvs_scan_rule['highrisk'],define.awvs_scan_rule['XSS'],define.awvs_scan_rule['SQL'],
define.awvs_scan_rule['Weakpass'],define.awvs_scan_rule['crawlonly']]
        scan = input('Select a scan mode: 1:full, 2:highrisk, 3:XSS, 4:SQL, 5:Weakpass, 6:crawlonly\n')
if scan.isdigit() == True and int(scan) <= 6 and int(scan) != 0:
scan = scan_list[int(scan) - 1]
print(scan)
try:
task(str(sys.argv[2]))
print(define.RED + "[*]扫描开始添加")
for s in x:
print(s)
single_scan(s,scan)
print(define.RED + "[*]扫描添加完毕")
except:
print(define.BLUE + ' [*]Usage example: Python3 Acunetix12-Scan-Agent.py -f url.txt')
else:
            print('Only integers from 1 to 6 are accepted, please try again')
elif sys.argv[1] == '-d':
delete_all()
elif sys.argv[1] == '-o':
get_scans()
else:
print(define.ORANGE + define.usage)
| [
"[email protected]"
] | |
b1d23fd7f98b9381ceec7d7651a3b6654f0f24dd | aa7d92a5ac6be781b77882dc83ec0b6bb2b7f34c | /vocab/student.py | d29ebdf6d1185936e8da385d374b49d00a43effe | [] | no_license | timthetinythug/lexicon | bc2ccc3e166b98cae72492508d890ca2829e2b2a | 67aded618fc9c41af6656dbdedf6f3d50d773dcd | refs/heads/master | 2020-03-22T10:28:15.057275 | 2018-07-05T22:12:28 | 2018-07-05T22:12:28 | 139,904,810 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | from format_txt import format
from parsed import parser
from word_dict import vocab
# Student will create an object for student with student name &
# and parse their vocabs for further implementation
# type(name) == string
# type(vocab_sheet) == string (file.txt)
# have the following file format:
# Date
# word, 0 or 1
# word, 0 or 1
# .
# .
# .
# ***
# .
# .
# .
# ***
# \n
class Student:
def __init__(self, name, vocab_sheet):
self.name = name
self.vocab = {}
self.excel = []
with open(vocab_sheet) as f:
data = f.readlines()
formatted = format(data)
self.excel = parser(formatted, '***')
self.vocab = vocab(self.excel)
def __eq__(self, other):
if (self.name == other.name) and \
(self.excel == other.excel):
return True
else:
return False
def __repr__(self):
temp = "Student: "
temp += self.name + '\n' + "Vocab: "
for i in self.vocab:
temp += i + '\n' + " "*len("vocab: ")
return str(temp)
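
# Usage sketch (hypothetical file name; 'sheet.txt' must follow the format
# documented at the top of this file):
#   student = Student('Ada', 'sheet.txt')
#   print(student)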
| [
"[email protected]"
] | |
9fd1055937018450212f454ac0800c8b37638cb4 | 12541a13c92f50b5369ee7f99ed0c1e97a45f035 | /2/2-10.py | ea0ed8aceae926373a46bd329d8cc417f82a6ddc | [
"BSD-3-Clause"
] | permissive | liuhanyu200/pygame | 6364d90914c244fa53f56a5616a9bd95b86f440c | 38a68e779e6b0a63edb1758fca98ebbf40bb0444 | refs/heads/master | 2023-06-07T04:32:57.946745 | 2021-07-01T10:17:41 | 2021-07-01T10:17:41 | 375,933,464 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | #-*- coding:utf-8 -*-
# 2.5.2 Comments
# Author: hsyagk
# Email: [email protected]
# Purpose: learning Python comments | [
"[email protected]"
] | |
820930a5e53816c16ff55040a2c335e0fd53fe5c | 938b4df6428da214d25cb268b0d4099d56f1c8cf | /CodingBat/warmUp2.py | 5bdf0c535060622959a7b13096b0bac4ff506706 | [] | no_license | AL-knowledgeSeeker/Python_Prac | 5f2500b80c50bbeca5a1ff879a91f7582d1e2d80 | 878f07921cd30914e80675e8955f99b946d98561 | refs/heads/master | 2020-11-26T01:58:07.382605 | 2020-01-06T19:37:11 | 2020-01-06T19:37:11 | 228,930,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,003 | py | class warmUp2:
def string_times(self,str, n):
return str*n
    def front_times(self, str, n):
if len(str) < 3:
return str * 4
else:
return str[:3] * n
    def string_bits(self, str):
temp = ''
for i in range(len(str)):
if i % 2 == 0:
temp = temp + str[i]
return temp
def string_splosion(self,str):
result=''
for i in range(len(str)):
result=result+str[:i+1]
print (result)
return result
def array_count9(self,nums):
cnt = 0
for i in range(len(nums)):
if nums[i]==9:
cnt = cnt + 1
return cnt
    def array_front9(self, nums):
for i in range(len(nums)):
if i < 4 and nums[i] == 9:
return True
return False
up2=warmUp2()
#print(up2.string_times("test",3))
#print(up2.front_times("test",2))
print (up2.string_splosion("code"))
print (up2.array_count9([1,9,9])) | [
"[email protected]"
] | |
b7c67d3c5f23c1d92ab7030f1b19e6adbbfc0969 | 7c0ac3eb9c84662a748579190a5f563b1980c668 | /python_wagtail/mysite/mysite/wsgi.py | 0a9eb600d85c3554ba7e021fba2991251d5274ee | [] | no_license | ngduytra/python-utilities | 97cf49d4e5c7f68111c4aef89c1b6fdeda2e8fbc | 9db51b19735e632f8dccd2dc1e09f77744d13547 | refs/heads/main | 2023-09-03T02:18:33.290112 | 2021-11-10T03:25:45 | 2021-11-10T03:25:45 | 426,470,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | """
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings.dev")
application = get_wsgi_application()
| [
"[email protected]"
] | |
0db523f629f5e49a1e06ffc80d9771b1054577b5 | 76839071c203b7dc09f8da9afe776ba5e2de8159 | /testing/prepare_data.py | 1f888d4c6b226329b0487d49a56cb2dd256e6811 | [] | no_license | TwineTickler/tradingproject | 6200ac656bc03195dc2833c05007c501a1081d3b | c9f16c48f49d74c64d1d1951c7aa45a961167fa7 | refs/heads/main | 2023-02-21T14:25:40.960274 | 2021-01-26T17:00:19 | 2021-01-26T17:00:19 | 316,829,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | import numpy
from sklearn.preprocessing import MinMaxScaler
def prepare_data(df, look_back=1):
# returns 4 arrays
# trainX, trainY, testX, and testY
numpy.random.seed(7) # set random seed
dataset = df.values # move the data into a numpy array
scaler = MinMaxScaler(feature_range=(0, 1))
dataset = scaler.fit_transform(dataset) # normalize the dataset
    # split the data into train and test data sets.
# This will use the Index to keep everything in time properly
train_size = int(len(dataset) * 0.67)
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
print(len(train), len(test))
#reshape into X=t and Y=t+1
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# The LSTM expects the input data to be a specific array structure in the form of:
# [samples, time steps, features]
# currently we have: [samples, features]
# I think what we have to do is just add a increment value for the "time steps" part of the array
#reshape input to be [samples, time steps, features]
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
return trainX, trainY, testX, testY
# convert an array of values into a dataset matrix
# This should give us something like:
# look_back will be the number of time events to use to predict the next time event (default 1)
# x will be the price at a given event (t) and Y will be the price at the next event (t + 1)
# (later we might want to change this to say.. 60? I don't know)
def create_dataset(dataset, look_back=1):
dataX, dataY = [], []
for i in range(len(dataset)-look_back-1):
a = dataset[i:(i+look_back), 0]
dataX.append(a)
dataY.append(dataset[i + look_back, 0])
return numpy.array(dataX), numpy.array(dataY) | [
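
# Worked example (look_back=1): for dataset [[10], [11], [12], [13]],
# create_dataset returns dataX=[[10], [11]] and dataY=[11, 12]; the final
# (12 -> 13) pair is dropped by the extra "-1" in the range above.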
"[email protected]"
] | |
3cf4abfdaf5094a306978d20df90e429f4110f67 | 927b8ad06ac318c9fbff96587bc2ae3325c1b71a | /webapp/app.py | 187dd78013addda8b124b7aa61360b89664287e3 | [] | no_license | anthonydillon/random.webteam.space | 095b494f95c531d1fa87205b5a33f9f434ab01d6 | 1a38d5aabfe78a1430743a230cd989c9196aa587 | refs/heads/master | 2020-12-30T09:02:51.638677 | 2019-11-22T16:23:10 | 2019-11-22T16:23:10 | 238,942,061 | 0 | 0 | null | 2020-02-07T14:21:22 | 2020-02-07T14:21:21 | null | UTF-8 | Python | false | false | 1,175 | py | import flask
from werkzeug.contrib.fixers import ProxyFix
from werkzeug.debug import DebuggedApplication
from webapp.handlers import add_headers, clear_trailing_slash
from webapp.webteam.views import webteam
def create_app(testing=False):
app = flask.Flask(
__name__, template_folder="../templates", static_folder="../static"
)
app.testing = testing
app.wsgi_app = ProxyFix(app.wsgi_app)
app.url_map.strict_slashes = False
if app.debug:
app.wsgi_app = DebuggedApplication(app.wsgi_app)
app.before_request(clear_trailing_slash)
app.after_request(add_headers)
init_handler(app)
init_blueprint(app)
return app
def init_handler(app):
@app.errorhandler(404)
def page_not_found(error):
"""
For 404 pages, display the 404.html template,
passing through the error description.
"""
return flask.render_template("404.html", error=error.description), 404
@app.route("/_status/check")
def health_check():
""" Health check end point used by Talisker.
"""
return ("", 200)
def init_blueprint(app):
app.register_blueprint(webteam)
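
# Local development sketch (hypothetical entry point, not part of the
# original module):
if __name__ == "__main__":
    create_app().run(port=8000, debug=True)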
| [
"[email protected]"
] | |
7a55f0da86c53f5851d0f3bf98eac22dc24a968b | 1373deef4c5c20c3f1f43f3ad8d3f98cfb602a49 | /novice/01-04/created/task/views.py | e14bb109481094396928a4d83232ed348046f31c | [] | no_license | giko99/praxis-academy | 53e0dc5497a42e8317a72b5879a68ecfc09e0b7d | 7b48da3930b7701a827a5d5ad16e629aa21267f3 | refs/heads/master | 2023-04-18T08:10:57.193051 | 2021-04-27T19:52:50 | 2021-04-27T19:52:50 | 287,189,913 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | from django.shortcuts import render
from . import models
# Create your views here.
def index(req):
if req.POST:
models.Task.objects.create(name=req.POST['name'], status=req.POST['status'])
tasks = models.Task.objects.all()
return render(req, 'task/index.html',{
'data' :tasks,
}) | [
"[email protected]"
] | |
f95a67440ef70c23e6aa2a98be806bd8f7595efe | 3f67b218b7d949894593cb8c2dacb28379cf542d | /SFM/SFM.py | fa9f629e3216b7eb658ff204c8b6b03be50a61a9 | [] | no_license | PonsletV/Structure-from-motion | 768724e58caf5eee82caf71f3d9c869d63d91319 | cdddfc5e250d99fc307cc9097a27997351c0aa13 | refs/heads/master | 2022-04-14T21:59:22.316210 | 2020-03-20T13:48:57 | 2020-03-20T13:48:57 | 245,221,341 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,932 | py | import numpy as np
import cv2
from Descriptors.matching import Match
from SFM.camera import Projection, compose, concat, Calibration
class Sfm(object):
""" Class for representing the correspondence between two images"""
def __init__(self, calibration: Calibration, match: Match, projection_left: Projection):
"""
Initialize the structure needed to reconstruct the 3D points from the scene
:param calibration: calibration object (contains the K matrix)
:param match: Match object with the point detector and images
:param projection_left: projection object, current position of the left camera
"""
self.calibration = calibration
self.match = match
self.P_left = projection_left
self.img_shape = match.img_shape
self.is_fitted = False
""" reconstruction properties """
self.P_transformation = None
self.E = None
self.F = None
self.P_right = None
""" points """
self.x_l = None
self.x_r = None
self.idx_l = None
self.idx_r = None
self.xp_l = None
self.xp_r = None
self.threeDpoints = None
self.mask = None
def fit_descriptor(self):
""" compute the keypoints and descriptors, lowe_factor only used for SIFT"""
self.match.fit()
def fit_reconstruction(self):
"""
Reconstruct the 3D points (after fit_descriptor)
:return: list of 3D points of size (3*n)
"""
if not self.match.is_fitted:
raise ValueError("must call fit_descriptor before")
K = self.calibration.K
P_l = self.P_left
# get the coordinates of the points from the keypoints lists
pts_l = np.array([self.match.kp_l[m.queryIdx].pt for m in self.match.matches])
idx_l = np.array([m.queryIdx for m in self.match.matches])
pts_r = np.array([self.match.kp_r[m.trainIdx].pt for m in self.match.matches])
idx_r = np.array([m.trainIdx for m in self.match.matches])
# estimate E with RANSAC
E, mask = cv2.findEssentialMat(pts_l, pts_r, cameraMatrix=self.calibration.K, method=cv2.FM_RANSAC)
inliers = np.where(mask == 1)[0]
# select the inliers from RANSAC
pts_l = pts_l[inliers, :]
pts_r = pts_r[inliers, :]
idx_l = idx_l[inliers]
idx_r = idx_r[inliers]
self.mask = inliers
# compute the fundamental F
self.F = np.dot(np.linalg.inv(K.T), np.dot(E, np.linalg.inv(K)))
# compute the rotation and translation of the transformation
_, R, t, mask_c = cv2.recoverPose(E, pts_l, pts_r, cameraMatrix=K)
self.P_transformation = concat(R, t)
# compute the new position of the camera
P_r = compose(P_l, self.P_transformation)
# select the points that are in front of the selected camera
infront = np.where(mask_c != 0)[0]
pts_l = pts_l[infront, :]
pts_r = pts_r[infront, :]
idx_l = idx_l[infront]
idx_r = idx_r[infront]
self.mask = self.mask[infront]
# find the position of the 3D points with a triangulation
X = cv2.triangulatePoints(np.dot(K, P_l.P), np.dot(K, P_r.P), pts_l.T, pts_r.T)
X = X / X[3]
threeDpoints = X[:3, :]
# reproject the 3D points through the cameras
xlp, _ = cv2.projectPoints(threeDpoints.T, P_l.rv, P_l.t, K, distCoeffs=self.calibration.dist_coeff)
xrp, _ = cv2.projectPoints(threeDpoints.T, P_r.rv, P_r.t, K, distCoeffs=self.calibration.dist_coeff)
# kept points from the initial keypoints on the two images
self.x_l = pts_l.T
self.x_r = pts_r.T
self.idx_l = idx_l
self.idx_r = idx_r
# reprojected points
self.xp_l = np.vstack((xlp[:, 0, 0], xlp[:, 0, 1]))
self.xp_r = np.vstack((xrp[:, 0, 0], xrp[:, 0, 1]))
# save the results
self.E = E
self.P_right = P_r
self.threeDpoints = threeDpoints
self.is_fitted = True
return threeDpoints
def fit(self):
""" compute the matchings between the two images then the 3D points corresponding to the keypoints"""
self.fit_descriptor()
return self.fit_reconstruction()
def compute_reprojection_error(self, verbose=False):
if not self.is_fitted:
raise ValueError("must be fitted before computation")
error1 = np.linalg.norm(self.x_l - self.xp_l, axis=0).mean()
error2 = np.linalg.norm(self.x_r - self.xp_r, axis=0).mean()
if verbose:
print('Reprojection error on left image : ', error1)
print('Reprojection error on right image : ', error2)
return error1, error2
def num_points(self):
if self.threeDpoints is not None:
            return self.threeDpoints.shape[1]  # threeDpoints has shape (3, n)
else:
return 0
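
# Usage sketch (objects are hypothetical; see SFM.camera / Descriptors.matching
# for how Calibration, Match and Projection instances are built):
#   sfm = Sfm(calibration, match, projection_left)
#   points3d = sfm.fit()                        # (3, n) array of 3D points
#   sfm.compute_reprojection_error(verbose=True)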
| [
"[email protected]"
] | |
e6778ae2069b9e2e1466c351be963150e6dbbb48 | 08ad52ed4d6b33d464d245273f060bd0722029ea | /hwrt/handwritten_data.py | aa1ad136ec299d60f1d8b175390c321856fb681d | [
"MIT"
] | permissive | MartinThoma/hwrt | 8468d5fee904f54b1a925693aff76911666308e9 | f271046c64679f38fb98a2af115a92c258c5897a | refs/heads/master | 2023-02-21T16:48:51.348954 | 2022-06-22T05:22:37 | 2022-06-22T05:22:37 | 24,846,573 | 73 | 20 | MIT | 2023-02-16T02:35:36 | 2014-10-06T13:20:25 | Python | UTF-8 | Python | false | false | 16,680 | py | """
Representation of a recording of on-line handwritten data.
On-line means that the pen trajectory is given (and not online as in
'Internet').
"""
# Core Library modules
import json
import logging
from typing import Callable, List
# Third party modules
import numpy
from PIL import Image, ImageDraw
logger = logging.getLogger(__name__)
class HandwrittenData:
"""Represents a handwritten symbol."""
def __init__(
self,
raw_data_json,
formula_id=None,
raw_data_id=None,
formula_in_latex=None,
wild_point_count=0,
missing_stroke=0,
user_id=0,
user_name="",
segmentation=None,
):
self.raw_data_json = raw_data_json
self.formula_id = formula_id
self.raw_data_id = raw_data_id
self.formula_in_latex = formula_in_latex
self.wild_point_count = wild_point_count
self.missing_stroke = missing_stroke
self.user_id = user_id
self.user_name = user_name
self.segmentation = segmentation
assert type(json.loads(self.raw_data_json)) is list, (
"raw_data_json is not JSON list: %r" % self.raw_data_json
)
assert len(self.get_pointlist()) >= 1, (
f"The pointlist of formula_id {self.formula_id} "
f"is {self.get_pointlist()}"
)
if segmentation is None:
# If no segmentation is given, assume all strokes belong to the
# same symbol.
self.segmentation = [list(range(len(json.loads(self.raw_data_json))))]
assert wild_point_count >= 0, f"wild_point_count = {wild_point_count}"
assert missing_stroke >= 0, f"missing_stroke = {missing_stroke}"
self.fix_times()
@classmethod
def generate(cls):
"""Generate a HandwrittenData object for testing and documentation."""
return HandwrittenData(
'[[{"x":678,"y":195,"time":1592756126416},'
'{"x":677,"y":199,"time":1592756126420},'
'{"x":675,"y":203,"time":1592756126427}]]'
)
def fix_times(self):
"""
Some recordings have wrong times. Fix them so that nothing after
loading a handwritten recording breaks.
"""
pointlist = self.get_pointlist()
times = [point["time"] for stroke in pointlist for point in stroke]
times_min = max(min(times), 0) # Make sure this is not None
for i, stroke in enumerate(pointlist):
for j, point in enumerate(stroke):
if point["time"] is None:
pointlist[i][j]["time"] = times_min
else:
times_min = point["time"]
self.raw_data_json = json.dumps(pointlist)
def get_pointlist(self):
"""
Get a list of lists of tuples from JSON raw data string. Those lists
represent strokes with control points.
Returns
-------
list :
A list of strokes. Each stroke is a list of dictionaries
{'x': 123, 'y': 42, 'time': 1337}
"""
try:
pointlist = json.loads(self.raw_data_json)
except Exception:
logger.debug("pointStrokeList: strokelistP")
logger.debug(self.raw_data_json)
logger.debug("didn't work")
raise
if len(pointlist) == 0:
logger.warning(
f"Pointlist was empty. Search for '{self.raw_data_json}' "
f"in `wm_raw_draw_data`."
)
return pointlist
def get_sorted_pointlist(self):
"""
Make sure that the points and strokes are in order.
Returns
-------
list
A list of all strokes in the recording. Each stroke is represented
as a list of dicts {'time': 123, 'x': 45, 'y': 67}
"""
pointlist = self.get_pointlist()
for i in range(len(pointlist)):
pointlist[i] = sorted(pointlist[i], key=lambda p: p["time"])
pointlist = sorted(pointlist, key=lambda stroke: stroke[0]["time"])
return pointlist
def set_pointlist(self, pointlist):
"""Overwrite pointlist.
Parameters
----------
pointlist : a list of strokes; each stroke is a list of points
The inner lists represent strokes. Every stroke consists of points.
Every point is a dictinary with 'x', 'y', 'time'.
"""
assert type(pointlist) is list, "pointlist is not of type list, but %r" % type(
pointlist
)
assert len(pointlist) >= 1, "The pointlist of formula_id %i is %s" % (
self.formula_id,
self.get_pointlist(),
)
self.raw_data_json = json.dumps(pointlist)
def get_bounding_box(self):
""" Get the bounding box of a pointlist. """
pointlist = self.get_pointlist()
# Initialize bounding box parameters to save values
minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]
# Adjust parameters
for stroke in pointlist:
for p in stroke:
minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
return {
"minx": minx,
"maxx": maxx,
"miny": miny,
"maxy": maxy,
"mint": mint,
"maxt": maxt,
}
def get_width(self):
"""Get the width of the rectangular, axis-parallel bounding box."""
box = self.get_bounding_box()
return box["maxx"] - box["minx"]
def get_height(self):
"""Get the height of the rectangular, axis-parallel bounding box."""
box = self.get_bounding_box()
return box["maxy"] - box["miny"]
def get_area(self):
"""Get the area in square pixels of the recording."""
return (self.get_height() + 1) * (self.get_width() + 1)
def get_time(self):
"""Get the time in which the recording was created."""
box = self.get_bounding_box()
return box["maxt"] - box["mint"]
def get_bitmap(self, time=None, size=32, store_path=None):
"""
        Get a bitmap of the object at a given instant in time. If time is
        `None`, then the bitmap is generated for the last point in time.
Parameters
----------
time : int or None
size : int
Size in pixels. The resulting bitmap will be (size x size).
store_path : None or str
If this is set, then the image will be saved there.
Returns
-------
numpy array :
Greyscale png image
"""
img = Image.new("L", (size, size), "black")
draw = ImageDraw.Draw(img, "L")
bb = self.get_bounding_box()
for stroke in self.get_sorted_pointlist():
for p1, p2 in zip(stroke, stroke[1:]):
if time is not None and (p1["time"] > time or p2["time"] > time):
continue
y_from = int((-bb["miny"] + p1["y"]) / max(self.get_height(), 1) * size)
x_from = int((-bb["minx"] + p1["x"]) / max(self.get_width(), 1) * size)
y_to = int((-bb["miny"] + p2["y"]) / max(self.get_height(), 1) * size)
x_to = int((-bb["minx"] + p2["x"]) / max(self.get_width(), 1) * size)
draw.line([x_from, y_from, x_to, y_to], fill="#ffffff", width=1)
del draw
if store_path is not None:
img.save(store_path)
return numpy.asarray(img)
def preprocessing(self, algorithms: List[Callable]):
"""Apply preprocessing algorithms.
Parameters
----------
algorithms : a list objects
            Preprocessing algorithms which get applied in order.
Examples
--------
>>> from hwrt import preprocessing
>>> a = HandwrittenData.generate()
>>> preprocessing_queue = [preprocessing.ScaleAndShift(),
... preprocessing.StrokeConnect(),
... preprocessing.DouglasPeucker(epsilon=0.2),
... preprocessing.SpaceEvenly(number=100,
... kind='cubic')]
>>> a.preprocessing(preprocessing_queue)
"""
assert type(algorithms) is list, (
"Expected algorithms to be of type "
f"list, type(algorithms)={type(algorithms)}"
)
for algorithm in algorithms:
algorithm(self)
def feature_extraction(self, algorithms):
"""Get a list of features.
Every algorithm has to return the features as a list."""
assert type(algorithms) is list, f"type(algorithms) = {type(algorithms)}"
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert (
len(new_features) == algorithm.get_dimension()
), "Expected %i features from algorithm %s, got %i features" % (
algorithm.get_dimension(),
str(algorithm),
len(new_features),
)
features += new_features
return features
def show(self):
"""Show the data graphically in a new pop-up window."""
# Third party modules
import matplotlib.pyplot as plt
pointlist = self.get_pointlist()
if "pen_down" in pointlist[0][0]:
            assert len(pointlist) > 1, "Length of pointlist was %i. Got: %s" % (
len(pointlist),
pointlist,
)
# Create a new pointlist that models pen-down strokes and pen
# up strokes
new_pointlist = []
last_pendown_state = None
stroke = []
for point in pointlist[0]:
if last_pendown_state is None:
last_pendown_state = point["pen_down"]
if point["pen_down"] != last_pendown_state:
new_pointlist.append(stroke)
last_pendown_state = point["pen_down"]
stroke = []
else:
stroke.append(point)
new_pointlist.append(stroke) # add the last stroke
pointlist = new_pointlist
_, ax = plt.subplots()
ax.set_title(
"Raw data id: %s, "
"Formula_id: %s" % (str(self.raw_data_id), str(self.formula_id))
)
colors = _get_colors(self.segmentation)
for symbols, color in zip(self.segmentation, colors):
for stroke_index in symbols:
stroke = pointlist[stroke_index]
xs, ys = [], []
for p in stroke:
xs.append(p["x"])
ys.append(p["y"])
if "pen_down" in stroke[0] and stroke[0]["pen_down"] is False:
plt.plot(xs, ys, "-x", color=color)
else:
plt.plot(xs, ys, "-o", color=color)
plt.gca().invert_yaxis()
ax.set_aspect("equal")
plt.show()
def count_single_dots(self):
"""Count all strokes of this recording that have only a single dot."""
pointlist = self.get_pointlist()
single_dots = 0
for stroke in pointlist:
if len(stroke) == 1:
single_dots += 1
return single_dots
def get_center_of_mass(self):
"""
Get a tuple (x,y) that is the center of mass. The center of mass is not
necessarily the same as the center of the bounding box. Imagine a black
        square and a single dot far outside of the square.
"""
xsum, ysum, counter = 0.0, 0.0, 0
for stroke in self.get_pointlist():
for point in stroke:
xsum += point["x"]
ysum += point["y"]
counter += 1
return (xsum / counter, ysum / counter)
def to_single_symbol_list(self):
"""
Convert this HandwrittenData object into a list of HandwrittenData
objects. Each element of the list is a single symbol.
Returns
-------
list of HandwrittenData objects
"""
symbol_stream = getattr(
self, "symbol_stream", [None for symbol in self.segmentation]
)
single_symbols = []
pointlist = self.get_sorted_pointlist()
for stroke_indices, label in zip(self.segmentation, symbol_stream):
strokes = []
for stroke_index in stroke_indices:
strokes.append(pointlist[stroke_index])
single_symbols.append(
HandwrittenData(json.dumps(strokes), formula_id=label)
)
return single_symbols
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.raw_data_id is None and self.formula_in_latex is not None:
return "HandwrittenData(%s)" % str(self.formula_in_latex)
else:
return "HandwrittenData(raw_data_id=%s)" % str(self.raw_data_id)
def __str__(self):
return repr(self)
def _get_colors(segmentation):
"""Get a list of colors which is as long as the segmentation.
Parameters
----------
segmentation : list of lists
Returns
-------
list
A list of colors.
"""
symbol_count = len(segmentation)
num_colors = symbol_count
# See http://stackoverflow.com/a/20298116/562769
color_array = [
"#000000",
"#FFFF00",
"#1CE6FF",
"#FF34FF",
"#FF4A46",
"#008941",
"#006FA6",
"#A30059",
"#FFDBE5",
"#7A4900",
"#0000A6",
"#63FFAC",
"#B79762",
"#004D43",
"#8FB0FF",
"#997D87",
"#5A0007",
"#809693",
"#FEFFE6",
"#1B4400",
"#4FC601",
"#3B5DFF",
"#4A3B53",
"#FF2F80",
"#61615A",
"#BA0900",
"#6B7900",
"#00C2A0",
"#FFAA92",
"#FF90C9",
"#B903AA",
"#D16100",
"#DDEFFF",
"#000035",
"#7B4F4B",
"#A1C299",
"#300018",
"#0AA6D8",
"#013349",
"#00846F",
"#372101",
"#FFB500",
"#C2FFED",
"#A079BF",
"#CC0744",
"#C0B9B2",
"#C2FF99",
"#001E09",
"#00489C",
"#6F0062",
"#0CBD66",
"#EEC3FF",
"#456D75",
"#B77B68",
"#7A87A1",
"#788D66",
"#885578",
"#FAD09F",
"#FF8A9A",
"#D157A0",
"#BEC459",
"#456648",
"#0086ED",
"#886F4C",
"#34362D",
"#B4A8BD",
"#00A6AA",
"#452C2C",
"#636375",
"#A3C8C9",
"#FF913F",
"#938A81",
"#575329",
"#00FECF",
"#B05B6F",
"#8CD0FF",
"#3B9700",
"#04F757",
"#C8A1A1",
"#1E6E00",
"#7900D7",
"#A77500",
"#6367A9",
"#A05837",
"#6B002C",
"#772600",
"#D790FF",
"#9B9700",
"#549E79",
"#FFF69F",
"#201625",
"#72418F",
"#BC23FF",
"#99ADC0",
"#3A2465",
"#922329",
"#5B4534",
"#FDE8DC",
"#404E55",
"#0089A3",
"#CB7E98",
"#A4E804",
"#324E72",
"#6A3A4C",
"#83AB58",
"#001C1E",
"#D1F7CE",
"#004B28",
"#C8D0F6",
"#A3A489",
"#806C66",
"#222800",
"#BF5650",
"#E83000",
"#66796D",
"#DA007C",
"#FF1A59",
"#8ADBB4",
"#1E0200",
"#5B4E51",
"#C895C5",
"#320033",
"#FF6832",
"#66E1D3",
"#CFCDAC",
"#D0AC94",
"#7ED379",
"#012C58",
]
# Apply a little trick to make sure we have enough colors, no matter
# how many symbols are in one recording.
# This simply appends the color array as long as necessary to get enough
# colors
new_array = color_array[:]
while len(new_array) <= num_colors:
new_array += color_array
return new_array[:num_colors]
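
# Quick smoke test using the built-in sample recording:
if __name__ == "__main__":
    sample = HandwrittenData.generate()
    print(sample.get_bounding_box())
    # -> {'minx': 675, 'maxx': 678, 'miny': 195, 'maxy': 203,
    #     'mint': 1592756126416, 'maxt': 1592756126427}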
| [
"[email protected]"
] | |
e920b86f0aa0bf8d44fb0cb2489067041c153ae8 | 141545126466a00f32247dfa40e067ec049b0fa4 | /Programming OOP Python/OOP Exercise/Inheritance/person_01/project/person.py | 2a28800d1e6c9dcda6757be91fba3a054ad899ab | [] | no_license | RadkaValkova/SoftUni-Web-Developer | 83314367172a18f001e182b4e57f7ca0502ad1fc | 61d3414373498bb6009ae70e8d17f26cd2d88ea5 | refs/heads/main | 2023-06-01T02:11:06.606370 | 2021-06-29T19:39:19 | 2021-06-29T19:39:19 | 325,611,606 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | class Person:
    def __init__(self, name, age):
self.name = name
self.age = age
| [
"[email protected]"
] | |
9704cae30d70ad976d7e0d9eb0d2456cfeb753cb | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /125_algorithms/_exercises/templates/_algorithms_challenges/algorithm-master/lintcode/450_reverse_nodes_in_k_group.py | fd54fa836cbe380c21c5b379ea1fb83a87f320a5 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 1,038 | py | """
Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
"""
class Solution:
    """
    @param: head: a ListNode
    @param: k: An integer
    @return: a ListNode
    """
    def reverseKGroup(self, head, k):
        if not head:
            return None
        dummy = ListNode(0)
        dummy.next = head
        head = dummy
        while head:
            head = self.reverse_next_kth(head, k)
        return dummy.next

    def find_kth(self, head, k):
        # walk k steps forward; None means fewer than k nodes remain
        for i in range(k):
            if not head:
                return None
            head = head.next
        return head

    def reverse(self, head):
        # standard in-place reversal; returns the new head
        pre = nxt = None
        while head:
            nxt = head.next
            head.next = pre
            pre = head
            head = nxt
        return pre

    def reverse_next_kth(self, head, k):
        # reverse the k nodes after `head` and return the node that
        # precedes the next group (None if fewer than k nodes remain)
        nk = self.find_kth(head, k)
        if not nk:
            return None
        nk_nxt = nk.next
        n1_pre = head
        n1 = head.next
        nk.next = None
        self.reverse(n1)
        n1_pre.next = nk
        n1.next = nk_nxt
        return n1
| [
"[email protected]"
] | |
20fb0d8b294ad796f7075499d1ced20f90dc2488 | 401125c11f19161b9e917af0fd9ee3f4d5f3f199 | /shared/bower_components/brython/www/src/Lib/asyncio/__init__.py | f7b4e3469bb2221ca02a19fa2bde5bba3472e14a | [
"BSD-3-Clause"
] | permissive | seriocomedy/site | 56dbdf65e245420e53be4ace6b714ee1e180a1ab | b54d15d077b787ff894fc60eb7462c2ba837cda6 | refs/heads/master | 2022-10-09T20:46:57.934497 | 2017-04-21T17:34:48 | 2017-04-21T17:34:48 | 88,890,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | """The asyncio package, tracking PEP 3156."""
import sys
# The selectors module is in the stdlib in Python 3.4 but not in 3.3.
# Do this first, so the other submodules can use "from . import selectors".
# Prefer asyncio/selectors.py over the stdlib one, as ours may be newer.
try:
from . import selectors
except ImportError:
import selectors # Will also be exported.
if sys.platform == 'win32':
# Similar thing for _overlapped.
try:
from . import _overlapped
except ImportError:
import _overlapped # Will also be exported.
# This relies on each of the submodules having an __all__ variable.
from .base_events import *
from .coroutines import *
from .events import *
from .futures import *
from .locks import *
from .protocols import *
from .queues import *
from .streams import *
from .subprocess import *
from .tasks import *
from .transports import *
from .http import *
__all__ = (base_events.__all__ +
coroutines.__all__ +
events.__all__ +
futures.__all__ +
locks.__all__ +
protocols.__all__ +
queues.__all__ +
streams.__all__ +
subprocess.__all__ +
tasks.__all__ +
transports.__all__,
http.__all__)
if sys.platform == 'win32': # pragma: no cover
from .windows_events import *
__all__ += windows_events.__all__
elif sys.platform == 'brython':
from .brython_events import *
__all__ += brython_events.__all__
else:
from .unix_events import * # pragma: no cover
__all__ += unix_events.__all__
| [
"[email protected]"
] | |
6788c1fd622280ee6be93fea139815e6f5b2f6f9 | 7a36b21109b47154cfd4131f23df83d27ade98b1 | /wedesign-web/registration/views.py | e83e7da7c8743a1e60b9a28caa343b8cd72ebbd2 | [
"MIT"
] | permissive | CyanoFactory/WeDesign-Website | 0f8dd9a26320a65705aa3e7fbc6fbab528ae9fee | f0fae6e1e6149e37b30a62e9af0febc800a4f19a | refs/heads/master | 2020-11-23T22:37:57.145799 | 2020-03-06T14:49:05 | 2020-03-06T14:49:05 | 227,849,591 | 0 | 0 | MIT | 2020-03-06T14:52:10 | 2019-12-13T13:46:18 | HTML | UTF-8 | Python | false | false | 1,183 | py | """
Copyright (c) 2019 Gabriel Kind
Hochschule Mittweida, University of Applied Sciences
Released under the MIT license
"""
from django.shortcuts import redirect
from django.contrib.auth.decorators import login_required
from wedesign.helpers import render_queryset_to_response
from .forms import CreateProfileForm, ChangeProfileForm
def register(request):
if request.method == 'POST':
form = CreateProfileForm(request.POST)
if form.is_valid():
form.save(request)
return redirect("wedesign:index")
else:
form = CreateProfileForm()
return render_queryset_to_response(
request,
template="registration/register.html",
data={"form": form}
)
@login_required
def account_change(request):
success = False
if request.method == 'POST':
form = ChangeProfileForm(request, request.POST)
if form.is_valid():
form.save()
success = True
else:
form = ChangeProfileForm(request)
return render_queryset_to_response(
request,
template="registration/account_change.html",
data={"form": form, "success": success}
) | [
"[email protected]"
] | |
cd7db34b7d9898480ce1b87ed4718a9d6c684be7 | 3d12d2ce979408da3aacb2fad10a0ea62a75f906 | /config.py | 2c497366d27a4b8cf4104bcec285aeac41b55324 | [
"Apache-2.0"
] | permissive | kotu931226/classifier_transformer_pytorch | d2be78a1c283dbc90b2d9fc9cb87a8daa0fd7c7f | 13284cd8b2a818be4033702354bbb9c7ba3815ea | refs/heads/master | 2020-04-07T15:44:22.284996 | 2018-11-27T12:26:43 | 2018-11-27T12:26:43 | 158,498,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 351 | py | import torch
ids_size = 16
d_model = 512
hidden_size = 256
n_classes = 4+1+1
d_ff = 2048
N = 3
n_heads = 16
n_epoch = 20*6
batch_size = 64
x_path = './data/Arithmetic_x.csv'
y_path = './data/Arithmetic_y.csv'
pad_id_path = './data/Arithmetic_pad_x.csv'
save_model_path = './data/classify.pt'
device = 'cuda' if torch.cuda.is_available() else 'cpu'
| [
"[email protected]"
] | |
9f807ef484473954e15ac8907434e0dd96831d3f | 43a47c9f7e670b1e834dc3610672f40460732b03 | /raidensim/types.py | b72512fa10ea5e17485c62152ce1f323d45d277c | [] | no_license | devium/routing_sim | 5b53388033d2c92b198f2e14e91b490cdd566c82 | 50a8166b4187c9ba77e6c60e247fc31c6d32a479 | refs/heads/master | 2021-07-03T18:47:54.920578 | 2018-03-26T13:06:00 | 2018-03-26T13:06:00 | 96,193,726 | 0 | 0 | null | 2017-07-04T08:20:17 | 2017-07-04T08:20:17 | null | UTF-8 | Python | false | false | 256 | py | from typing import Tuple, List
from raidensim.network.node import Node
from numpy import array
Fullness = float
Reason = str
IntRange = Tuple[int, int]
FloatRange = Tuple[float, float]
Path = List[Node]
Coord = array
DiskCoord = array
PolarCoord = array
| [
"[email protected]"
] | |
50cb8ffbfd32566b6d0893cdab210c642116660b | e7a00fbe886c6436258afb34f6d8977cc0c96041 | /model_code/RandomForestClassifier.py | c7e49989a4dda659aeba6b19885236188a1d1735 | [
"MIT"
] | permissive | harshnisar/sklearn-benchmarks | 3da7fb54809308ad984c052f0ab1a1996278cfd4 | d6f2c65df9147e49569f69575e3162b3d234a592 | refs/heads/master | 2021-01-20T22:35:26.559299 | 2016-04-08T12:43:49 | 2016-04-08T12:43:49 | 52,671,801 | 0 | 0 | null | 2016-02-27T14:58:19 | 2016-02-27T14:58:19 | null | UTF-8 | Python | false | false | 2,900 | py | import sys
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.preprocessing import StandardScaler
import itertools
dataset = sys.argv[1]
# Read the data set into memory
input_data = pd.read_csv(dataset, compression='gzip', sep='\t')
for (n_estimators, max_depth, max_features, criterion) in itertools.product([10, 50, 100, 500, 1000],
[1, 2, 3, 4, 5, 10, 20, 50, None],
[0.1, 0.25, 0.5, 0.75, 'sqrt', 'log2', None],
['gini', 'entropy']):
for dataset_repeat in range(1, 31):
# Divide the data set into a training and testing sets, each time with a different RNG seed
training_indices, testing_indices = next(iter(StratifiedShuffleSplit(input_data['class'].values,
n_iter=1,
train_size=0.75,
test_size=0.25,
random_state=dataset_repeat)))
training_features = input_data.loc[training_indices].drop('class', axis=1).values
training_classes = input_data.loc[training_indices, 'class'].values
testing_features = input_data.loc[testing_indices].drop('class', axis=1).values
testing_classes = input_data.loc[testing_indices, 'class'].values
ss = StandardScaler()
training_features = ss.fit_transform(training_features.astype(float))
testing_features = ss.transform(testing_features.astype(float))
# Create and fit the model on the training data
try:
clf = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth,
max_features=max_features, criterion=criterion)
clf.fit(training_features, training_classes)
testing_score = clf.score(testing_features, testing_classes)
except KeyboardInterrupt:
sys.exit(1)
except:
continue
param_string = ''
param_string += 'n_estimators={},'.format(n_estimators)
param_string += 'max_depth={},'.format(max_depth)
param_string += 'max_features={},'.format(max_features)
param_string += 'criterion={}'.format(criterion)
out_text = '\t'.join([dataset.split('/')[-1][:-7],
'RandomForestClassifier',
param_string,
str(testing_score)])
print(out_text)
| [
"[email protected]"
] | |
b990ec2e31f7726a3ab485d9584b861fd5ae09c0 | 06a7dc7cc93d019e4a9cbcf672b23a0bbacf8e8b | /2016_hippo_malrot/analyse_PLINK.py | ae82034df4da695a324ec0a62354448d1c084211 | [] | no_license | neurospin/scripts | 6c06cd218a5f32de9c3c2b7d1d8bda3f3d107458 | f14a2c9cf2cd7f5fbea767b017c3faf36d170bdb | refs/heads/master | 2021-07-11T22:55:46.567791 | 2021-07-02T13:08:02 | 2021-07-02T13:08:02 | 10,549,286 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | """
@author yl247234
"""
import pandas as pd
import numpy as np
import optparse
import re, glob, os, json
## INPUTS ##
#We used 249058 variants remaining over the 466125 variants loaded from .bim file
DIRECTORY_SNPS = '/neurospin/brainomics/2016_hippo_malrot/results_snps/'
input_snps = DIRECTORY_SNPS+'IHI_genotypes.csv'
pheno_names = ['SCi_L', 'SCi_R','Sci_L_thresh', 'Sci_R_thresh','C0_L', 'C0_R']
WORKING_DIRECTORY = '/neurospin/brainomics/2016_hippo_malrot/PLINK_output/parse_output2/'
case = '_binary_PLINK'
## OUTPUTS ##
"""Only printing for the moment """
with open(input_snps) as f:
lis=[line.split(';') for line in f]
genes = lis[1][2:]
genes[len(genes)-1]=genes[len(genes)-1][:len(genes[len(genes)-1])-2]
corresponding_snps = lis[3][2:]
corresponding_snps[len(corresponding_snps)-1]=corresponding_snps[len(corresponding_snps)-1][:len(corresponding_snps[len(corresponding_snps)-1])-2]
df = pd.DataFrame({'corresponding_snps' : np.asarray(corresponding_snps),
'genes' : np.asarray(genes)})
df.index = df['corresponding_snps']
pheno_names = ['SCi_L', 'Sci_L_thresh', 'C0_L', 'SCi_R','Sci_R_thresh', 'C0_R']
for pheno_name in pheno_names:
pval_sel = os.path.join(WORKING_DIRECTORY,
pheno_name +'_logistic'+case+'.sel3')
if os.path.isfile(pval_sel):
pval = pd.read_csv(pval_sel, sep='\t')
"""print "\n"
print "Phenotype considered: " + pheno_name
print pval # to be printed only if selX with X>5"""
tab = np.asarray(pval['SNP'])
tab = tab.tolist()
for snp in tab:
if snp in df.index:
print "Phenotype considered: " + pheno_name
print "Gene associated: "+ df.loc[snp][1]
print "Snp associated: " + df.loc[snp][0]
| [
"[email protected]"
] | |
59374fd6bd814521d0880beccd53963435fc39df | dd545fb390ccc7b5bc3e8752e9fd1279a8547df0 | /scrawl/utils.py | 75507812607a064b99924cd39389404d93ddedb3 | [
"MIT"
] | permissive | dormantman/scrawl | f3cf8e0a3e6840c67428c986e75acd6d7a1cbc80 | c1845f1a0f69be812227b22b79a08dd086c11f61 | refs/heads/master | 2021-01-02T15:21:40.810798 | 2020-02-16T00:49:19 | 2020-02-16T00:49:19 | 239,679,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,006 | py | import os
import sys
from flask import jsonify, make_response
sys.setrecursionlimit(100)
def find_pages(page_name: str):
page_data = page_name.split('/')
if len(page_data) > 1:
return page_data
return page_data[0], ''
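
# Examples of the two return shapes:
#   find_pages('wiki/start') -> ['wiki', 'start']
#   find_pages('start')      -> ('start', '')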
def render_error(message: str, status_code=500):
content = jsonify(detail=message)
return make_response(content, status_code)
def __full_directory_remove(path: str):
if os.path.exists(path):
for the_file in os.listdir(path):
file_path = os.path.join(path, the_file)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
else:
__full_directory_remove(file_path)
except OSError as error:
print('Deleting error:', error)
os.rmdir(path)
def full_directory_remove(path: str, work_directory: str):
content_directory = os.path.join(work_directory, path)
__full_directory_remove(content_directory)
| [
"[email protected]"
] | |
39ff97d2ee5bfaac3d226330a7858be88a85e7da | 636cc2ebe9050f736e4e27a641ff64f4a2409cfe | /lesson004/code/basic_python/reg.py | ea9f7c18206869a11646cb9dc38ebbce3790a410 | [] | no_license | zdRan/FuturePlanNodes | c8d9ec3e2f58999acdd964eca3198adc383727c3 | 3d16f5e6fe80f4ba66a98ae80d19817c43d9bf12 | refs/heads/master | 2020-05-22T02:19:03.197272 | 2019-08-16T02:00:10 | 2019-08-16T02:00:10 | 186,196,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | # -*- coding: utf-8 -*-
# @Time : 2019/5/12 14:51
# @Author : zdRan
# @FileName: reg.py
# @Software: PyCharm
# @Blog :https://github.com/zdRan/FuturePlanNodes
import re
s = "代码地址是http://www.github.com"
reg = "http://[w]{3}\.[a-z0-9]*\.com"
result = re.findall(reg, s)
print(result)
s = "hello python hello"
reg = "hello"
print(re.findall(reg,s))
print(re.findall(reg,s)[1])
# Metacharacters
'''
.  matches any character except a newline
\w matches letters, digits, CJK characters and underscores
\s matches any whitespace character
\d matches any digit
^  matches the start of the string
$  matches the end of the string
'''
s = "dasdasdasd汉字汉字汉 字 12312312!@#¥%"
print(re.findall('\w',s))
print(re.findall('\d',s))
print(re.findall('\s',s))
print(re.findall('^d',s))
# Negated classes
'''
\W matches the opposite of \w
\S matches the opposite of \s
\D matches the opposite of \d
'''
# Quantifiers
'''
*     repeats 0 or more times
+     repeats 1 or more times
?     repeats 0 or 1 time
{n}   repeats exactly n times
{n,}  repeats at least n times
{n,m} repeats n to m times
'''
s = "aaaa111nnn!!!n333444555汉字"
print(re.findall('\d{3}',s))
# Group matching
s = "my QQ number is 12345678, my postcode is 11111, my phone number is 222222"
reg = "(\d{8}).*(\d{5})"
print(re.findall(reg,s))
print(re.search(reg,s))
print(re.search(reg,s).group())
# the content matched by the whole regular expression
print(re.search(reg,s).group(0))
print(re.search(reg,s).group(1))
print(re.search(reg,s).group(2)) | [
"[email protected]"
] | |
0f1f73aefabbe839581fb46a2fd4e17b9dfebe4a | 81d3cfb56abd49a55a7b1850e78472df33d82616 | /Python/libraries/recognizers-number-with-unit/recognizers_number_with_unit/number_with_unit/german/extractors.py | 8b707a048d19adb6e0076bf808645bd7fa793342 | [
"MIT"
] | permissive | dugarsumit/Recognizers-Text | 08e536b336928fccb6f48552a61421b16541ed79 | ab6aba833f75d734a266df935a4245b04a228017 | refs/heads/master | 2021-02-17T19:02:54.789870 | 2020-03-05T15:36:39 | 2020-03-05T15:36:39 | 245,119,937 | 0 | 0 | MIT | 2020-03-05T09:26:54 | 2020-03-05T09:26:53 | null | UTF-8 | Python | false | false | 5,952 | py | from typing import Dict, List, Pattern
from recognizers_text.culture import Culture
from recognizers_text.extractor import Extractor
from recognizers_text.utilities import RegExpUtility
from recognizers_number.culture import CultureInfo
from recognizers_number.number.models import NumberMode
from recognizers_number.number.german.extractors import GermanNumberExtractor
from recognizers_number_with_unit.number_with_unit.constants import Constants
from recognizers_number_with_unit.number_with_unit.extractors import NumberWithUnitExtractorConfiguration
from recognizers_number_with_unit.resources.german_numeric_with_unit import GermanNumericWithUnit
from recognizers_number_with_unit.resources.base_units import BaseUnits
# pylint: disable=abstract-method
class GermanNumberWithUnitExtractorConfiguration(NumberWithUnitExtractorConfiguration):
@property
def ambiguity_filters_dict(self) -> Dict[Pattern, Pattern]:
return GermanNumericWithUnit.AmbiguityFiltersDict
@property
def unit_num_extractor(self) -> Extractor:
return self._unit_num_extractor
@property
def build_prefix(self) -> str:
return self._build_prefix
@property
def build_suffix(self) -> str:
return self._build_suffix
@property
def connector_token(self) -> str:
return self._connector_token
@property
def compound_unit_connector_regex(self) -> Pattern:
return self._compound_unit_connector_regex
@property
def non_unit_regex(self) -> Pattern:
return self._pm_non_unit_regex
@property
def ambiguous_unit_number_multiplier_regex(self) -> Pattern:
return None
def __init__(self, culture_info: CultureInfo):
if culture_info is None:
culture_info = CultureInfo(Culture.German)
super().__init__(culture_info)
self._unit_num_extractor = GermanNumberExtractor(NumberMode.Unit)
self._build_prefix = GermanNumericWithUnit.BuildPrefix
self._build_suffix = GermanNumericWithUnit.BuildSuffix
self._connector_token = GermanNumericWithUnit.ConnectorToken
self._compound_unit_connector_regex = RegExpUtility.get_safe_reg_exp(
GermanNumericWithUnit.CompoundUnitConnectorRegex)
self._pm_non_unit_regex = RegExpUtility.get_safe_reg_exp(
BaseUnits.PmNonUnitRegex)
# pylint: enable=abstract-method
class GermanAgeExtractorConfiguration(GermanNumberWithUnitExtractorConfiguration):
@property
def extract_type(self) -> str:
return Constants.SYS_UNIT_AGE
@property
def suffix_list(self) -> Dict[str, str]:
return self._suffix_list
@property
def prefix_list(self) -> Dict[str, str]:
return self._prefix_list
@property
def ambiguous_unit_list(self) -> List[str]:
return self._ambiguous_unit_list
def __init__(self, culture_info: CultureInfo = None):
super().__init__(culture_info)
self._suffix_list = GermanNumericWithUnit.AgeSuffixList
self._prefix_list = dict()
self._ambiguous_unit_list = list()
class GermanCurrencyExtractorConfiguration(GermanNumberWithUnitExtractorConfiguration):
@property
def extract_type(self) -> str:
return Constants.SYS_UNIT_CURRENCY
@property
def suffix_list(self) -> Dict[str, str]:
return self._suffix_list
@property
def prefix_list(self) -> Dict[str, str]:
return self._prefix_list
@property
def ambiguous_unit_list(self) -> List[str]:
return self._ambiguous_unit_list
def __init__(self, culture_info: CultureInfo = None):
super().__init__(culture_info)
self._suffix_list = GermanNumericWithUnit.CurrencySuffixList
self._prefix_list = GermanNumericWithUnit.CurrencyPrefixList
self._ambiguous_unit_list = GermanNumericWithUnit.AmbiguousCurrencyUnitList
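# Hedged usage sketch; assumes the NumberWithUnitExtractor wrapper from this
# package accepts these configurations (the sample text is an assumption too):
#
#     from recognizers_number_with_unit.number_with_unit.extractors import (
#         NumberWithUnitExtractor)
#     extractor = NumberWithUnitExtractor(GermanCurrencyExtractorConfiguration())
#     results = extractor.extract('das Buch kostet 12 Euro')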
class GermanDimensionExtractorConfiguration(GermanNumberWithUnitExtractorConfiguration):
@property
def extract_type(self) -> str:
return Constants.SYS_UNIT_DIMENSION
@property
def suffix_list(self) -> Dict[str, str]:
return self._suffix_list
@property
def prefix_list(self) -> Dict[str, str]:
return self._prefix_list
@property
def ambiguous_unit_list(self) -> List[str]:
return self._ambiguous_unit_list
def __init__(self, culture_info: CultureInfo = None):
super().__init__(culture_info)
self._suffix_list = {
**GermanNumericWithUnit.InformationSuffixList,
**GermanNumericWithUnit.AreaSuffixList,
**GermanNumericWithUnit.LengthSuffixList,
**GermanNumericWithUnit.SpeedSuffixList,
**GermanNumericWithUnit.VolumeSuffixList,
**GermanNumericWithUnit.WeightSuffixList
}
self._prefix_list = dict()
self._ambiguous_unit_list = GermanNumericWithUnit.AmbiguousDimensionUnitList
class GermanTemperatureExtractorConfiguration(GermanNumberWithUnitExtractorConfiguration):
@property
def extract_type(self) -> str:
return Constants.SYS_UNIT_TEMPERATURE
@property
def suffix_list(self) -> Dict[str, str]:
return self._suffix_list
@property
def prefix_list(self) -> Dict[str, str]:
return self._prefix_list
@property
def ambiguous_unit_list(self) -> List[str]:
return self._ambiguous_unit_list
@property
def ambiguous_unit_number_multiplier_regex(self) -> Pattern:
return self._ambiguous_unit_number_multiplier_regex
def __init__(self, culture_info: CultureInfo = None):
super().__init__(culture_info)
self._suffix_list = GermanNumericWithUnit.TemperatureSuffixList
self._prefix_list = dict()
self._ambiguous_unit_list = list()
self._ambiguous_unit_number_multiplier_regex = RegExpUtility.get_safe_reg_exp(
BaseUnits.AmbiguousUnitNumberMultiplierRegex)
| [
"[email protected]"
] | |
3ff9bcc607cf180c7c6b9d5c032ae61b2971806b | e309fb2053d870ddabe35801d47bd73141b1c1ab | /setup.py | 39b8363551106fad81768b699958acf4e61ff65c | [
"MIT"
] | permissive | jmoiron/par2ools | 20c2b17b7d0d58ed04d5c12fbfa037e3727a4203 | 3a9a71c8d0cadecb56bff711b2c2db9b3acb496c | refs/heads/master | 2023-08-13T19:34:53.259101 | 2016-01-10T23:01:42 | 2016-01-10T23:01:42 | 2,627,372 | 11 | 3 | MIT | 2021-06-14T08:26:51 | 2011-10-22T18:49:03 | Python | UTF-8 | Python | false | false | 1,095 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Setup script for par2ools."""
from setuptools import setup, find_packages
version = '0.1'
# some trove classifiers:
# License :: OSI Approved :: MIT License
# Intended Audience :: Developers
# Operating System :: POSIX
setup(
name='par2ools',
version=version,
description="par2 tools",
long_description=open('README.rst').read(),
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
],
keywords='par2',
author='Jason Moiron',
author_email='[email protected]',
url='http://github.com/jmoiron/par2ools',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
scripts=['bin/par2unrar', 'bin/par2ls', 'bin/par2mv'],
include_package_data=True,
zip_safe=False,
test_suite="tests",
install_requires=[
# -*- Extra requirements: -*-
],
entry_points="""
# -*- Entry points: -*-
""",
)
| [
"[email protected]"
] | |
b03098c39b14930e53f40ab6584e9339e3d0b901 | c38ac05ac36e4ec6eb0738f0e615342806ed70cf | /blog/forms.py | 6aa7ae74d239e5c4dcbfd6710bfca775cbfaae39 | [] | no_license | jpanknin/django-unleashed2 | 5de2d74ec0e9a1524784ee0893ee8d932db02a6c | dc16799ec30bb1118c7ce179604a3641b1ce9f7b | refs/heads/master | 2020-12-24T20:24:04.727271 | 2016-05-10T00:06:11 | 2016-05-10T00:06:11 | 58,072,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | from django import forms
from .models import Post
class PostForm(forms.ModelForm):
class Meta:
model = Post
fields = '__all__'
def clean_slug(self):
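        # Normalize slugs to lowercase so slug lookups stay case-insensitive.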
return self.cleaned_data['slug'].lower() | [
"[email protected]"
] | |
56b41bf773d22472032f09c9289dc4656c5871f0 | 71eb9a93e4bceb0af802924cbaebbf071a9e7b42 | /wn/_core.py | 534d84e5da7161577ab35f0984c6b163c5b85284 | [
"MIT"
] | permissive | dpalmasan/wn | d1177e8192263bcaf0dc9683350d7c3744469658 | 44d01d4b1e56c36ef45c816d8c8733cde5f69104 | refs/heads/main | 2023-04-11T06:04:00.644462 | 2021-04-07T13:30:50 | 2021-04-07T13:30:50 | 356,056,660 | 0 | 0 | MIT | 2021-04-08T21:49:39 | 2021-04-08T21:49:38 | null | UTF-8 | Python | false | false | 44,261 | py |
from typing import TypeVar, Optional, List, Tuple, Dict, Set, Iterator
import warnings
import wn
from wn._types import Metadata
from wn._util import flatten
from wn._db import NON_ROWID
from wn._queries import (
find_lexicons,
find_ilis,
find_proposed_ilis,
find_entries,
find_senses,
find_synsets,
get_lexicon,
get_modified,
get_lexicon_dependencies,
get_lexicon_extension_bases,
get_lexicon_extensions,
get_form_pronunciations,
get_form_tags,
get_entry_senses,
get_sense_relations,
get_sense_synset_relations,
get_synset_relations,
get_synset_members,
get_synsets_for_ilis,
get_examples,
get_definitions,
get_syntactic_behaviours,
get_metadata,
get_lexicalized,
get_adjposition,
get_sense_counts,
get_lexfile,
)
_FAKE_ROOT = '*ROOT*'
_INFERRED_SYNSET = '*INFERRED*'
class _DatabaseEntity:
__slots__ = '_id',
_ENTITY_TYPE = ''
def __init__(self, _id: int = NON_ROWID):
self._id = _id # Database-internal id (e.g., rowid)
def __eq__(self, other):
if not isinstance(other, _DatabaseEntity):
return NotImplemented
# the _id of different kinds of entities, such as Synset and
# Sense, can be the same, so make sure they are the same type
# of object first
return (self._ENTITY_TYPE == other._ENTITY_TYPE
and self._id == other._id)
def __lt__(self, other):
if not isinstance(other, _DatabaseEntity):
return NotImplemented
elif self._ENTITY_TYPE != other._ENTITY_TYPE:
return NotImplemented
else:
return self._id < other._id
def __hash__(self):
return hash((self._ENTITY_TYPE, self._id))
class ILI(_DatabaseEntity):
"""A class for interlingual indices."""
__slots__ = 'id', 'status', '_definition'
__module__ = 'wn'
def __init__(
self,
id: Optional[str],
status: str,
definition: str = None,
_id: int = NON_ROWID,
):
super().__init__(_id=_id)
self.id = id
self.status = status
self._definition = definition
def __repr__(self) -> str:
return f'ILI({repr(self.id) if self.id else "*PROPOSED*"})'
def definition(self) -> Optional[str]:
return self._definition
def metadata(self) -> Metadata:
"""Return the ILI's metadata."""
table = 'proposed_ilis' if self.status == 'proposed' else 'ilis'
return get_metadata(self._id, table)
class Lexicon(_DatabaseEntity):
"""A class representing a wordnet lexicon.
Attributes:
id: The lexicon's identifier.
label: The full name of lexicon.
language: The BCP 47 language code of lexicon.
email: The email address of the wordnet maintainer.
license: The URL or name of the wordnet's license.
version: The version string of the resource.
url: The project URL of the wordnet.
citation: The canonical citation for the project.
logo: A URL or path to a project logo.
"""
__slots__ = ('id', 'label', 'language', 'email', 'license',
'version', 'url', 'citation', 'logo')
__module__ = 'wn'
_ENTITY_TYPE = 'lexicons'
def __init__(
self,
id: str,
label: str,
language: str,
email: str,
license: str,
version: str,
url: str = None,
citation: str = None,
logo: str = None,
_id: int = NON_ROWID,
):
super().__init__(_id=_id)
self.id = id
self.label = label
self.language = language
self.email = email
self.license = license
self.version = version
self.url = url
self.citation = citation
self.logo = logo
def __repr__(self):
id, ver, lg = self.id, self.version, self.language
return f'<Lexicon {id}:{ver} [{lg}]>'
def metadata(self) -> Metadata:
"""Return the lexicon's metadata."""
return get_metadata(self._id, 'lexicons')
def specifier(self) -> str:
"""Return the *id:version* lexicon specifier."""
return f'{self.id}:{self.version}'
def modified(self) -> bool:
"""Return True if the lexicon has local modifications."""
return get_modified(self._id)
def requires(self) -> Dict[str, Optional['Lexicon']]:
"""Return the lexicon dependencies."""
return dict(
(f'{id}:{version}',
None if _id is None else _to_lexicon(get_lexicon(_id)))
for id, version, _, _id in get_lexicon_dependencies(self._id)
)
def extends(self) -> Optional['Lexicon']:
"""Return the lexicon this lexicon extends, if any.
If this lexicon is not an extension, return None.
"""
bases = get_lexicon_extension_bases(self._id, depth=1)
if bases:
return _to_lexicon(get_lexicon(bases[0]))
return None
def extensions(self, depth: int = 1) -> List['Lexicon']:
"""Return the list of lexicons extending this one.
By default, only direct extensions are included. This is
controlled by the *depth* parameter, which if you view
extensions as children in a tree where the current lexicon is
the root, *depth=1* are the immediate extensions. Increasing
this number gets extensions of extensions, or setting it to a
negative number gets all "descendant" extensions.
"""
return [_to_lexicon(get_lexicon(rowid))
for rowid in get_lexicon_extensions(self._id, depth=depth)]
class _LexiconElement(_DatabaseEntity):
__slots__ = '_lexid', '_wordnet'
def __init__(
self,
_lexid: int = NON_ROWID,
_id: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
super().__init__(_id=_id)
self._lexid = _lexid # Database-internal lexicon id
if _wordnet is None:
_wordnet = Wordnet()
self._wordnet: 'Wordnet' = _wordnet
def lexicon(self):
return _to_lexicon(get_lexicon(self._lexid))
def _get_lexicon_ids(self) -> Tuple[int, ...]:
if self._wordnet._default_mode:
return tuple(
{self._lexid}
| set(get_lexicon_extension_bases(self._lexid))
| set(get_lexicon_extensions(self._lexid))
)
else:
return self._wordnet._lexicon_ids
class Pronunciation:
"""A class for word form pronunciations."""
__slots__ = 'value', 'variety', 'notation', 'phonemic', 'audio'
def __init__(
self,
value: str,
variety: str = None,
notation: str = None,
phonemic: bool = True,
audio: str = None,
):
self.value = value
self.variety = variety
self.notation = notation
self.phonemic = phonemic
self.audio = audio
class Tag:
"""A general-purpose tag class for word forms."""
__slots__ = 'tag', 'category',
__module__ = 'wn'
def __init__(self, tag: str, category: str):
self.tag = tag
self.category = category
def __eq__(self, other):
if not isinstance(other, Tag):
return NotImplemented
return self.tag == other.tag and self.category == other.category
class Form(str):
"""A word-form string with additional attributes."""
__slots__ = '_id', 'id', 'script',
__module__ = 'wn'
_id: int
id: Optional[str]
script: Optional[str]
def __new__(
cls,
form: str,
id: str = None,
script: str = None,
_id: int = NON_ROWID
):
obj = str.__new__(cls, form) # type: ignore
obj.id = id
obj.script = script
obj._id = _id
return obj
def __eq__(self, other):
if isinstance(other, Form) and self.script != other.script:
return False
return str.__eq__(self, other)
def __hash__(self):
script = self.script
if script is None:
return str.__hash__(self)
return hash((str(self), self.script))
def pronunciations(self) -> List[Pronunciation]:
return [Pronunciation(*data) for data in get_form_pronunciations(self._id)]
def tags(self) -> List[Tag]:
return [Tag(tag, category) for tag, category in get_form_tags(self._id)]
class Word(_LexiconElement):
"""A class for words (also called lexical entries) in a wordnet."""
__slots__ = 'id', 'pos', '_forms'
__module__ = 'wn'
_ENTITY_TYPE = 'entries'
def __init__(
self,
id: str,
pos: str,
forms: List[Tuple[str, Optional[str], Optional[str], int]],
_lexid: int = NON_ROWID,
_id: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
super().__init__(_lexid=_lexid, _id=_id, _wordnet=_wordnet)
self.id = id
self.pos = pos
self._forms = forms
def __repr__(self) -> str:
return f'Word({self.id!r})'
def lemma(self) -> Form:
"""Return the canonical form of the word.
Example:
>>> wn.words('wolves')[0].lemma()
'wolf'
"""
return Form(*self._forms[0])
def forms(self) -> List[Form]:
"""Return the list of all encoded forms of the word.
Example:
>>> wn.words('wolf')[0].forms()
['wolf', 'wolves']
"""
return [Form(*form_data) for form_data in self._forms]
def senses(self) -> List['Sense']:
"""Return the list of senses of the word.
Example:
>>> wn.words('zygoma')[0].senses()
[Sense('ewn-zygoma-n-05292350-01')]
"""
lexids = self._get_lexicon_ids()
iterable = get_entry_senses(self._id, lexids)
return [Sense(*sense_data, self._wordnet) for sense_data in iterable]
def metadata(self) -> Metadata:
"""Return the word's metadata."""
return get_metadata(self._id, 'entries')
def synsets(self) -> List['Synset']:
"""Return the list of synsets of the word.
Example:
>>> wn.words('addendum')[0].synsets()
[Synset('ewn-06411274-n')]
"""
return [sense.synset() for sense in self.senses()]
def derived_words(self) -> List['Word']:
"""Return the list of words linked through derivations on the senses.
Example:
>>> wn.words('magical')[0].derived_words()
[Word('ewn-magic-n'), Word('ewn-magic-n')]
"""
return [derived_sense.word()
for sense in self.senses()
for derived_sense in sense.get_related('derivation')]
def translate(
self, lexicon: str = None, *, lang: str = None,
) -> Dict['Sense', List['Word']]:
"""Return a mapping of word senses to lists of translated words.
Arguments:
lexicon: if specified, translate to words in the target lexicon(s)
lang: if specified, translate to words with the language code
Example:
>>> w = wn.words('water bottle', pos='n')[0]
>>> for sense, words in w.translate(lang='ja').items():
... print(sense, [jw.lemma() for jw in words])
...
Sense('ewn-water_bottle-n-04564934-01') ['水筒']
"""
result = {}
for sense in self.senses():
result[sense] = [
t_sense.word()
for t_sense in sense.translate(lang=lang, lexicon=lexicon)
]
return result
T = TypeVar('T', bound='_Relatable')
class _Relatable(_LexiconElement):
__slots__ = 'id',
def __init__(
self,
id: str,
_lexid: int = NON_ROWID,
_id: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
super().__init__(_lexid=_lexid, _id=_id, _wordnet=_wordnet)
self.id = id
def get_related(self: T, *args: str) -> List[T]:
raise NotImplementedError
def closure(self: T, *args: str) -> Iterator[T]:
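        # Breadth-first traversal over the named relation types; each related
        # entity is yielded at most once (the visited set breaks cycles).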
visited = set()
queue = self.get_related(*args)
while queue:
relatable = queue.pop(0)
if relatable.id not in visited:
visited.add(relatable.id)
yield relatable
queue.extend(relatable.get_related(*args))
def relation_paths(self: T, *args: str, end: T = None) -> Iterator[List[T]]:
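        # Depth-first enumeration of relation paths: with *end* given, yield
        # only paths ending at *end*; otherwise yield maximal paths.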
agenda: List[Tuple[List[T], Set[T]]] = [
([target], {self, target})
for target in self.get_related(*args)
if target._id != self._id # avoid self loops?
]
while agenda:
path, visited = agenda.pop()
if end is not None and path[-1] == end:
yield path
else:
related = [target for target in path[-1].get_related(*args)
if target not in visited]
if related:
for synset in reversed(related):
new_path = list(path) + [synset]
new_visited = visited | {synset}
agenda.append((new_path, new_visited))
elif end is None:
yield path
class Synset(_Relatable):
"""Class for modeling wordnet synsets."""
__slots__ = 'pos', '_ili'
__module__ = 'wn'
_ENTITY_TYPE = 'synsets'
def __init__(
self,
id: str,
pos: str,
ili: str = None,
_lexid: int = NON_ROWID,
_id: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
super().__init__(id=id, _lexid=_lexid, _id=_id, _wordnet=_wordnet)
self.pos = pos
self._ili = ili
@classmethod
def empty(
cls,
id: str,
ili: str = None,
_lexid: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
return cls(id, pos='', ili=ili, _lexid=_lexid, _wordnet=_wordnet)
@property
def ili(self):
if self._ili:
row = next(find_ilis(id=self._ili), None)
else:
row = next(find_proposed_ilis(synset_id=self._id), None)
if row is not None:
return ILI(*row)
return None
def __hash__(self):
# include ili and lexid in the hash so inferred synsets don't
# hash the same
return hash((self._ENTITY_TYPE, self._ili, self._lexid, self._id))
def __repr__(self) -> str:
return f'Synset({self.id!r})'
def definition(self) -> Optional[str]:
"""Return the first definition found for the synset.
Example:
>>> wn.synsets('cartwheel', pos='n')[0].definition()
'a wheel that has wooden spokes and a metal rim'
"""
lexids = self._get_lexicon_ids()
return next(
(text for text, _, _, _ in get_definitions(self._id, lexids)),
None
)
def examples(self) -> List[str]:
"""Return the list of examples for the synset.
Example:
>>> wn.synsets('orbital', pos='a')[0].examples()
['"orbital revolution"', '"orbital velocity"']
"""
lexids = self._get_lexicon_ids()
exs = get_examples(self._id, 'synsets', lexids)
return [ex for ex, _, _ in exs]
def senses(self) -> List['Sense']:
"""Return the list of sense members of the synset.
Example:
>>> wn.synsets('umbrella', pos='n')[0].senses()
[Sense('ewn-umbrella-n-04514450-01')]
"""
lexids = self._get_lexicon_ids()
iterable = get_synset_members(self._id, lexids)
return [Sense(*sense_data, self._wordnet) for sense_data in iterable]
def lexicalized(self) -> bool:
"""Return True if the synset is lexicalized."""
return get_lexicalized(self._id, 'synsets')
def lexfile(self) -> Optional[str]:
"""Return the lexicographer file name for this synset, if any."""
return get_lexfile(self._id)
def metadata(self) -> Metadata:
"""Return the synset's metadata."""
return get_metadata(self._id, 'synsets')
def words(self) -> List[Word]:
"""Return the list of words linked by the synset's senses.
Example:
>>> wn.synsets('exclusive', pos='n')[0].words()
[Word('ewn-scoop-n'), Word('ewn-exclusive-n')]
"""
return [sense.word() for sense in self.senses()]
def lemmas(self) -> List[Form]:
"""Return the list of lemmas of words for the synset.
Example:
            >>> wn.synsets('exclusive', pos='n')[0].lemmas()
['scoop', 'exclusive']
"""
return [w.lemma() for w in self.words()]
def get_related(self, *args: str) -> List['Synset']:
targets: List['Synset'] = []
lexids = self._get_lexicon_ids()
# first get relations from the current lexicon(s)
if self._id != NON_ROWID:
relations = get_synset_relations({self._id}, args, lexids)
targets.extend(Synset(*row[2:], self._wordnet)
for row in relations
if row[5] in lexids)
# then attempt to expand via ILI
if self._ili is not None and self._wordnet and self._wordnet._expanded_ids:
expids = self._wordnet._expanded_ids
# get expanded relation
expss = find_synsets(ili=self._ili, lexicon_rowids=expids)
rowids = {rowid for _, _, _, _, rowid in expss} - {self._id, NON_ROWID}
relations = get_synset_relations(rowids, args, expids)
ilis = {row[4] for row in relations} - {None}
# map back to target lexicons
seen = {ss._id for ss in targets}
for row in get_synsets_for_ilis(ilis, lexicon_rowids=lexids):
if row[-1] not in seen:
targets.append(Synset(*row, self._wordnet))
# add empty synsets for ILIs without a target in lexids
for ili in (ilis - {tgt._ili for tgt in targets}):
targets.append(
Synset.empty(
id=_INFERRED_SYNSET,
ili=ili,
_lexid=self._lexid,
_wordnet=self._wordnet
)
)
return targets
def _hypernym_paths(
self, simulate_root: bool, include_self: bool
) -> List[List['Synset']]:
paths = list(self.relation_paths('hypernym', 'instance_hypernym'))
if include_self:
paths = [[self] + path for path in paths] or [[self]]
if simulate_root and self.id != _FAKE_ROOT:
root = Synset.empty(
id=_FAKE_ROOT, _lexid=self._lexid, _wordnet=self._wordnet
)
paths = [path + [root] for path in paths] or [[root]]
return paths
def hypernym_paths(self, simulate_root: bool = False) -> List[List['Synset']]:
"""Return the list of hypernym paths to a root synset.
Example:
>>> for path in wn.synsets('dog', pos='n')[0].hypernym_paths():
... for i, ss in enumerate(path):
... print(' ' * i, ss, ss.lemmas()[0])
...
Synset('pwn-02083346-n') canine
Synset('pwn-02075296-n') carnivore
Synset('pwn-01886756-n') eutherian mammal
Synset('pwn-01861778-n') mammalian
Synset('pwn-01471682-n') craniate
Synset('pwn-01466257-n') chordate
Synset('pwn-00015388-n') animal
Synset('pwn-00004475-n') organism
Synset('pwn-00004258-n') animate thing
Synset('pwn-00003553-n') unit
Synset('pwn-00002684-n') object
Synset('pwn-00001930-n') physical entity
Synset('pwn-00001740-n') entity
Synset('pwn-01317541-n') domesticated animal
Synset('pwn-00015388-n') animal
Synset('pwn-00004475-n') organism
Synset('pwn-00004258-n') animate thing
Synset('pwn-00003553-n') unit
Synset('pwn-00002684-n') object
Synset('pwn-00001930-n') physical entity
Synset('pwn-00001740-n') entity
"""
return self._hypernym_paths(simulate_root, False)
def min_depth(self, simulate_root: bool = False) -> int:
"""Return the minimum taxonomy depth of the synset.
Example:
>>> wn.synsets('dog', pos='n')[0].min_depth()
8
"""
return min(
(len(path) for path in self.hypernym_paths(simulate_root=simulate_root)),
default=0
)
def max_depth(self, simulate_root: bool = False) -> int:
"""Return the maximum taxonomy depth of the synset.
Example:
>>> wn.synsets('dog', pos='n')[0].max_depth()
13
"""
return max(
(len(path) for path in self.hypernym_paths(simulate_root=simulate_root)),
default=0
)
def _shortest_hyp_paths(
self, other: 'Synset', simulate_root: bool
) -> Dict[Tuple['Synset', int], List['Synset']]:
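        # For each hypernym shared by *self* and *other*, map the pair
        # (hypernym, depth-of-hypernym) to the shortest path that connects
        # the two synsets through that hypernym.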
if self == other:
return {(self, 0): []}
from_self = self._hypernym_paths(simulate_root, True)
from_other = other._hypernym_paths(simulate_root, True)
common = set(flatten(from_self)).intersection(flatten(from_other))
if not common:
return {}
# Compute depths of common hypernyms from their distances.
        # Doing this now avoids more expensive lookups later.
depths: Dict['Synset', int] = {}
# subpaths accumulates paths to common hypernyms from both sides
subpaths: Dict['Synset', Tuple[List[List['Synset']], List[List['Synset']]]]
subpaths = {ss: ([], []) for ss in common}
for which, paths in (0, from_self), (1, from_other):
for path in paths:
for dist, ss in enumerate(path):
if ss in common:
# self or other subpath to ss (not including ss)
subpaths[ss][which].append(path[:dist + 1])
# keep maximum depth
depth = len(path) - dist - 1
if ss not in depths or depths[ss] < depth:
depths[ss] = depth
shortest: Dict[Tuple['Synset', int], List['Synset']] = {}
for ss in common:
from_self_subpaths, from_other_subpaths = subpaths[ss]
shortest_from_self = min(from_self_subpaths, key=len)
# for the other path, we need to reverse it and remove the pivot synset
shortest_from_other = min(from_other_subpaths, key=len)[-2::-1]
shortest[(ss, depths[ss])] = shortest_from_self + shortest_from_other
return shortest
def shortest_path(
self, other: 'Synset', simulate_root: bool = False
) -> List['Synset']:
"""Return the shortest path from the synset to the *other* synset.
Arguments:
other: endpoint synset of the path
simulate_root: if :python:`True`, ensure any two synsets
are always connected by positing a fake root node
"""
pathmap = self._shortest_hyp_paths(other, simulate_root)
key = min(pathmap, key=lambda key: len(pathmap[key]), default=None)
if key is None:
raise wn.Error(f'no path between {self!r} and {other!r}')
return pathmap[key][1:]
def common_hypernyms(
self, other: 'Synset', simulate_root: bool = False
) -> List['Synset']:
"""Return the common hypernyms for the current and *other* synsets.
Arguments:
other: synset that is a hyponym of any shared hypernyms
simulate_root: if :python:`True`, ensure any two synsets
always share a hypernym by positing a fake root node
"""
from_self = self._hypernym_paths(simulate_root, True)
from_other = other._hypernym_paths(simulate_root, True)
common = set(flatten(from_self)).intersection(flatten(from_other))
return sorted(common)
def lowest_common_hypernyms(
self, other: 'Synset', simulate_root: bool = False
) -> List['Synset']:
"""Return the common hypernyms furthest from the root.
Arguments:
other: synset that is a hyponym of any shared hypernyms
simulate_root: if :python:`True`, ensure any two synsets
always share a hypernym by positing a fake root node
"""
pathmap = self._shortest_hyp_paths(other, simulate_root)
# keys of pathmap are (synset, depth_of_synset)
max_depth: int = max([depth for _, depth in pathmap], default=-1)
if max_depth == -1:
return []
else:
return [ss for ss, d in pathmap if d == max_depth]
def holonyms(self) -> List['Synset']:
"""Return the list of synsets related by any holonym relation.
Any of the following relations are traversed: ``holonym``,
``holo_location``, ``holo_member``, ``holo_part``,
``holo_portion``, ``holo_substance``.
"""
return self.get_related(
'holonym',
'holo_location',
'holo_member',
'holo_part',
'holo_portion',
'holo_substance',
)
def meronyms(self) -> List['Synset']:
"""Return the list of synsets related by any meronym relation.
Any of the following relations are traversed: ``meronym``,
``mero_location``, ``mero_member``, ``mero_part``,
``mero_portion``, ``mero_substance``.
"""
return self.get_related(
'meronym',
'mero_location',
'mero_member',
'mero_part',
'mero_portion',
'mero_substance',
)
def hypernyms(self) -> List['Synset']:
"""Return the list of synsets related by any hypernym relation.
Both the ``hypernym`` and ``instance_hypernym`` relations are
traversed.
"""
return self.get_related(
'hypernym',
'instance_hypernym'
)
def hyponyms(self) -> List['Synset']:
"""Return the list of synsets related by any hyponym relation.
Both the ``hyponym`` and ``instance_hyponym`` relations are
traversed.
"""
return self.get_related(
'hyponym',
'instance_hyponym'
)
def translate(self, lexicon: str = None, *, lang: str = None) -> List['Synset']:
"""Return a list of translated synsets.
Arguments:
lexicon: if specified, translate to synsets in the target lexicon(s)
lang: if specified, translate to synsets with the language code
Example:
>>> es = wn.synsets('araña', lang='es')[0]
>>> en = es.translate(lexicon='ewn')[0]
>>> en.lemmas()
['spider']
"""
ili = self._ili
if not ili:
return []
return synsets(ili=ili, lang=lang, lexicon=lexicon)
class Count(int):
"""A count of sense occurrences in some corpus."""
__module__ = 'wn'
_id: int
def __new__(cls, value, _id: int = NON_ROWID):
obj = int.__new__(cls, value) # type: ignore
obj._id = _id
return obj
def metadata(self) -> Metadata:
"""Return the count's metadata."""
return get_metadata(self._id, 'counts')
class Sense(_Relatable):
"""Class for modeling wordnet senses."""
__slots__ = '_entry_id', '_synset_id'
__module__ = 'wn'
_ENTITY_TYPE = 'senses'
def __init__(
self,
id: str,
entry_id: str,
synset_id: str,
_lexid: int = NON_ROWID,
_id: int = NON_ROWID,
_wordnet: 'Wordnet' = None
):
super().__init__(id=id, _lexid=_lexid, _id=_id, _wordnet=_wordnet)
self._entry_id = entry_id
self._synset_id = synset_id
def __repr__(self) -> str:
return f'Sense({self.id!r})'
def word(self) -> Word:
"""Return the word of the sense.
Example:
>>> wn.senses('spigot')[0].word()
Word('pwn-spigot-n')
"""
return word(id=self._entry_id)
def synset(self) -> Synset:
"""Return the synset of the sense.
Example:
>>> wn.senses('spigot')[0].synset()
Synset('pwn-03325088-n')
"""
return synset(id=self._synset_id)
def examples(self) -> List[str]:
"""Return the list of examples for the sense."""
lexids = self._get_lexicon_ids()
exs = get_examples(self._id, 'senses', lexids)
return [ex for ex, _, _ in exs]
def lexicalized(self) -> bool:
"""Return True if the sense is lexicalized."""
return get_lexicalized(self._id, 'senses')
def adjposition(self) -> Optional[str]:
"""Return the adjective position of the sense.
Values include :python:`"a"` (attributive), :python:`"p"`
(predicative), and :python:`"ip"` (immediate
postnominal). Note that this is only relevant for adjectival
senses. Senses for other parts of speech, or for adjectives
that are not annotated with this feature, will return
``None``.
"""
return get_adjposition(self._id)
def frames(self) -> List[str]:
"""Return the list of subcategorization frames for the sense."""
lexids = self._get_lexicon_ids()
return get_syntactic_behaviours(self._id, lexids)
def counts(self) -> List[Count]:
"""Return the corpus counts stored for this sense."""
lexids = self._get_lexicon_ids()
return [Count(value, _id=_id)
for value, _id in get_sense_counts(self._id, lexids)]
def metadata(self) -> Metadata:
"""Return the sense's metadata."""
return get_metadata(self._id, 'senses')
def get_related(self, *args: str) -> List['Sense']:
"""Return a list of related senses.
One or more relation types should be passed as arguments which
determine the kind of relations returned.
Example:
>>> physics = wn.senses('physics', lexicon='ewn')[0]
>>> for sense in physics.get_related('has_domain_topic'):
... print(sense.word().lemma())
...
coherent
chaotic
incoherent
"""
lexids = self._get_lexicon_ids()
iterable = get_sense_relations(self._id, args, lexids)
return [Sense(sid, eid, ssid, lexid, rowid, self._wordnet)
for _, _, sid, eid, ssid, lexid, rowid in iterable
if lexids is None or lexid in lexids]
def get_related_synsets(self, *args: str) -> List[Synset]:
"""Return a list of related synsets."""
lexids = self._get_lexicon_ids()
iterable = get_sense_synset_relations(self._id, args, lexids)
return [Synset(ssid, pos, ili, lexid, rowid, self._wordnet)
for _, _, ssid, pos, ili, lexid, rowid in iterable
if lexids is None or lexid in lexids]
def translate(self, lexicon: str = None, *, lang: str = None) -> List['Sense']:
"""Return a list of translated senses.
Arguments:
lexicon: if specified, translate to senses in the target lexicon(s)
lang: if specified, translate to senses with the language code
Example:
>>> en = wn.senses('petiole', lang='en')[0]
>>> pt = en.translate(lang='pt')[0]
>>> pt.word().lemma()
'pecíolo'
"""
synset = self.synset()
return [t_sense
for t_synset in synset.translate(lang=lang, lexicon=lexicon)
for t_sense in t_synset.senses()]
class Wordnet:
"""Class for interacting with wordnet data.
A wordnet object acts essentially as a filter by first selecting
matching lexicons and then searching only within those lexicons
for later queries. On instantiation, a *lang* argument is a BCP47
language code that restricts the selected lexicons to those whose
language matches the given code. A *lexicon* argument is a
space-separated list of lexicon specifiers that more directly
select lexicons by their ID and version; this is preferable when
there are multiple lexicons in the same language or multiple
    versions with the same ID.
Some wordnets were created by translating the words from a larger
wordnet, namely the Princeton WordNet, and then relying on the
larger wordnet for structural relations. An *expand* argument is a
second space-separated list of lexicon specifiers which are used
for traversing relations, but not as the results of queries.
"""
__slots__ = ('_lexicons', '_lexicon_ids', '_expanded', '_expanded_ids',
'_default_mode')
__module__ = 'wn'
def __init__(self, lexicon: str = None, *, lang: str = None, expand: str = None):
# default mode means any lexicon is searched or expanded upon,
# but relation traversals only target the source's lexicon
self._default_mode = (not lexicon and not lang)
lexs = list(find_lexicons(lexicon or '*', lang=lang))
self._lexicons: Tuple[Lexicon, ...] = tuple(map(_to_lexicon, lexs))
self._lexicon_ids: Tuple[int, ...] = tuple(lx._id for lx in self._lexicons)
self._expanded: Tuple[Lexicon, ...] = ()
if expand is None:
if self._default_mode:
expand = '*'
else:
deps = [(id, ver, _id)
for lex in self._lexicons
for id, ver, _, _id in get_lexicon_dependencies(lex._id)]
# warn only if a dep is missing and a lexicon was specified
if not self._default_mode:
missing = ' '.join(
f'{id}:{ver}' for id, ver, _id in deps if _id is None
)
if missing:
warnings.warn(
f'lexicon dependencies not available: {missing}',
wn.WnWarning
)
expand = ' '.join(
f'{id}:{ver}' for id, ver, _id in deps if _id is not None
)
if expand:
self._expanded = tuple(map(_to_lexicon, find_lexicons(lexicon=expand)))
self._expanded_ids: Tuple[int, ...] = tuple(lx._id for lx in self._expanded)
def lexicons(self):
"""Return the list of lexicons covered by this wordnet."""
return self._lexicons
def expanded_lexicons(self):
"""Return the list of expand lexicons for this wordnet."""
return self._expanded
def word(self, id: str) -> Word:
"""Return the first word in this wordnet with identifier *id*."""
iterable = find_entries(id=id, lexicon_rowids=self._lexicon_ids)
try:
return Word(*next(iterable), self)
except StopIteration:
raise wn.Error(f'no such lexical entry: {id}')
def words(self, form: str = None, pos: str = None) -> List[Word]:
"""Return the list of matching words in this wordnet.
Without any arguments, this function returns all words in the
wordnet's selected lexicons. A *form* argument restricts the
words to those matching the given word form, and *pos*
restricts words by their part of speech.
"""
iterable = find_entries(form=form, pos=pos, lexicon_rowids=self._lexicon_ids)
return [Word(*word_data, self) for word_data in iterable]
def synset(self, id: str) -> Synset:
"""Return the first synset in this wordnet with identifier *id*."""
iterable = find_synsets(id=id, lexicon_rowids=self._lexicon_ids)
try:
return Synset(*next(iterable), self)
except StopIteration:
raise wn.Error(f'no such synset: {id}')
def synsets(
self, form: str = None, pos: str = None, ili: str = None
) -> List[Synset]:
"""Return the list of matching synsets in this wordnet.
Without any arguments, this function returns all synsets in
the wordnet's selected lexicons. A *form* argument restricts
synsets to those whose member words match the given word
form. A *pos* argument restricts synsets to those with the
given part of speech. An *ili* argument restricts synsets to
those with the given interlingual index; generally this should
select a unique synset within a single lexicon.
"""
iterable = find_synsets(
form=form, pos=pos, ili=ili, lexicon_rowids=self._lexicon_ids,
)
return [Synset(*synset_data, self) for synset_data in iterable]
def sense(self, id: str) -> Sense:
"""Return the first sense in this wordnet with identifier *id*."""
iterable = find_senses(id=id, lexicon_rowids=self._lexicon_ids)
try:
return Sense(*next(iterable), self)
except StopIteration:
raise wn.Error(f'no such sense: {id}')
def senses(self, form: str = None, pos: str = None) -> List[Sense]:
"""Return the list of matching senses in this wordnet.
Without any arguments, this function returns all senses in the
wordnet's selected lexicons. A *form* argument restricts the
senses to those whose word matches the given word form, and
*pos* restricts senses by their word's part of speech.
"""
iterable = find_senses(form=form, pos=pos, lexicon_rowids=self._lexicon_ids)
return [Sense(*sense_data, self) for sense_data in iterable]
def ili(self, id: str) -> ILI:
"""Return the first ILI in this wordnet with identifer *id*."""
iterable = find_ilis(id=id, lexicon_rowids=self._lexicon_ids)
try:
return ILI(*next(iterable))
except StopIteration:
raise wn.Error(f'no such ILI: {id}')
def ilis(self, status: str = None) -> List[ILI]:
iterable = find_ilis(status=status, lexicon_rowids=self._lexicon_ids)
return [ILI(*ili_data) for ili_data in iterable]
def _to_lexicon(data) -> Lexicon:
rowid, id, label, language, email, license, version, url, citation, logo = data
return Lexicon(
id,
label,
language,
email,
license,
version,
url=url,
citation=citation,
logo=logo,
_id=rowid
)
def projects() -> List[Dict]:
"""Return the list of indexed projects.
This returns the same dictionaries of information as
:meth:`wn.config.get_project_info
<wn._config.WNConfig.get_project_info>`, but for all indexed
projects.
Example:
>>> infos = wn.projects()
>>> len(infos)
36
>>> infos[0]['label']
'Open English WordNet'
"""
index = wn.config.index
return [
wn.config.get_project_info(f'{project_id}:{version}')
for project_id, project_info in index.items()
for version in project_info['versions']
]
def lexicons(*, lexicon: str = None, lang: str = None) -> List[Lexicon]:
"""Return the lexicons matching a language or lexicon specifier.
Example:
>>> wn.lexicons(lang='en')
[<Lexicon ewn:2020 [en]>, <Lexicon pwn:3.0 [en]>]
"""
try:
w = Wordnet(lang=lang, lexicon=lexicon)
except wn.Error:
return []
else:
return w.lexicons()
def word(id: str, *, lexicon: str = None, lang: str = None) -> Word:
"""Return the word with *id* in *lexicon*.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The *id* argument is then passed to the
:meth:`Wordnet.word` method.
>>> wn.word('ewn-cell-n')
Word('ewn-cell-n')
"""
return Wordnet(lang=lang, lexicon=lexicon).word(id=id)
def words(
form: str = None,
pos: str = None,
*,
lexicon: str = None,
lang: str = None,
) -> List[Word]:
"""Return the list of matching words.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The remaining arguments are passed to the
:meth:`Wordnet.words` method.
>>> len(wn.words())
282902
>>> len(wn.words(pos='v'))
34592
>>> wn.words(form="scurry")
[Word('ewn-scurry-n'), Word('ewn-scurry-v')]
"""
return Wordnet(lang=lang, lexicon=lexicon).words(form=form, pos=pos)
def synset(id: str, *, lexicon: str = None, lang: str = None) -> Synset:
"""Return the synset with *id* in *lexicon*.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The *id* argument is then passed to the
:meth:`Wordnet.synset` method.
>>> wn.synset('ewn-03311152-n')
Synset('ewn-03311152-n')
"""
return Wordnet(lang=lang, lexicon=lexicon).synset(id=id)
def synsets(
form: str = None,
pos: str = None,
ili: str = None,
*,
lexicon: str = None,
lang: str = None,
) -> List[Synset]:
"""Return the list of matching synsets.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The remaining arguments are passed to the
:meth:`Wordnet.synsets` method.
>>> len(wn.synsets('couch'))
4
>>> wn.synsets('couch', pos='v')
[Synset('ewn-00983308-v')]
"""
return Wordnet(lang=lang, lexicon=lexicon).synsets(form=form, pos=pos, ili=ili)
def senses(
form: str = None,
pos: str = None,
*,
lexicon: str = None,
lang: str = None,
) -> List[Sense]:
"""Return the list of matching senses.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The remaining arguments are passed to the
:meth:`Wordnet.senses` method.
>>> len(wn.senses('twig'))
3
>>> wn.senses('twig', pos='n')
[Sense('ewn-twig-n-13184889-02')]
"""
return Wordnet(lang=lang, lexicon=lexicon).senses(form=form, pos=pos)
def sense(id: str, *, lexicon: str = None, lang: str = None) -> Sense:
"""Return the sense with *id* in *lexicon*.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The *id* argument is then passed to the
:meth:`Wordnet.sense` method.
>>> wn.sense('ewn-flutter-v-01903884-02')
Sense('ewn-flutter-v-01903884-02')
"""
return Wordnet(lang=lang, lexicon=lexicon).sense(id=id)
def ili(id: str, *, lexicon: str = None, lang: str = None) -> ILI:
"""Return the interlingual index with *id*.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The *id* argument is then passed to the
:meth:`Wordnet.ili` method.
>>> wn.ili(id='i1234')
ILI('i1234')
>>> wn.ili(id='i1234').status
'presupposed'
"""
return Wordnet(lang=lang, lexicon=lexicon).ili(id=id)
def ilis(
status: str = None,
*,
lexicon: str = None,
lang: str = None,
) -> List[ILI]:
"""Return the list of matching interlingual indices.
This will create a :class:`Wordnet` object using the *lang* and
*lexicon* arguments. The remaining arguments are passed to the
:meth:`Wordnet.ilis` method.
>>> len(wn.ilis())
120071
>>> len(wn.ilis(status='proposed'))
2573
>>> wn.ilis(status='proposed')[-1].definition()
'the neutrino associated with the tau lepton.'
>>> len(wn.ilis(lang='de'))
13818
"""
return Wordnet(lang=lang, lexicon=lexicon).ilis(status=status)
| [
"[email protected]"
] | |
2fb27e6c828a8ac88870fdb65f162e369d27d3ca | 71f00ed87cd980bb2f92c08b085c5abe40a317fb | /Data/GoogleCloud/google-cloud-sdk/lib/googlecloudsdk/command_lib/filestore/backups/util.py | 253b7e8f5059390c4d1a981d074475c459222000 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | factoryofthesun/Rao-NLP | 2bd8269a8eed1cb352c14c8fde88e3111ccca088 | 87f9723f5ee51bd21310d58c3425a2a7271ec3c5 | refs/heads/master | 2023-04-18T08:54:08.370155 | 2020-06-09T23:24:07 | 2020-06-09T23:24:07 | 248,070,291 | 0 | 1 | null | 2021-04-30T21:13:04 | 2020-03-17T20:49:03 | Python | UTF-8 | Python | false | false | 2,370 | py | # -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions for Cloud Filestore backup commands."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.core import properties
INSTANCE_NAME_TEMPLATE = 'projects/{}/locations/{}/instances/{}'
BACKUP_NAME_TEMPLATE = 'projects/{}/locations/{}/backups/{}'
PARENT_TEMPLATE = 'projects/{}/locations/{}'
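# These hooks are referenced from declarative (YAML) gcloud command specs; a
# hedged, illustrative reference (the real spec files may differ):
#
#   request:
#     modify_request_hooks:
#     - googlecloudsdk.command_lib.filestore.backups.util:AddBackupNameToRequest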
def FormatBackupCreateRequest(ref, args, req):
"""Python hook for yaml commands to supply the backup create request with proper values."""
del ref
req.backupId = args.backup
project = properties.VALUES.core.project.Get(required=True)
location = args.region
req.parent = PARENT_TEMPLATE.format(project, location)
return req
def FormatBackupAccessRequest(ref, args, req):
"""Python hook for yaml commands to supply backup access requests with the proper name."""
del ref
project = properties.VALUES.core.project.Get(required=True)
location = args.region
req.name = BACKUP_NAME_TEMPLATE.format(project, location, args.backup)
return req
def AddInstanceNameToRequest(ref, args, req):
"""Python hook for yaml commands to process the source instance name."""
del ref
project = properties.VALUES.core.project.Get(required=True)
req.backup.sourceInstance = INSTANCE_NAME_TEMPLATE.format(
project, args.instance_zone, args.instance)
return req
def AddBackupNameToRequest(ref, args, req):
"""Python hook for yaml commands to process the source backup name."""
project = properties.VALUES.core.project.Get(required=True)
location = args.source_backup_region or ref.locationsId
req.restoreInstanceRequest.sourceBackup = BACKUP_NAME_TEMPLATE.format(
project, location, args.source_backup)
return req
| [
"[email protected]"
] | |
9f7a749185882e2c5faa0dfef7fe85f2208a8096 | 7ce56dc3a1110b61d0087565f02b4fe576cad58c | /scrapy_test/picture/picture/pipelines.py | 9aac5a02d3f866297d61c654a719f88c3a7d5c54 | [] | no_license | lssxfy123/PythonStudy | 7c251961ce72217e83184853cb0c11dc773e4075 | d5beba373b78c6c0276c413a44819d3084899d01 | refs/heads/master | 2022-11-28T17:25:36.957483 | 2021-11-26T09:55:32 | 2021-11-26T09:55:32 | 55,392,700 | 1 | 1 | null | 2022-11-22T01:39:12 | 2016-04-04T07:28:25 | Jupyter Notebook | UTF-8 | Python | false | false | 793 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
import os
import requests
from picture.settings import IMAGES_STORE
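# Design note: this pipeline downloads images synchronously with requests,
# which blocks Scrapy's event loop; Scrapy's built-in ImagesPipeline fetches
# image_urls asynchronously and usually scales better for large crawls.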
class PicturePipeline(object):
def process_item(self, item, spider):
        # Strip characters that are invalid in Windows file names.
        file_path = IMAGES_STORE + re.sub(r'[\\/:*?"<>|]', '', item["title"].strip())
if not os.path.exists(file_path):
os.makedirs(file_path)
image_urls = item['image_urls']
for url in image_urls:
            response = requests.get(url)
            response.raise_for_status()  # fail loudly on a bad download
            file_name = file_path + '/' + url.split('/')[-1]
            with open(file_name, 'wb') as file_obj:
                file_obj.write(response.content)
return item
| [
"[email protected]"
] | |
796488331e7adf05752f2a1b73759c353cf42b04 | 3b35218014ed5503bb75158139e404d4029dedae | /profil/migrations/0005_auto_20160510_1225.py | cf3f9b1e8e7a38c05331fa6d33321060063a6450 | [] | no_license | psarrus/Centre_Equestre | 1b962f6cebc930fa8b4297d95943f592fc0725b5 | f9493162cd0b58c996c8c39dcd13de6b904fc796 | refs/heads/master | 2021-01-21T14:44:08.408880 | 2016-07-23T13:40:31 | 2016-07-23T13:40:31 | 56,144,262 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-10 10:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profil', '0004_profil_categorie'),
]
operations = [
migrations.AlterField(
model_name='profil',
name='categorie',
field=models.CharField(choices=[('1', 'El\xe8ve'), ('2', 'Cavalier'), ('3', 'Professeur'), ('4', 'Personnel'), ('5', 'Autre')], max_length=1),
),
]
| [
"[email protected]"
] | |
c63d787b39e6b7018435b0490e71dc883b6de049 | 362fa7a29274726fc1b37232771c0ce7021afe39 | /cogs/jurassic_modules/lab.py | 0643d5128818bc6b851d51681d574e2dc4d76aff | [] | no_license | H3Cki/T-Bot | 08bbcf349eb110b5078ae95eaeb8acc1cf56c24f | 75faaaf4cbe3d6a89d0b8062fd979c45afa53666 | refs/heads/master | 2021-04-02T12:08:53.218367 | 2020-09-26T00:16:16 | 2020-09-26T00:16:16 | 248,273,780 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,378 | py | from .dino_info import StaticDino, ProfileDinoPart, DinoStatEmojis as DSE
import discord
import random
import asyncio
from ..utils.general import splitList
from math import ceil
class Lab:
MAX_CAPACITY = 16
CONTROLS = {
'prev' : u'◀',
'next' : u'▶',
'reload': u'🔃',
'stop' : u'⏹'
}
def __init__(self,profile,cog=None,naked=False):
self.profile = profile
self.cog = cog
self.naked = naked
self.setup()
def setup(self):
self.parts = ProfileDinoPart.get(profile_id = self.profile.id)
self.dinos_with_parts = []
for part in self.parts:
part = part.entity
dino = StaticDino.get(name=part.dino_name)
if dino not in self.dinos_with_parts:
self.dinos_with_parts.append(dino)
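        # Overload penalty: past MAX_CAPACITY, each extra dino raises build
        # costs by 12.5% (1/8 per slot), rounded to one decimal place.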
self.cost_multiplier = round(1 + ((len(self.dinos_with_parts)-self.MAX_CAPACITY))/8,1) if len(self.dinos_with_parts) > self.MAX_CAPACITY else 1
self.cost_multiplier_perc = (self.cost_multiplier*100)-100
if self.cog:
            self.dinos_with_parts = sorted(
                self.dinos_with_parts,
                key=lambda x: (
                    self.cog.isDinoLive(self.profile.guild, self.profile.member, x.name),
                    x.buildRequirements().requirementsMet(self.profile, noex=True, boolean=True),
                    self.profile.resources > x.buildCost(self.profile.guild_id, self)
                ),
                reverse=True
            )
if self.naked == False:
self.dino_split_list = splitList(self.dinos_with_parts,7)
self.page_idx = 0
self.total_pages = len(self.dino_split_list)
def getList(self):
return self.dino_split_list[self.page_idx] if self.total_pages else self.dino_split_list
async def start(self,ctx):
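        # Reaction-based pagination: render the current page, attach control
        # emoji, then consume reaction_add events from the invoking user
        # until the stop control is pressed or the 35 s wait times out.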
embed = self.getEmbed(self.getList())
msg = await ctx.send(embed=embed)
if len(self.dino_split_list) > 1:
await msg.add_reaction(Lab.CONTROLS['prev'])
await msg.add_reaction(Lab.CONTROLS['next'])
await msg.add_reaction(Lab.CONTROLS['reload'])
await msg.add_reaction(Lab.CONTROLS['stop'])
def check(reaction, user):
return user.id == ctx.message.author.id and reaction.message.id == msg.id
while True:
            try:
                reaction, user = await self.cog.bot.wait_for('reaction_add', check=check, timeout=35.0)
            except asyncio.TimeoutError:
                break
if reaction.emoji == Lab.CONTROLS['stop']:
await msg.delete()
return
            if reaction.emoji == Lab.CONTROLS['reload']:
                self.setup()
starting_page_idx = self.page_idx
if reaction.emoji == Lab.CONTROLS['next']:
self.page_idx = self.page_idx + 1 if self.page_idx < self.total_pages - 1 else self.page_idx
if reaction.emoji == Lab.CONTROLS['prev']:
self.page_idx = self.page_idx - 1 if self.page_idx > 0 else self.page_idx
if self.page_idx != starting_page_idx or reaction.emoji == Lab.CONTROLS['reload']:
embed = self.getEmbed(self.getList())
await msg.edit(embed=embed)
await msg.remove_reaction(reaction.emoji,ctx.message.author)
def getEmbed(self,dinos):
desc = None
if self.cost_multiplier > 1:
desc = f'❗ Laboratory is overloaded, resource cost is increased by {self.cost_multiplier_perc}%'
embed = discord.Embed(title = f"LABORATORY - {len(self.dinos_with_parts)} Dinosaurs" ,description=desc,color=discord.Color.from_rgb(random.randint(0,255),random.randint(0,255),random.randint(0,255)))
embed.set_author(name=self.profile.member.display_name,icon_url=self.profile.member.avatar_url_as(format='png'))
embed.set_thumbnail(url='https://i.imgur.com/s1xRSYT.png')
texts = []
for dino in dinos:
if dino.isDiscovered(self.profile.guild_id):
stats_txt = f'`🔥` {dino.stats_as_string()}\n'
emoji = DSE.emojis['dino1']
tier = f" T{dino.tier}"
else:
stats_txt = ''
emoji = '❓'
tier = ''
islive = self.cog.isDinoLive(self.profile.guild,self.profile.member,dino.name) if self.cog else None
if islive:
ilt = " ⏰ LIVE!"
else:
ilt = ""
build_cost = dino.buildCost(self.profile.guild_id,self)
requirs = dino.buildRequirements()
buildables = [requirs.requirementsMet(self.profile,noex=True,boolean=True),self.profile.resources > build_cost]
reqs = requirs.compareAsText(self.profile)
reqs_txt = f"`🔸` {reqs}"
bc = build_cost.compareAsText(self.profile.resources)
bc_txt = f"\n`🔺` {bc}"
if all(buildables):
build_txt = '\n<:blank:551400844654936095><:high:551205365526691841> Buildable!'
elif buildables[0]:
build_txt = '\n<:blank:551400844654936095><:mid:551402984974581763> Not enough resources'
elif buildables[1]:
build_txt = '\n<:blank:551400844654936095><:mid:551402984974581763> Missing parts'
else:
build_txt = ''
embed.add_field(name=f"{emoji} {dino.name.upper()}{tier}"+ilt,value=stats_txt+reqs_txt+bc_txt+build_txt,inline=False)
warning = f'\n❗ After exceeding {self.MAX_CAPACITY} slots build cost for all dinos will be increased' if len(self.dinos_with_parts) > self.MAX_CAPACITY - 5 else ''
legend = '\n🔥-Stats, 🔸-Parts required, 🔺-Resources required'
page_txt = f'[Page: {self.page_idx+1}/{self.total_pages}]\n' if len(self.dino_split_list) > 1 else ''
embed.set_footer(text=f'{page_txt}Use command !build to build a dino, or !build all to build everything'+legend+warning)
return embed
| [
"[email protected]"
] | |
b1c818806a7402be80445ca18bff8944f3f90fd6 | 6e61e02ff67171ec157b5694dbfda974955fb864 | /First_Django_Project/settings.py | c260b65fe0b4e6714b64a2d8f7add2c4bd865638 | [] | no_license | emmanueln644/First_Django_Project | 33754ead923b084d87d8928f3ed3646917aee37e | 60f2cec91cd1a2c5680af1beda78efb2c17c421d | refs/heads/main | 2023-04-06T15:17:55.070238 | 2021-04-20T19:51:23 | 2021-04-20T19:51:23 | 359,935,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,128 | py | """
Django settings for First_Django_Project project.
Generated by 'django-admin startproject' using Django 3.0.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0#d=u)7f5-0o*_vjhjf!c4=kq4&cj8bwoo(&0%t*2l%8(#v&lv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'First_Django_Project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'First_Django_Project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
9d4c99c7b27ada6cb300e5d4531cc7b5698a3d28 | 289366c8e140f4a64f76728e7582a88017ff6071 | /leave_groups.py | ee6bacf1699e8f492ea400e270e2057333a39fb1 | [] | no_license | roginski/leavemygroups | e089d0dc851d563ae97c1de4fe879f9cff8699aa | 30c8564aed08bb95c0eeecbf902c6043782d0b68 | refs/heads/master | 2022-05-19T07:11:49.059644 | 2022-03-21T09:38:38 | 2022-03-21T09:38:38 | 38,970,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | #!/usr/bin/env python
import requests
access_token = '041cfc0c19e18a9803066a3d1fa0bee4f45d19e87aa3658c88522a06d495433400f229dbe2226d6f1327d'
user_id = '1801158'
app_id = 4989700
def login(email, password):
r = session.get('http://vk.com/login.php', params={'email': email, 'pass': password})
r.raise_for_status()
def auth(user_id):
#url = 'https://oauth.vk.com/authorize?client_id=4989700&scope=groups&redirect_uri=https://oauth.vk.com/blank.html&display=popup&v=5.34&response_type=token'
r = session.get(
'https://oauth.vk.com/authorize',
params={
'client_id': app_id,
'scope': 'groups',
'redirect_uri': 'https://oauth.vk.com/blank.html',
'display': 'popup',
'v': '5.34',
'response_type': 'token'})
print r.text
def get_groups():
url = 'https://api.vk.com/method/groups.get?user_id=%s&access_token=%s&v=5.34' % (user_id, access_token)
r = requests.get(url)
r.raise_for_status()
return r.json()['response']['items']
def leave_group(group_id):
url = 'https://api.vk.com/method/groups.leave?group_id=%s&user_id=%s&access_token=%s&v=5.34' % (group_id, user_id, access_token)
r = requests.get(url)
r.raise_for_status()
def leave_groups():
groups = get_groups()
print len(groups), 'groups found, leaving!'
for gr in groups:
leave_group(gr)
print 'Left group', gr
session = requests.Session()
login('[email protected]', 'drjynfrntujdyj')
auth(user_id)
| [
"[email protected]"
] | |
f4eb77fc446b5c458fcd784804493e19c66b2351 | c579950bf5d5acc9283058f4aeb13fa57686244b | /day1/part1.py | 6eb1f57c98c24f46900f3337fa3de0e574636dfc | [
"MIT"
] | permissive | kayew/aoc-2018 | 621180abb79acd35c9fd6629b5148a935cbe462c | 4db312cc6b3aa8d5d21e4573755a99fd3b36b4ff | refs/heads/master | 2021-10-02T22:10:29.328062 | 2018-12-01T14:47:39 | 2018-12-01T14:47:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | #!/usr/bin/env python3
with open("input.txt") as file:
lines = file.readlines()
total = 0
for line in lines:
total += int(line)
print(total) | [
"[email protected]"
] | |
50d0a51187d3988fc95a82761d84adea3ee986a1 | b4faab9b904d155ce6e781a675f972dcb810c008 | /waifu2x/training/calibrate_output.py | 4e54b6e18cc19528a00a155526e28a07cfc1dcb1 | [
"MIT",
"CC-BY-NC-4.0",
"Apache-2.0"
] | permissive | nagadomi/nunif | 0c595d3e61f3c89082ce7481cfba139b85ac863d | 6d4b92da09801572e984b05f6733d460b60250aa | refs/heads/master | 2023-08-31T21:29:56.460275 | 2023-08-21T18:16:01 | 2023-08-21T18:16:01 | 202,088,108 | 486 | 59 | MIT | 2023-08-04T05:51:17 | 2019-08-13T07:23:32 | Python | UTF-8 | Python | false | false | 2,816 | py | import argparse
from tqdm import tqdm
import torch
import torch.nn as nn
from nunif.models import load_model, save_model, get_model_config
def calibrate_output():
class RGBCalibration(nn.Module):
def __init__(self):
super().__init__()
self.rgb = nn.Parameter(torch.zeros((1, 3, 1, 1), dtype=torch.float32))
def forward(self, x):
return torch.clamp(x.detach() + self.rgb, 0., 1.)
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--input", "-i", type=str, required=True, help="input 4x model")
parser.add_argument("--gpu", type=int, default=0, help="gpu id. -1 for cpu")
args = parser.parse_args()
device = f"cuda:{args.gpu}" if args.gpu >= 0 else "cpu"
model, meta = load_model(args.input)
assert meta["name"] == "waifu2x.swin_unet_4x"
input_size = 64
batch_size = 16
epoch = 100
steps = 2048
amp = False if device == "cpu" else True
amp_device_type = "cuda" if "cuda" in device else "cpu"
amp_dtype = torch.bfloat16 if amp_device_type == "cpu" else torch.float16
offset = get_model_config(model, "i2i_offset")
scale = get_model_config(model, "i2i_scale")
acc = 8
model = model.to(device).eval()
criterion = nn.MSELoss().to(device)
cal = RGBCalibration().to(device)
cal.train()
grad_scaler = torch.cuda.amp.GradScaler(enabled=amp)
optimizer = torch.optim.Adam(cal.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.95)
for epoch in range(epoch):
losses = []
c = 0
for step in tqdm(range(steps // batch_size), ncols=80):
            rgb = (torch.rand((batch_size, 3, 1, 1)) * 255).round() / 255.0
            x = rgb.expand((batch_size, 3, input_size, input_size)).clone().to(device)
            y = rgb.expand((batch_size, 3,
                            input_size * scale - offset * 2,
                            input_size * scale - offset * 2)).clone().to(device)
            with torch.autocast(device_type=amp_device_type, dtype=amp_dtype, enabled=amp):
                with torch.no_grad():
                    z = model.unet(x)
                z = cal(z)
                loss = criterion(z, y)
            losses.append(loss.item())
            grad_scaler.scale(loss).backward()
            c += 1
            if c % acc == 0:
                grad_scaler.step(optimizer)
                grad_scaler.update()
                # zero gradients only after an optimizer step so that `acc`
                # micro-batches actually accumulate between updates
                optimizer.zero_grad()
scheduler.step()
print(f"epoch {epoch}: loss={sum(losses) / len(losses)}, lr={scheduler.get_lr()}, RGB={cal.rgb.data.flatten().tolist()}")
print(f"RGBCalibration: {cal.rgb.data.flatten().tolist()}")
if __name__ == "__main__":
calibrate_output()
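# Usage sketch (editor's note; the checkpoint path is a placeholder):
#   python calibrate_output.py -i models/swin_unet_4x.pth --gpu 0
# The script expects a saved `waifu2x.swin_unet_4x` checkpoint (see the assert
# above) and prints the fitted per-channel RGB offsets after training.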
| [
"[email protected]"
] | |
11dac78ea79735c540a89c55181e2832d4cb734d | d3b39413106d8e3456150606b29aac6908af7b98 | /Python/Code/20/main.py | d63c12fbdd3e39345584a5f51976de0a370399da | [] | no_license | hnmahamud/Unix-Problem-Solving | 2322ae57ae744a661138097908d58b6a037e19a6 | db454ec61fdee200bbeb89d1a6d07e75d992230e | refs/heads/master | 2023-01-10T23:31:24.787352 | 2020-11-21T17:19:17 | 2020-11-21T17:19:17 | 314,863,946 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 189 | py | word = 'Java'
# read through a context manager so the file handle is closed automatically
with open('test.txt', 'r') as f:
    for line in f:
        # case-insensitive substring match
        if word.lower() in line.lower():
            print(line.rstrip('\n')) | [
"[email protected]"
] | |
2ba09893226bf7e5014fbc70b13a6c1faa226e49 | d6a0cc09c0fd86d95bc0ee8034acf09334a2c377 | /tensorflow/C05_ ANN_DNN/lecture/step02_softmax_DNN.py | b6394ea889a7e4ea1c391f61b124628396176696 | [] | no_license | Kimuksung/bigdata | e2b0e04e817a7113cba8c5d4acdd8cf20b664147 | e7cce223eb55709d9ebcb631e39360b347a02764 | refs/heads/master | 2021-07-13T10:16:54.498961 | 2021-03-08T02:27:28 | 2021-03-08T02:27:28 | 241,530,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,821 | py | """
DNN model
- hidden layer : relu function
- output layer : Softmax activation function
 - hidden layer 1 : 12 nodes
 - hidden layer 2 : 6 nodes
- data set : iris
"""
import tensorflow.compat.v1 as tf # ver1.x
tf.disable_v2_behavior() # do not use ver2.0 behavior
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
# x, y data
iris = load_iris()
x_data = iris.data
y_data = iris.target
y_data = y_data.reshape(-1,1)
obj = OneHotEncoder()
# sparse -> numpy
y_data = obj.fit_transform(y_data).toarray()
y_data.shape
x_train , x_test , y_train , y_test = train_test_split( x_data , y_data)
# define x, y placeholder variables
X = tf.placeholder(dtype = tf.float32 , shape =[None , 4])
Y = tf.placeholder(dtype = tf.float32 , shape =[None , 3])
# =============================================================================
# Dnn network
# =============================================================================
hidden_node1 = 12
hidden_node2 = 6
# hidden layer1
w1 = tf.Variable(tf.random_normal([ 4 , hidden_node1]))
b1 = tf.Variable(tf.random_normal([ hidden_node1 ]))
# hidden layer2
w2 = tf.Variable(tf.random_normal([ hidden_node1 , hidden_node2]))
b2 = tf.Variable(tf.random_normal([ hidden_node2 ]))
# output layer
w3 = tf.Variable(tf.random_normal([ hidden_node2 , 3]))
b3 = tf.Variable(tf.random_normal([ 3 ]))
# softmax classifier
# 1) regression equation : predictions
hidden_output1 = tf.nn.relu(tf.matmul(X, w1) + b1) # hidden layer
hidden_output2 = tf.nn.relu(tf.matmul(hidden_output1, w2) + b2)
model = tf.matmul(hidden_output2 , w3) + b3
softmax = tf.nn.softmax(model)
# 2) Loss
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels = Y , logits = model
))
# 3) optimizer : minimize loss (update w, b)
train = tf.train.AdamOptimizer(0.1).minimize(loss) # minimize loss
# 4) argmax() : encoding -> decoding
y_pred = tf.argmax(softmax , axis =1)
y_true = tf.argmax(Y , axis =1)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
    # train on the training split so the held-out test split stays unseen
    feed_data = { X : x_train , Y : y_train}
    # iterative training
    for step in range(500):
        _, loss_val = sess.run([train , loss] , feed_dict = feed_data)
        if (step+1)% 50 == 0 :
            print(" step : {} , loss : {}".format(step+1 , loss_val))
    # model result : evaluate on the held-out test split
    y_pred_re = sess.run( y_pred , feed_dict ={ X : x_test })
    y_true_re = sess.run(y_true , feed_dict = { Y : y_test })
print("y pred = " , y_pred_re)
print("y true = " , y_true_re)
acc = accuracy_score(y_true_re,y_pred_re)
print("acc = " , acc)
| [
"[email protected]"
] | |
807823111c3c2231cfa2e27d7dcdaa35764acbc7 | 26e5cbe3175b260c47c69caaf986d1bd90d2c3fa | /samplepyhana.py | f2043de500033dd66a61ec0372604f0203584b97 | [] | no_license | shaw1236/sapSamples | 1536e32949072be070e86c18654341ce2687ec90 | b43b3f65c0112d3f656173d31eca372c02e15e78 | refs/heads/master | 2020-09-11T06:26:53.590070 | 2020-06-24T18:41:25 | 2020-06-24T18:41:25 | 221,971,058 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,578 | py | ## Purpose: wrapper to pyhdb to simplify the query call
## Date : Nov 11, 2019
## Author : Simon Li
'''
Usage :
from MyHdb import MyHdb
hdb = MyHdb()
'''
#https://github.com/SAP/PyHDB
import pyhdb
class MyHdb:
#constructor
def __init__(self, sys = 'SB3'):
self.sys = sys.upper()
self.__login = {}
self.__setConnection(sys)
#destructor
def __del__(self):
self.__connection.close()
# set the connection accordingly
def __setConnection(self, sys):
self.__login['user'] = "shaw1236"
self.__login['password'] = "testCQ3" # Will drop it from login
if self.sys == 'SB3':
self.__login['host'] = r"10.230.82.120" #sapr3sb3
self.__login['instanceNo'] = "01"
elif self.sys == "CD1":
pass
elif self.sys == "CQ1":
pass
elif self.sys == "CD2":
pass
elif self.sys == "CQ2":
pass
elif self.sys == "SB1":
pass
elif self.sys == "SB2":
pass
else:
raise ValueError("System id {0} is not supported!".format(sys))
self.__login['port'] = 30015 + 100 * int(self.__login['instanceNo'])
#print(self.__login)
self.__schema = "SAPSB3"
self.__connection = pyhdb.connect(
host = self.__login['host'],
port = self.__login['port'],
user = self.__login['user'],
password = self.__login['password']
)
self.__cursor = self.__connection.cursor()
@property
def schema(self):
return self.__schema
@schema.setter
def schema(self, schema):
self.__schema = schema
@property
def connection(self):
return self.__connection
@connection.setter
def connection(self, conn):
pass
@property
def cursor(self):
return self.__cursor
def execute(self, isql):
sql = isql.replace('$.', self.__schema)
sql = sql.replace('$', self.__schema)
self.__cursor.execute(sql)
def fetchmany(self, num):
return self.__cursor.fetchmany(num)
def fetchone(self):
return self.__cursor.fetchone()
def fetchall(self):
return self.__cursor.fetchall()
if __name__ == '__main__':
#from MyHdb import MyHdb
try:
hdb = MyHdb()
hdb.execute("SELECT 'Hello Python World' FROM DUMMY")
print(hdb.fetchone())
hdb.execute("SELECT pstyv, kurztext from $TJHAPT")
records = hdb.fetchmany(3) #fetchone(), fetchall()
print(records)
except ValueError as e:
print(str(e)) | [
"[email protected]"
] | |
59de8c57d501bd523824f1575cccfd191af63dfc | ed06ef44c944707276a2fca16d61e7820596f51c | /Python/stone-game-v.py | 3b5ae50a61aea6458307eaf7d11d58e7279c0f96 | [] | no_license | sm2774us/leetcode_interview_prep_2021 | 15842bef80637c6ff43542ed7988ec4b2d03e82c | 33b41bea66c266b733372d9a8b9d2965cd88bf8c | refs/heads/master | 2023-05-29T14:14:49.074939 | 2021-06-12T19:52:07 | 2021-06-12T19:52:07 | 374,725,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,748 | py | # Time: O(n^2)
# Space: O(n^2)
class Solution(object):
def stoneGameV(self, stoneValue):
"""
:type stoneValue: List[int]
:rtype: int
"""
n = len(stoneValue)
prefix = [0]
for v in stoneValue:
prefix.append(prefix[-1] + v)
        mid = list(range(n))  # mutable copy: entries are advanced in the loop below
dp = [[0]*n for _ in range(n)]
for i in range(n):
dp[i][i] = stoneValue[i]
max_score = 0
for l in range(2, n+1):
for i in range(n-l+1):
j = i+l-1
while prefix[mid[i]]-prefix[i] < prefix[j+1]-prefix[mid[i]]:
mid[i] += 1 # Time: O(n^2) in total
p = mid[i]
max_score = 0
if prefix[p]-prefix[i] == prefix[j+1]-prefix[p]:
max_score = max(dp[i][p-1], dp[j][p])
else:
if i <= p-2:
max_score = max(max_score, dp[i][p-2])
if p <= j:
max_score = max(max_score, dp[j][p])
dp[i][j] = max(dp[i][j-1], (prefix[j+1]-prefix[i]) + max_score)
dp[j][i] = max(dp[j][i+1], (prefix[j+1]-prefix[i]) + max_score)
return max_score
# Time: O(n^2)
# Space: O(n^2)
class Solution2(object):
def stoneGameV(self, stoneValue):
"""
:type stoneValue: List[int]
:rtype: int
"""
n = len(stoneValue)
prefix = [0]
for v in stoneValue:
prefix.append(prefix[-1] + v)
mid = [[0]*n for _ in range(n)]
for l in range(1, n+1):
for i in range(n-l+1):
j = i+l-1
p = i if l == 1 else mid[i][j-1]
while prefix[p]-prefix[i] < prefix[j+1]-prefix[p]:
p += 1 # Time: O(n^2) in total
mid[i][j] = p
rmq = [[0]*n for _ in range(n)]
for i in range(n):
rmq[i][i] = stoneValue[i]
dp = [[0]*n for _ in range(n)]
for l in range(2, n+1):
for i in range(n-l+1):
j = i+l-1
p = mid[i][j]
max_score = 0
if prefix[p]-prefix[i] == prefix[j+1]-prefix[p]:
max_score = max(rmq[i][p-1], rmq[j][p])
else:
if i <= p-2:
max_score = max(max_score, rmq[i][p-2])
if p <= j:
max_score = max(max_score, rmq[j][p])
dp[i][j] = max_score
rmq[i][j] = max(rmq[i][j-1], (prefix[j+1]-prefix[i]) + max_score)
rmq[j][i] = max(rmq[j][i+1], (prefix[j+1]-prefix[i]) + max_score)
return dp[0][n-1]
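# Quick check (editor's note; example taken from the problem statement):
#   print(Solution().stoneGameV([6, 2, 3, 4, 5, 5]))   # -> 18
#   print(Solution2().stoneGameV([6, 2, 3, 4, 5, 5]))  # -> 18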
| [
"[email protected]"
] | |
6f1a2669f7e36e5c1eaedb917e17c720177d2365 | 70c38c9d44ea63f13d21426b2e6f53c6851b4cd1 | /14--Morphological.py | 2a7d093d8d3fe5cd48086895d1343bdeac61216b | [] | no_license | harshit-777/OpenCV-Learning | d6b626ef909c2e958420e841d5c4d34f0b430cc7 | dd709fde70eb5474172c585ec26a85482e655937 | refs/heads/master | 2022-11-13T09:12:00.722570 | 2020-06-29T05:32:29 | 2020-06-29T05:32:29 | 275,738,797 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 913 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('H1.png', cv2.IMREAD_GRAYSCALE)
_, mask = cv2.threshold(img, 220, 255, cv2.THRESH_BINARY_INV)
kernal = np.ones((5,5), np.uint8)
dilation = cv2.dilate(mask, kernal, iterations=2)  # grows white regions, filling small gaps
erosion = cv2.erode(mask, kernal, iterations=1)  # shrinks white regions (like soil erosion), removing small dots
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernal)  # erosion followed by dilation: removes white noise
closing = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernal)  # dilation followed by erosion: fills small holes
mg = cv2.morphologyEx(mask, cv2.MORPH_GRADIENT, kernal)  # difference between dilation and erosion (object outline)
th = cv2.morphologyEx(mask, cv2.MORPH_TOPHAT, kernal)  # difference between the input and its opening
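# Side note (editor's sketch, not from the original): cv2.getStructuringElement
# builds shaped kernels; an elliptical kernel often suits rounded objects
# better than the square np.ones kernel used above, e.g.:
#   kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
#   opening_e = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel_e)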
titles = ['image', 'mask', 'dilation', 'erosion', 'opening', 'closing', 'mg', 'th']
images = [img, mask, dilation, erosion, opening, closing, mg, th]
for i in range(8):
plt.subplot(2, 4, i+1), plt.imshow(images[i], 'gray')
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show() | [
"[email protected]"
] | |
71e91ed1f8228d4e4b6642429ea3080bcbf8a5af | 956a665a3e4b2d2112e001b6576ed14c8ab9a3d6 | /account/urls.py | 6a117ecc189ab2e3feb4d5804815080a80cb4d7c | [] | no_license | UBegimai/python11_blog | bab03066ad3da10ce124e66b13dd8647b157184f | 5ef5e8c8da73433ec25c4b5554d35cb5915d685a | refs/heads/master | 2023-05-02T16:47:48.084709 | 2021-06-10T11:29:20 | 2021-06-10T11:29:20 | 375,338,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | from django.contrib.auth.views import LogoutView
from django.urls import path
from django.views.generic import TemplateView
from account import views
from account.views import RegistrationView
urlpatterns = [
path('register/', RegistrationView.as_view(),
name='registration'),
path('success_registration/',
views.SuccessfulRegistrationView.as_view(),
name='successful-registration'),
path('activation/', views.ActivationView.as_view(),
name='activation'),
path('login/', views.SignInView.as_view(),
name='login'),
path('logout/', LogoutView.as_view(),
name='logout'),
path('change_password/',
views.ChangePasswordView.as_view(),
name='change-password'),
path('forgot_password/',
views.ForgotPasswordView.as_view(),
name='forgot-password'),
path('forgot_pass_complete/',
TemplateView.as_view(template_name='account/forgot_pass_complete.html'),
name='forgot-pass-complete')
]
| [
"[email protected]"
] | |
1e61d6408e0725eb6fcec5ad07fcf611b4a33e9e | 0ccf8ee2feb9faaea15cf6b54eb3ccbc8133ddb5 | /Euler/euler18.py | 84aec6f0d181a82ae6fed093631e9764c612e45b | [] | no_license | duncanmichel/Programming-Problem-Solutions | 753ad156bbdc44188986403d7a77bd22133b0885 | b0e6112e730b5d08aae21a552572fb0929672976 | refs/heads/master | 2023-01-04T12:51:15.322581 | 2022-12-31T17:00:38 | 2022-12-31T17:00:38 | 158,260,686 | 1 | 0 | null | 2019-05-02T18:52:18 | 2018-11-19T16:55:44 | Python | UTF-8 | Python | false | false | 1,104 | py | triangle = [[75],
[95, 64],
[17, 47, 82],
[18, 35, 87, 10],
[20, 4, 82, 47, 65],
[19, 1, 23, 75, 3, 34],
[88, 2, 77, 73, 7, 63, 67],
[99, 65, 4, 28, 6, 16, 70, 92],
[41, 41, 26, 56, 83, 40, 80, 70, 33],
[41, 48, 72, 33, 47, 32, 37, 16, 94, 29],
[53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14],
[70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57],
[91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48],
[63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31],
[4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23]]
def evalChild(lvl,row):
child = triangle[lvl][row]
if lvl == len(triangle) - 1:
return child
return child + max(evalChild(lvl+1,row),evalChild(lvl+1,row+1))
def brute():
return triangle[0][0] + max(evalChild(1,0),evalChild(1,1))
print("The maximum sum from a path through the triangle is: %d" % brute())
"""
level = 0
row_index = 0
def child1(lvl,row):
return triangle[lvl+1][row]
def child2(lvl,row):
return triangle[lvl+1][row+1]
for i in triangle:
for j in range(0,len(i)):
print(i[j])
"""
| [
"[email protected]"
] | |
6806e9f5f4ff1a7fa0883b8ec7676c4ff81db7c8 | ca9db01a710a0ec64985f358c60280e3c6f57350 | /Sort/src/Sort.py | 250d4ea3e830de4d13c3670924f671dba5521b1b | [] | no_license | ThomasBriggs/python-examples | 40d5d1e5b199bb2182c7443b3061461bd64e50a9 | a0d3bbd577fb51b1b6ea220b4ea6f3dd7b6ebf2c | refs/heads/master | 2022-06-03T11:02:45.301230 | 2022-05-27T12:36:48 | 2022-05-27T12:36:48 | 189,917,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | def genRandom(x, min=0, max=100):
from random import randint
a = []
for i in range(x):
a.append(randint(min, max))
    return a
def bubbleSort(array):
n = len(array)
for i in range(n-1):
for j in range(n-1-i):
            if array[j] > array[j+1]:
                # idiomatic tuple swap instead of a temp variable
                array[j], array[j+1] = array[j+1], array[j]
    return array
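# Minimal usage sketch (editor's note):
#   if __name__ == '__main__':
#       data = genRandom(10)
#       print(data)
#       print(bubbleSort(data))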
| [
"[email protected]"
] | |
054ce53f885fac0ae5666cb468e1a9572cd1c333 | 07c61596c1fba2e2a7034fe5af9707794ea2e2c1 | /Hackerrank/Python/Set_Mutations.py3 | f35aae2bde3fdab4fa28161a7ed681cbf359a234 | [] | no_license | H-Shen/Collection_of_my_coding_practice | 2fcb2f8fef9451ad4a3a9c063bbf6a34ea5966b4 | 6415552d38a756c9c89de0c774799654c73073a6 | refs/heads/master | 2023-08-24T21:19:08.886667 | 2023-08-22T03:47:39 | 2023-08-22T03:47:39 | 180,731,825 | 8 | 1 | null | 2021-08-13T18:25:25 | 2019-04-11T06:48:09 | C++ | UTF-8 | Python | false | false | 411 | py3 | a = int(input())
A = set(map(int,input().strip().split()))
N = int(input())
for i in range(N):
I = input().split()
b = int(I[1])
B = set(map(int,input().strip().split()))
if I[0] == 'intersection_update':
A &= B
elif I[0] == 'update':
A |= B
elif I[0] == 'symmetric_difference_update':
A ^= B
elif I[0] == 'difference_update':
A -= B
print(sum(A))
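# Note (editor's addition): the in-place set operators above mirror the update
# methods named in the input: A &= B is A.intersection_update(B), A |= B is
# A.update(B), A ^= B is A.symmetric_difference_update(B), and A -= B is
# A.difference_update(B).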
| [
"[email protected]"
] | |
86e8fa4560dbeb9b31b42fe539e3860eca576eea | 85e09a9d9a26a6e9c685715525fd9c50434d8c03 | /tests/test_index.py | 17b32221f2bda9442e60d662bec2a1503b3c9186 | [] | no_license | zhuny/template-web | 0c1310941f7a2c68df7fc4df14ea96e72f3b2342 | 37723fa03a066c594e423da4b2b5d7aefb8ae975 | refs/heads/master | 2021-07-13T23:50:05.077834 | 2020-04-15T11:33:58 | 2020-04-15T11:33:58 | 249,177,873 | 0 | 0 | null | 2021-06-17T00:44:00 | 2020-03-22T12:24:20 | Python | UTF-8 | Python | false | false | 259 | py | import unittest
from main import app
class IndexTest(unittest.TestCase):
def setUp(self):
self.client = app.test_client()
def test_get_restful(self):
resp1 = self.client.get("/index")
resp2 = self.client.get("/index/123")
| [
"[email protected]"
] | |
6682b3369aad8d7f4aa7f5ff10bd9db85b0912cb | 1901ef608c4347977a962b84d952f3e283bc2551 | /07_string.py | 1bd58bc68c4888a5367a21199fe59bb3049ea704 | [] | no_license | abdullahalmamun0/Python_Full_Course | 469bcfeec53679c9f9760e35c7b60d29dc1e8199 | 19e5dd95efb1c538a9fc60ba2c7612591338e53f | refs/heads/main | 2023-06-23T08:40:05.020906 | 2021-07-12T16:43:08 | 2021-07-12T16:43:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,021 | py | # -*- coding: utf-8 -*-
a = 'codinglaugh'
b = "codinglaugh"
print(a,type(a))
print(b,type(b))
c = "youtube channel name is 'codinglaugh'"
print(c)
d = '''codinglaugh
learn
something
everyday'''
print(d,type(d))
e = 'codinglaugh '
f = "learn "
g = "something "
h = "everyday "
# print(e+f+g+h+"marjuk")
print(e*5)
print("c" not in e)
print(len(e))
length = 0
for i in e:
print(i,end="")
length += 1
print()
print(length)
h = "codinglaugh"
if "codingdshflaugh" != h:
print("matched")
else:
print("something wrong")
i = "abc"
j = "ABC"
print(i>j)
# print(e[-12])
# print(e[:])
# e = e[:3]+"m"+e[4:]
# e = e[:3]+""+e[4:]
# del e[3]
print(e)
# =============================================================================
# String format()
# =============================================================================
a = "codinglaughsdh"
b = 4
c = 3.4534758
# d = a + " {1} {0:.3f}".format(c,b)
d = "{0:$>20} {1} {0} marjuk".format(a,"jsdbf")
print(d)
| [
"[email protected]"
] | |
664cb187c0cf7a2c9333969198b48d7b1c9239a1 | 31597ea394a645637fb7c14ae33b65a6dc8ca004 | /atom/python/keywords.py | a8d4320761c7040163c6e8b28802a74ec3848fe6 | [] | no_license | codespeak/grammars | 41703acefb2a6aebca2846a1488ce9d4606f5666 | cf658c5d3d63752b66de11919645cba52b670976 | refs/heads/master | 2020-08-04T08:56:18.050563 | 2015-09-29T00:12:24 | 2015-09-29T00:12:24 | 38,143,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,945 | py | from pynhost.grammars.atom import atomextension, atomutils
from pynhost.grammars.atom.python import pyextension
class PythonKeywordsGrammar(pyextension.PyExtensionGrammar):
    activate_key = '{ctrl+alt+8}'
def __init__(self):
super().__init__()
self.settings['filtered words'] = []
self.settings['priority'] = 5
self.mapping = {
'import': atomutils.OTHER['beginningConditionalSpace'] + 'import' + atomutils.OTHER['endConditionalSpace'],
'<hom_quad>': 'for' + atomutils.OTHER['endConditionalSpace'],
'<hom_in>': atomutils.OTHER['beginningConditionalSpace'] + 'in' + atomutils.OTHER['endConditionalSpace'],
'<hom_and>': atomutils.OTHER['beginningConditionalSpace'] + 'and' + atomutils.OTHER['endConditionalSpace'],
'<hom_assert>': 'assert' + atomutils.OTHER['endConditionalSpace'],
'<hom_break>': 'break{enter}',
'<hom_return>': 'return{}'.format(atomutils.OTHER['endConditionalSpace']),
'<hom_continue>': 'continue{enter}',
'<hom_as>': atomutils.OTHER['beginningConditionalSpace'] + 'as' + atomutils.OTHER['endConditionalSpace'],
'<hom_yield>': atomutils.OTHER['beginningConditionalSpace'] + 'yield' + atomutils.OTHER['endConditionalSpace'],
'<hom_try>': 'try:{enter}',
'<hom_not>': atomutils.OTHER['beginningConditionalSpace'] + 'not' + atomutils.OTHER['endConditionalSpace'],
'<hom_or>': atomutils.OTHER['beginningConditionalSpace'] + 'or' + atomutils.OTHER['endConditionalSpace'],
'<hom_is>': atomutils.OTHER['beginningConditionalSpace'] + 'is' + atomutils.OTHER['endConditionalSpace'],
'<hom_none>': 'None',
'<hom_false>': 'False',
'<hom_true>': 'True',
'<hom_if>': atomutils.OTHER['beginningConditionalSpace'] + 'if' + atomutils.OTHER['endConditionalSpace'],
'<hom_with>': atomutils.OTHER['beginningConditionalSpace'] + 'with' + atomutils.OTHER['endConditionalSpace'],
'<hom_else>': atomutils.OTHER['beginningConditionalSpace'] + 'else',
'<hom_from>': atomutils.OTHER['beginningConditionalSpace'] + 'from' + atomutils.OTHER['endConditionalSpace'],
'<hom_raise>': atomutils.OTHER['beginningConditionalSpace'] + 'raise' + atomutils.OTHER['endConditionalSpace'],
'<hom_except>': atomutils.OTHER['beginningConditionalSpace'] + 'except' + atomutils.OTHER['endConditionalSpace'],
'<hom_index> <hom_error>': atomutils.OTHER['beginningConditionalSpace'] + 'IndexError',
'(<hom_runtime> | <hom_run> <hom_time>) <hom_error>': atomutils.OTHER['beginningConditionalSpace'] + 'RuntimeError',
'<hom_pop>': 'pop',
'<hom_cast> <hom_integer>': 'int(){left}',
'<hom_cast> <hom_string>': 'str(){left}',
'<hom_strip>': 'strip',
'<hom_pass>': 'pass',
}
| [
"[email protected]"
] | |
5853b12773f0b1d80e6fc09cba643ccead08d7df | 0e36c610f199032c0d1093cb156449efa566cd1f | /api/urls.py | aeaf5640b5e91984248a8f5310334bce292282f5 | [
"MIT"
] | permissive | kostin-001/sound_check | 3351c06918b7e5db11ffdce9a30808bdb774ab07 | 8842d298ed19e3182f50efce3b48ee35ae7e4b1a | refs/heads/master | 2022-12-15T05:28:04.289586 | 2022-12-01T08:17:05 | 2022-12-01T08:17:05 | 267,848,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | from django.urls import re_path, path, include
from drf_yasg import openapi
from drf_yasg.views import get_schema_view
from rest_framework.permissions import AllowAny
from rest_framework.routers import DefaultRouter
from api.v1.endpoints import SongViewSet
router = DefaultRouter()
router.register(r'songs', SongViewSet, basename='songs')
app_name = 'v1'
schema_view_api_v1 = get_schema_view(
openapi.Info(
title="Sound Check API backend",
default_version='v1',
description="Sound Check API",
terms_of_service="https://www.google.com/policies/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="BSD License"),
),
public=True,
permission_classes=(AllowAny,),
)
urlpatterns = [
re_path(r'^swagger(?P<format>\.json|\.yaml)$', schema_view_api_v1.without_ui(cache_timeout=0), name='schema-json'),
re_path(r'^swagger/$', schema_view_api_v1.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
re_path(r'^redoc/$', schema_view_api_v1.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('', include(router.urls)),
]
| [
"[email protected]"
] | |
7b74f92fb64929dec57098985a0ed523ae9c23dd | bd8dccd6f389a4b13d63233856e10924bbe4db2c | /old_junk/auto_complete.py | 57149712b58ea14c6470ff8eb57fb0481840ce47 | [] | no_license | LucHighwalker/tweetGenerator | ce138ea3a67978a74397985aee4624bcca52e7f4 | 6b7dd3f1e6b0bf82cf84cb37e24cfb961e3c3596 | refs/heads/master | 2020-04-02T11:52:56.837906 | 2018-12-14T01:35:14 | 2018-12-14T01:35:14 | 154,410,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | import os
import time
import getch
words = [line.rstrip('\n')
for line in open('./words.txt')]
possibilities = list()
sentence = ''
current_word = ''
cursor_timing = 1
cursor_switched = time.time()
cursor = '|'
def display():
global possibilities
global sentence
global cursor_timing
global cursor_switched
global current_word
global cursor
os.system('cls' if os.name == 'nt' else 'clear')
    if time.time() - cursor_switched >= cursor_timing:
        cursor_switched = time.time()  # restart the blink timer on each toggle
        if cursor == '':
            cursor = '|'
        else:
            cursor = ''
word_count = 0
disp_possible = ''
for word in possibilities:
if word_count < 5:
disp_possible = '{}{} '.format(disp_possible, word)
word_count = word_count + 1
else:
break
display = '{}\n\n{}{}{}\n\n\n'.format(
disp_possible, sentence, current_word, cursor)
print(display)
def get_possibilities(letter):
global possibilities
global current_word
if len(possibilities) == 0:
word_list = words
else:
word_list = possibilities
new_possibilites = list()
for word in word_list:
if len(word) >= len(current_word):
if word[len(current_word) - 1] == letter:
new_possibilites.append(word)
possibilities = new_possibilites
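    # Editor's note: because `current_word` already ends with `letter`, this
    # incremental check is equivalent to filtering the previous candidates
    # with word.startswith(current_word).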
def user_input():
global possibilities
global sentence
global current_word
try:
user_input = getch.getch()
if user_input == ' ':
if len(possibilities) > 0:
if current_word != possibilities[0]:
current_word = possibilities[0]
possibilities = list()
sentence = '{}{} '.format(sentence, current_word)
current_word = ''
else:
current_word = '{}{}'.format(current_word, user_input)
get_possibilities(user_input)
return True
except EOFError:
return False
if __name__ == '__main__':
running = True
display()
while running:
user_input()
display()
| [
"[email protected]"
] | |
7b498f1fb0d8b447934a6ed881fe221e77176670 | b6e6060b2d5df98436b78b21e61e82a5e2a25aed | /neurolib/restore/restore.py | 66eaee7c34aeeb71dad938f1bcfa57f859088df5 | [] | no_license | gumpfly/neurolib | f7ba01d6539baf760eb10585e3cfa5cb1a077b8f | 12ee60e78f384a9fa9b780a614fae7b72d9b5b19 | refs/heads/master | 2020-12-02T14:55:10.829786 | 2019-03-22T20:11:03 | 2019-03-28T23:42:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,170 | py | # Copyright 2018 Daniel Hernandez Diaz, Columbia University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import os
import pickle
import numpy as np
import tensorflow as tf
# pylint: disable=bad-indentation, no-member, protected-access
class Restore():
"""
"""
def __init__(self,
rslt_dir,
metafile=None,
model='DKF'):
"""
Initialize the Restorer
"""
print('Initiating Restore...')
self.scope = model
tf.reset_default_graph()
self.sess = tf.Session()
rslt_dir = rslt_dir if rslt_dir[-1] == '/' else rslt_dir + '/'
self.rslt_dir = rslt_dir
    if metafile is None:
      metafile = self.get_latest_metafile_in_rslt_dir(rslt_dir)
      print("metafile", metafile)
    saver = tf.train.import_meta_graph(rslt_dir + metafile)
print('Restoring Model `{}` from metafile: {}'.format(model, metafile))
saver.restore(self.sess, tf.train.latest_checkpoint(rslt_dir))
self.get_outputs_dict()
@staticmethod
def get_latest_metafile_in_rslt_dir(rslt_dir):
"""
Return the latest metafile in the provided directory
"""
prefixes = [file[:-5] for file in os.listdir(rslt_dir) if 'meta'==file.split('.')[-1]]
    return max(prefixes, key=lambda f: int(f.split('-')[-1])) + '.meta'
def get_outputs_dict(self):
"""
"""
with open(self.rslt_dir + 'output_names', 'rb') as f:
self.output_names = pickle.load(f)
print('The available output tensors for this graph are:\n')
for name in self.output_names:
print('\t', name)
with open(self.rslt_dir + 'feed_keys', 'rb') as f:
self.feed_keys = pickle.load(f)
print("\nTo evaluate a tensor, you must feed a dataset dictionary"
" to `self.eval(...)` with the following keys")
for key in self.feed_keys:
print('\t', key, self.feed_keys[key])
def prepare_datasets(self, dataset, chunk='valid'):
"""
Split the dataset dictionary into train, validation and test datasets.
"""
dset = {}
for key in dataset:
key_split = key.split('_')
if key_split[0] == chunk:
inode = "_".join(key_split[1:])
dset[self.scope + '/' + inode + ':0'] = dataset[key]
return dset
def add_dummy_data(self, dataset):
"""
Finish off the feed_dict for a sess.run(...) from a dataset.
In particular, add inputs for all the dummy variables defined by the Builder
associated to this model
"""
data_key_prefix = self.scope + '/' + 'observation'
# get batch size from data
obskey = next(key for key in dataset if key.startswith(data_key_prefix))
batch_size = dataset[obskey].shape[0]
print("batch_size", batch_size)
# define int32 numpy array for the dummy batch size tensors
dummy_names = self.feed_keys['dummies']
print("dummy_names", dummy_names)
for key in dummy_names:
dataset[key] = np.array([batch_size], dtype=np.int32)
return dataset
def check_input_correctness(self, dset):
"""
"""
pass
def eval(self, name, _input, dataset_type='valid'):
"""
"""
self.check_input_correctness(_input)
prep_dataset = self.prepare_datasets(_input, chunk=dataset_type)
print("prep_dataset.keys()", prep_dataset.keys())
fd = self.add_dummy_data(prep_dataset)
with self.sess.as_default(): #pylint: disable=not-context-manager
rslt = self.sess.run(name, feed_dict=fd)
return rslt
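  # Hypothetical usage sketch (editor's note; the path and tensor name are
  # placeholders, not from the original):
  #   r = Restore('rslts/run1/', model='DKF')
  #   out = r.eval('DKF/cost:0', dataset_dict, dataset_type='valid')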
| [
"[email protected]"
] | |
9134c41f8c30bff516c9509e5b840081e7c3e4d3 | 26930c239cc8f850393e3924a467f0b8928a35fa | /Demo_one/venv/bin/easy_install | 0440de96bd745f1449d9da6d5d71f80f21ce1033 | [] | no_license | PETERMAOSX/Pycharm_Code | 2c64e3c55e671e6b1b535936612d864cc1241451 | e1f65356ed173835f4a7833249ce593e8d5a9330 | refs/heads/main | 2023-01-05T22:53:00.177845 | 2020-11-03T14:33:20 | 2020-11-03T14:33:20 | 309,711,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | #!/Users/neo/PycharmProjects/Demo_one/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | ||
9aa79891fd75f52a44e59e561528ca99a393fa4f | 5f16a1c392d979d4a9fcd03a77e33989000a07bf | /article/serializers.py | a4d3e796258204c1783548a8c56e94ccf5b22555 | [] | no_license | nikitabansal711/djangofirstapp | 826fabc436d6a04238cacff9fbf86c6be16af879 | b31f2fc5969c2db7c3b70b7cda40a718ba5f516d | refs/heads/master | 2021-05-21T15:50:02.198770 | 2020-04-05T10:18:31 | 2020-04-05T10:18:31 | 252,703,503 | 0 | 0 | null | 2020-04-04T09:49:06 | 2020-04-03T10:41:58 | Python | UTF-8 | Python | false | false | 928 | py | from rest_framework import serializers
from article.models import Article, Author
class ArticleSerializer(serializers.Serializer):
title = serializers.CharField(max_length=120)
description = serializers.CharField()
body = serializers.CharField()
author_id = serializers.IntegerField()
def create(self, validated_data):
return Article.objects.create(**validated_data)
def update(self, instance, validated_data):
instance.title = validated_data.get('title', instance.title)
instance.description = validated_data.get('description', instance.description)
instance.body = validated_data.get('body', instance.body)
instance.author_id = validated_data.get('author_id', instance.author_id)
instance.save()
return instance
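# Editor's sketch (not in the original): the explicit create()/update() pair
# above is what ModelSerializer would generate automatically, e.g.:
#   class ArticleModelSerializer(serializers.ModelSerializer):
#       class Meta:
#           model = Article
#           fields = ('title', 'description', 'body', 'author_id')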
class AuthorSerializer(serializers.ModelSerializer):
class Meta:
model = Author
fields = ('name', 'email') | [
"[email protected]"
] | |
a1ea1a1eb4d06992254c9a9080d64ada7fd5a7e3 | c3ddb7105733c30a3b6e45cd4c2943022347ba3e | /setup.py | d87bbcf608d662ec29296da3c981473b4aaff1a1 | [
"MIT"
] | permissive | theagilepadawan/python-asyncio-taps | 45d7fd6d74c1caf0f17f34e7e86f6fc51e885b32 | 85805dc42ece1009be9eb5de9f753c4801c81d7c | refs/heads/master | 2021-02-07T13:07:58.054370 | 2019-12-23T01:10:01 | 2019-12-23T01:10:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,799 | py | #!/usr/bin/env python3
from setuptools import setup, find_packages
from distutils.core import setup, Extension
import os
import os.path
include_dirs = ['/usr/local/include']
library_dirs = ['/usr/local/lib']
install_path = os.getenv('INSTALL_PATH', None)
if install_path:
include_dirs.append('%s/include' % install_path)
library_dirs.append('%s/lib' % install_path)
yangcheck_ext = Extension('yang_glue',
define_macros = [('MAJOR_VERSION', '1'),
('MINOR_VERSION', '0')],
include_dirs = include_dirs,
libraries = ['yang'],
library_dirs = library_dirs,
sources = ['pytaps/yang_glue.cxx'])
multicast_glue_ext = Extension('multicast_glue',
define_macros = [('MAJOR_VERSION', '1'),
('MINOR_VERSION', '0')],
include_dirs = include_dirs,
libraries = ['mcrx'],
library_dirs = library_dirs,
sources = ['pytaps/multicast_glue.cxx'])
setup(
name="pytaps",
version="0.1",
packages=find_packages(),
install_requires=['termcolor'],
package_data={
# If any package contains *.txt or *.rst files, include them:
'': ['*.txt', '*.rst'],
},
# metadata to display on PyPI
author="Max Franke",
author_email="[email protected]",
description="TAPS (Transport Services) API Reference implementation for IETF",
keywords="taps ietf implementation",
url="https://github.com/fg-inet/python-asyncio-taps",
project_urls={
"Working Group": "https://datatracker.ietf.org/wg/taps/about/",
"Documentation": "https://pytaps.readthedocs.io/en/latest/index.html",
"Source Code": "https://github.com/fg-inet/python-asyncio-taps",
},
classifiers=[
'License :: OSI Approved :: Python Software Foundation License'
],
data_files=[('pytaps/modules', [
'pytaps/modules/ietf-taps-api.yang',
'pytaps/modules/[email protected]',
'pytaps/modules/[email protected]',
'pytaps/modules/[email protected]',
'pytaps/modules/[email protected]',])],
ext_modules=[yangcheck_ext, multicast_glue_ext],
long_description='''
This is an implementation of a transport system as described by the TAPS (Transport Services) Working Group in the IETF in https://tools.ietf.org/html/draft-ietf-taps-interface-04. The full documentation can be found on https://pytaps.readthedocs.io/en/latest/index.html.
A transport system is a novel way to offer transport layer services to the application layer.
It provides an interface on top of multiple different transport protocols, such as TCP, SCTP, UDP, or QUIC. Instead of having to choose a transport protocol itself, the application only provides abstract requirements (*Transport Properties*), e.g., *Reliable Data Transfer*. The transport system maps then maps these properties to specific transport protocols, possibly trying out multiple different protocols in parallel. Furthermore, it can select between multiple local interfaces and remote IP addresses.
TAPS is currently being standardized in the [IETF TAPS Working Group](https://datatracker.ietf.org/wg/taps/about/):
- [Architecture](https://datatracker.ietf.org/doc/draft-ietf-taps-arch/)
- [Interface](https://datatracker.ietf.org/doc/draft-ietf-taps-interface/)
- [Implementation considerations](https://datatracker.ietf.org/doc/draft-ietf-taps-impl/)
People interested in participating in TAPS can [join the mailing list](https://www.ietf.org/mailman/listinfo/taps).
'''
# could also include long_description, download_url, etc.
)
| [
"[email protected]"
] | |
c0cff9cdc25398783e31ca66b9f2060d5df2fb35 | 3e592cd4c76ffddac02edca65f86d5a03a99b415 | /Python Exercises/LPTHW - E1.py | e2d4b14df7ec46d32e28a03a7b5a46aa95310cfb | [] | no_license | GMoney1337/Python | d32f1457a2e3c24f3ffdbe1deecf6d9f0a06c426 | fd7626f862ef919172368ddba16e97d54e48acb6 | refs/heads/master | 2021-01-24T04:19:59.761730 | 2019-12-16T18:42:02 | 2019-12-16T18:42:02 | 122,853,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | print("Hello World!")
print("Hello Again!")
print("This is fun.")
print('Yay! Printing.')
print("I'd rather you 'not'.")
print('I "said" do not touch this.')
| [
"[email protected]"
] | |
00697b0edf2522d1882b35c90d669bc71dbb5982 | fc357aba40672ce57fcbf96e2ad837caaef389d4 | /dashboard/dashboard/update_bug_with_results.py | 84d7ea069e616bc5010b8383e1469bf348d9be1f | [
"BSD-3-Clause"
] | permissive | dinosk/catapult | e550a7028ff3836fa7ec974d1a85eae2ccb14513 | 6de275176224197282cfd6a5617f3775abad734b | refs/heads/master | 2021-06-05T16:37:44.163473 | 2016-02-11T19:16:02 | 2016-02-11T19:16:02 | 56,242,905 | 0 | 1 | BSD-3-Clause | 2020-07-24T05:02:58 | 2016-04-14T14:12:00 | HTML | UTF-8 | Python | false | false | 33,139 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""URL endpoint for a cron job to update bugs after bisects."""
import datetime
import json
import logging
import re
import traceback
import urllib
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.api import urlfetch
from google.appengine.api import urlfetch_errors
from google.appengine.ext import ndb
from dashboard import bisect_fyi
from dashboard import buildbucket_service
from dashboard import datastore_hooks
from dashboard import email_template
from dashboard import issue_tracker_service
from dashboard import layered_cache
from dashboard import quick_logger
from dashboard import request_handler
from dashboard import rietveld_service
from dashboard import start_try_job
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import bug_data
from dashboard.models import try_job
# Try job status codes from rietveld (see TryJobResult in codereview/models.py)
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7)
# Not a status code from rietveld, added for completeness of the possible
# statuses a job can be in.
STARTED = -1
OK = (SUCCESS, WARNINGS, SKIPPED)
FAIL = (FAILURE, EXCEPTION)
_COMMIT_HASH_CACHE_KEY = 'commit_hash_%s'
_CONFIDENCE_THRESHOLD = 99.5
# Timeout in minutes set by buildbot for trybots.
_BISECT_BOT_TIMEOUT = 12 * 60
# Amount of time to pass before deleting a try job.
_STALE_TRYJOB_DELTA = datetime.timedelta(days=7)
# Amount of time to pass before deleting try jobs that use Buildbucket.
_STALE_TRYJOB_DELTA_BUILDBUCKET = datetime.timedelta(days=21)
_BUG_COMMENT_TEMPLATE = """Bisect job status: %(status)s
Bisect job ran on: %(bisect_bot)s
%(results)s
Buildbot stdio: %(buildbot_log_url)s
Job details: %(issue_url)s
"""
_AUTO_ASSIGN_MSG = """
==== Auto-CCing suspected CL author %(author)s ====
Hi %(author)s, the bisect results pointed to your CL below as possibly
causing a regression. Please have a look at this info and see whether
your CL is related.
"""
class UnexpectedJsonError(Exception):
pass
class BugUpdateFailure(Exception):
pass
class UpdateBugWithResultsHandler(request_handler.RequestHandler):
"""URL endpoint for a cron job to update bugs after bisects."""
def get(self):
"""The get handler method is called from a cron job.
It expects no parameters and has no output. It checks all current bisect try
jobs and send comments to an issue on the issue tracker if a bisect job has
completed.
"""
credentials = rietveld_service.Credentials(
rietveld_service.GetDefaultRietveldConfig(),
rietveld_service.PROJECTHOSTING_SCOPE)
issue_tracker = issue_tracker_service.IssueTrackerService(
additional_credentials=credentials)
# Set privilege so we can also fetch internal try_job entities.
datastore_hooks.SetPrivilegedRequest()
jobs_to_check = try_job.TryJob.query(
try_job.TryJob.status == 'started').fetch()
all_successful = True
for job in jobs_to_check:
try:
if job.use_buildbucket:
logging.info('Checking job %s with Buildbucket job ID %s.',
job.key.id(), getattr(job, 'buildbucket_job_id', None))
else:
logging.info('Checking job %s with Rietveld issue ID %s.',
job.key.id(), getattr(job, 'rietveld_issue_id', None))
_CheckJob(job, issue_tracker)
except Exception as e: # pylint: disable=broad-except
logging.error('Caught Exception %s: %s\n%s',
type(e).__name__, e, traceback.format_exc())
all_successful = False
if all_successful:
utils.TickMonitoringCustomMetric('UpdateBugWithResults')
def _CheckJob(job, issue_tracker):
"""Checks whether a try job is finished and updates a bug if applicable.
This method returns nothing, but it may log errors.
Args:
job: A TryJob entity, which represents one bisect try job.
issue_tracker: An issue_tracker_service.IssueTrackerService instance.
"""
# Give up on stale try job.
if job.use_buildbucket:
stale_delta = _STALE_TRYJOB_DELTA_BUILDBUCKET
else:
stale_delta = _STALE_TRYJOB_DELTA
if (job.last_ran_timestamp and
job.last_ran_timestamp < datetime.datetime.now() - stale_delta):
    comment = 'Stale bisect job, will stop waiting for results. '
    comment += 'Rietveld issue: %s' % job.rietveld_issue_id
start_try_job.LogBisectResult(job.bug_id, comment)
job.SetFailed()
return
if job.job_type == 'perf-try':
_CheckPerfTryJob(job)
elif job.job_type == 'bisect-fyi':
_CheckFYIBisectJob(job, issue_tracker)
else:
# Delete bisect jobs that aren't associated with any bug id.
if job.bug_id is None or job.bug_id < 0:
job.key.delete()
return
_CheckBisectJob(job, issue_tracker)
def _CheckPerfTryJob(job):
perf_results = _GetPerfTryResults(job)
if not perf_results:
return
_SendPerfTryJobEmail(job, perf_results)
job.SetCompleted()
def _SendPerfTryJobEmail(job, perf_results):
"""Sends an email to the user who started the perf try job."""
to = [job.email] if job.email else []
if not to:
logging.error('No "email" in job data. %s.', job.rietveld_issue_id)
return
perf_email = email_template.GetPerfTryJobEmail(perf_results)
if not perf_email:
logging.error('Failed to create "perf_email" from result data. %s.'
' Results data: %s', job.rietveld_issue_id, perf_results)
return
mail.send_mail(sender='[email protected]',
to=','.join(to),
subject=perf_email['subject'],
body=perf_email['body'],
html=perf_email['html'])
def _ParseCloudLinksFromOutput(output):
"""Extracts cloud storage URLs from text."""
html_results_pattern = re.compile(
r'@@@STEP_LINK@HTML Results@(?P<link>http://storage.googleapis.com/'
'chromium-telemetry/html-results/results-[a-z0-9-_]+)@@@',
re.MULTILINE)
profiler_pattern = re.compile(
r'@@@STEP_LINK@(?P<title>[^@]+)@(?P<link>https://console.developers.'
'google.com/m/cloudstorage/b/[a-z-]+/o/profiler-[a-z0-9-_.]+)@@@',
re.MULTILINE)
links = {
'html-results': html_results_pattern.findall(output),
'profiler': profiler_pattern.findall(output),
}
return links
def _LoadConfigFromString(contents):
try:
# The config should be in the following format:
# config = {'foo': 'foo'}
# So we really just need to strip off the "config" part.
json_contents = str(contents).split('{')[1].split('}')[0]
json_contents = json_contents.replace("'", '\"')
json_contents = '{%s}' % json_contents
return json.loads(json_contents)
except (IndexError, ValueError, AttributeError):
logging.error('Could not parse config contents: %s', contents)
return None
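# Example (editor's note): _LoadConfigFromString("config = {'command': 'x'}")
# returns {u'command': u'x'}; anything that fails to parse returns None.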
def _GetPerfTryResults(job):
"""Gets perf results for a perf try job.
Args:
job: TryJob entity.
Returns:
A dictionary containing status, results, buildbot_log_url, and
issue_url for this bisect job, None if perf try job is pending or
there's an error fetching run data.
"""
results = {}
# Fetch bisect bot results from Rietveld server.
response = _FetchRietveldIssueJSON(job)
issue_url = _RietveldIssueURL(job)
try_job_info = _ValidateRietveldResponse(response)
results['buildbot_log_url'] = str(try_job_info['url'])
results['issue_url'] = str(issue_url)
# Check whether the bisect job is finished or not and fetch the output.
result = int(try_job_info['result'])
if result not in OK + FAIL:
return None
results_url = ('%s/steps/Running%%20Bisection/logs/stdio/text' %
try_job_info['url'])
response = _FetchURL(results_url, skip_status_code=True)
results['bisect_bot'] = try_job_info['builder']
results['config'] = _LoadConfigFromString(job.config)
if not results['config']:
results['status'] = 'Failure'
return results
# We don't see content for "Result" step. Bot probably did not get there.
if not response or response.status_code != 200:
results['status'] = 'Failure'
return results
links = _ParseCloudLinksFromOutput(response.content)
results['html_results'] = (links['html-results'][0]
if links['html-results'] else '')
results['profiler_results'] = links['profiler']
results['status'] = 'Completed'
return results
def _CheckBisectJob(job, issue_tracker):
bisect_results = _GetBisectResults(job)
if not bisect_results:
logging.info('No bisect results, job may be pending.')
return
logging.info('Bisect job status: %s.', bisect_results['status'])
if bisect_results['status'] == 'Completed':
_PostSuccessfulResult(job, bisect_results, issue_tracker)
job.SetCompleted()
elif bisect_results['status'] == 'Failure with partial results':
_PostFailedResult(
job, bisect_results, issue_tracker, add_bug_comment=True)
job.SetFailed()
elif bisect_results['status'] == 'Failure':
_PostFailedResult(job, bisect_results, issue_tracker)
job.SetFailed()
def _GetBisectResults(job):
"""Gets bisect results for a bisect job.
Args:
job: TryJob entity.
Returns:
A dictionary containing status, results, buildbot_log_url, and
issue_url for this bisect job. The issue_url may be a link to a Rietveld
issue or to Buildbucket job info.
"""
results = {}
# Fetch bisect bot results from Rietveld server.
if job.use_buildbucket:
try_job_info = _ValidateAndConvertBuildbucketResponse(
buildbucket_service.GetJobStatus(job.buildbucket_job_id), job)
hostname = app_identity.get_default_version_hostname()
job_id = job.buildbucket_job_id
issue_url = 'https://%s/buildbucket_job_status/%s' % (hostname, job_id)
else:
response = _FetchRietveldIssueJSON(job)
issue_url = _RietveldIssueURL(job)
try_job_info = _ValidateRietveldResponse(response)
results['buildbot_log_url'] = str(try_job_info['url'])
results['issue_url'] = str(issue_url)
# Check whether the bisect job is finished or not and fetch the output.
result = int(try_job_info['result'])
if result not in OK + FAIL:
return None
results_url = '%s/steps/Results/logs/stdio/text' % try_job_info['url']
response = _FetchURL(results_url, skip_status_code=True)
results['bisect_bot'] = try_job_info['builder']
# We don't see content for "Result" step. Bot probably did not get there.
if not response or response.status_code != 200:
results['status'] = 'Failure'
results['results'] = ''
build_data = _FetchBuildData(try_job_info['url'])
if build_data:
_CheckBisectBotForInfraFailure(job.bug_id, build_data,
try_job_info['url'])
results['results'] = _GetBotFailureInfo(build_data)
partial_result = _GetPartialBisectResult(build_data, try_job_info['url'])
if partial_result:
results['status'] = 'Failure with partial results'
results['results'] += partial_result
return results
# Clean result.
# If the bisect_results string contains any non-ASCII characters,
# converting to string should prevent an error from being raised.
bisect_result = _BeautifyContent(str(response.content))
# Bisect is considered success if result is provided.
# "BISECTION ABORTED" is added when a job is early aborted because the
# associated issue was closed.
# TODO(robertocn): Make sure we are outputting this string
if ('BISECT JOB RESULTS' in bisect_result or
'BISECTION ABORTED' in bisect_result):
results['status'] = 'Completed'
else:
results['status'] = 'Failure'
results['results'] = bisect_result
return results
def _FetchBuildData(build_url):
"""Fetches build data from buildbot json api.
For json api examples see:
http://build.chromium.org/p/tryserver.chromium.perf/json/help
Args:
build_url: URL to a Buildbot bisect tryjob.
Returns:
A dictionary of build data for a bisect tryjob. None if there's an
error fetching build data.
"""
index = build_url.find('/builders/')
if index == -1:
logging.error('Build url does not contain expected "/builders/" to '
'fetch json data. URL: %s.', build_url)
return None
# Fetch and verify json data.
json_build_url = build_url[:index] + '/json' + build_url[index:]
response = _FetchURL(json_build_url)
if not response:
logging.error('Could not fetch json data from %s.', json_build_url)
return None
try:
build_data = json.loads(response.content)
if (not build_data or
not build_data.get('steps') or
not build_data.get('times') or
not build_data.get('text')):
raise ValueError('Expected properties not found in build data: %s.' %
build_data)
except ValueError, e:
logging.error('Response from builder could not be parsed as JSON. '
'URL: %s. Error: %s.', json_build_url, e)
return None
return build_data
def _GetBotFailureInfo(build_data):
"""Returns helpful message about failed bisect runs."""
message = ''
# Add success rate message.
build_steps = build_data['steps']
num_success_build = 0
total_build = 0
for step in build_steps:
# 'Working on' is the step name for bisect run for a build.
if 'Working on' in step['name']:
if step['results'][0] in (SUCCESS, WARNINGS):
num_success_build += 1
total_build += 1
message += 'Completed %s/%s builds.\n' % (num_success_build, total_build)
# Add run time message.
run_time = build_data['times'][1] - build_data['times'][0]
run_time = int(run_time / 60) # Minutes.
message += 'Run time: %s/%s minutes.\n' % (run_time, _BISECT_BOT_TIMEOUT)
if run_time >= _BISECT_BOT_TIMEOUT:
message += 'Bisect timed out! Try again with a smaller revision range.\n'
# Add failed steps message.
# 'text' field has the following properties:
# text":["failed","slave_steps","failed","Working on [b92af3931458f2]"]
status_list = build_data['text']
if status_list[0] == 'failed':
message += 'Failed steps: %s\n\n' % ', '.join(status_list[1::2])
return message
def _GetPartialBisectResult(build_data, build_url):
"""Gets partial bisect result if there's any.
For bisect result output format see:
https://chromium.googlesource.com/chromium/src/+/master/tools/
auto_bisect/bisect_perf_regression.py
Args:
build_data: A dictionary of build data for a bisect tryjob.
build_url: URL to a Buildbot bisect tryjob.
Returns:
String result of bisect job.
"""
build_steps = build_data['steps']
# Search for the last successful bisect step.
pattern = re.compile(r'===== PARTIAL RESULTS =====(.*)\n\n', re.DOTALL)
for step in reversed(build_steps):
# 'Working on' is the step name for bisect run for a build.
if ('Working on' in step['name'] and
step['results'][0] in (SUCCESS, WARNINGS)):
stdio_url = ('%s/steps/%s/logs/stdio/text' %
(build_url, urllib.quote(step['name'])))
response = _FetchURL(stdio_url)
if response:
match = pattern.search(response.content)
if match:
return _BeautifyContent(match.group())
return None
def _PostFailedResult(
job, bisect_results, issue_tracker, add_bug_comment=False):
"""Posts failed bisect results on logger and optional issue tracker."""
comment = _BUG_COMMENT_TEMPLATE % bisect_results
if add_bug_comment:
# Set restrict view label if the bisect results are internal only.
labels = ['Restrict-View-Google'] if job.internal_only else None
added_comment = issue_tracker.AddBugComment(
job.bug_id, comment, labels=labels)
if not added_comment:
raise BugUpdateFailure('Failed to update bug %s with comment %s'
% (job.bug_id, comment))
start_try_job.LogBisectResult(job.bug_id, comment)
logging.info('Updated bug %s with results from %s',
job.bug_id, job.rietveld_issue_id)
def _PostSuccessfulResult(job, bisect_results, issue_tracker):
"""Posts successful bisect results on logger and issue tracker."""
# From the results, get the list of people to CC (if applicable), the bug
# to merge into (if applicable) and the commit hash cache key, which
# will be used below.
authors_to_cc = []
merge_issue = None
bug = ndb.Key('Bug', job.bug_id).get()
commit_cache_key = _GetCommitHashCacheKey(bisect_results['results'])
if bug and _BisectResultIsPositive(bisect_results['results']):
merge_issue = layered_cache.GetExternal(commit_cache_key)
if not merge_issue:
authors_to_cc = _GetAuthorsToCC(bisect_results['results'])
comment = _BUG_COMMENT_TEMPLATE % bisect_results
# Add a friendly message to author of culprit CL.
owner = None
if authors_to_cc:
comment = '%s%s' % (_AUTO_ASSIGN_MSG % {'author': authors_to_cc[0]},
comment)
owner = authors_to_cc[0]
# Set restrict view label if the bisect results are internal only.
labels = ['Restrict-View-Google'] if job.internal_only else None
added_comment = issue_tracker.AddBugComment(
job.bug_id, comment, cc_list=authors_to_cc, merge_issue=merge_issue,
labels=labels, owner=owner)
if not added_comment:
raise BugUpdateFailure('Failed to update bug %s with comment %s'
% (job.bug_id, comment))
start_try_job.LogBisectResult(job.bug_id, comment)
logging.info('Updated bug %s with results from %s',
job.bug_id, job.rietveld_issue_id)
if merge_issue:
_MapAnomaliesToMergeIntoBug(merge_issue, job.bug_id)
# Mark the duplicate bug's Bug entity status as closed so that
# it doesn't get auto triaged.
bug.status = bug_data.BUG_STATUS_CLOSED
bug.put()
# Cache the commit info and bug ID to datastore when there is no duplicate
# issue that this issue is getting merged into. This has to be done only
# after the issue is updated successfully with bisect information.
if commit_cache_key and not merge_issue:
layered_cache.SetExternal(commit_cache_key, str(job.bug_id),
days_to_keep=30)
logging.info('Cached bug id %s and commit info %s in the datastore.',
job.bug_id, commit_cache_key)
def _ValidateAndConvertBuildbucketResponse(job_info, job=None):
"""Checks the response from the buildbucket service and converts it.
The response is converted to a similar format to that used by Rietveld for
backwards compatibility.
Args:
job_info: A dictionary containing the response from the buildbucket service.
job: Bisect TryJob entity object.
Returns:
Try job info dict in the same format as _ValidateRietveldResponse; will
have the keys "url", "results", and "bisect_bot".
Raises:
UnexpectedJsonError: The format was not as expected.
"""
job_info = job_info['build']
json_response = json.dumps(job_info)
if not job_info:
raise UnexpectedJsonError('No response from Buildbucket.')
if job_info.get('result') is None:
raise UnexpectedJsonError('No "result" in try job results. '
'Buildbucket response: %s' % json_response)
  # This is a case where the buildbucket job was triggered but never got
  # scheduled on buildbot, probably due to a long pending job queue.
if (job_info.get('status') == 'COMPLETED' and
job_info.get('result') == 'CANCELED' and
job_info.get('cancellation_reason') == 'TIMEOUT'):
job.SetFailed()
raise UnexpectedJsonError('Try job timed out before it got scheduled. '
'Buildbucket response: %s' % json_response)
# This is a case where the buildbucket job failed due to invalid config.
if (job_info.get('status') == 'COMPLETED' and
job_info.get('result') == 'FAILURE' and
job_info.get('failure_reason') == 'INVALID_BUILD_DEFINITION'):
job.SetFailed()
job.key.delete()
raise UnexpectedJsonError('Invalid job configuration. '
'Buildbucket response: %s' % json_response)
if job_info.get('url') is None:
raise UnexpectedJsonError('No "url" in try job results. This could mean '
'that the job has not started. '
'Buildbucket response: %s' % json_response)
try:
result_details = json.loads(job_info['result_details_json'])
bisect_config = result_details['properties']['bisect_config']
job_info['builder'] = bisect_config['recipe_tester_name']
except (KeyError, ValueError, TypeError):
# If the tester name isn't found here, this is unexpected but non-fatal.
job_info['builder'] = 'Unknown'
logging.error('Failed to extract tester name from JSON: %s', json_response)
job_info['result'] = _BuildbucketStatusToStatusConstant(
job_info['status'], job_info['result'])
return job_info
def _ValidateRietveldResponse(response):
"""Checks the response from Rietveld to see if the JSON format is right.
Args:
response: A Response object, should have a string content attribute.
Returns:
Try job info dict, guaranteed to have the keys "url" and "result".
Raises:
UnexpectedJsonError: The format was not as expected.
"""
if not response:
raise UnexpectedJsonError('No response from Rietveld.')
try:
issue_data = json.loads(response.content)
except ValueError:
raise UnexpectedJsonError('Response from Rietveld could not be parsed '
'as JSON: %s' % response.content)
# Check whether we can get the results from the issue data response.
if not issue_data.get('try_job_results'):
raise UnexpectedJsonError('Empty "try_job_results" in Rietveld response. '
'Response: %s.' % response.content)
try_job_info = issue_data['try_job_results'][0]
if not try_job_info:
raise UnexpectedJsonError('Empty item in try job results. '
'Rietveld response: %s' % response.content)
if try_job_info.get('result') is None:
raise UnexpectedJsonError('No "result" in try job results. '
'Rietveld response: %s' % response.content)
if try_job_info.get('url') is None:
raise UnexpectedJsonError('No "url" in try job results. This could mean '
'that the job has not started. '
'Rietveld response: %s' % response.content)
return try_job_info
def _MapAnomaliesToMergeIntoBug(dest_bug_id, source_bug_id):
"""Maps anomalies from source bug to destination bug.
Args:
dest_bug_id: Merge into bug (base bug) number.
source_bug_id: The bug to be merged.
"""
query = anomaly.Anomaly.query(
anomaly.Anomaly.bug_id == int(source_bug_id))
anomalies = query.fetch()
for anomaly_entity in anomalies:
anomaly_entity.bug_id = int(dest_bug_id)
ndb.put_multi(anomalies)
def _CheckBisectBotForInfraFailure(bug_id, build_data, build_url):
"""Logs bisect failures related to infrastructure.
Args:
bug_id: Bug number.
build_data: A dictionary of build data for a bisect tryjob.
build_url: URL to a Buildbot bisect tryjob.
TODO(chrisphan): Remove this once we get an idea of the rate of infra related
failures.
"""
build_steps = build_data['steps']
  # If there's no bisect script step, then it is considered an infra issue.
slave_step_index = _GetBisectScriptStepIndex(build_steps)
if not slave_step_index:
_LogBisectInfraFailure(bug_id, 'Bot failure.', build_url)
return
# Timeout failure is our problem.
run_time = build_data['times'][1] - build_data['times'][0]
run_time = int(run_time / 60) # Minutes.
if run_time >= _BISECT_BOT_TIMEOUT:
return
# Any build failure is an infra issue.
# These flags are output by bisect_perf_regression.py.
build_failure_flags = [
'Failed to build revision',
'Failed to produce build',
'Failed to perform pre-sync cleanup',
'Failed to sync',
'Failed to run [gclient runhooks]',
]
slave_step = build_steps[slave_step_index]
stdio_url = ('%s/steps/%s/logs/stdio/text' %
(build_url, urllib.quote(slave_step['name'])))
response = _FetchURL(stdio_url)
if response:
for flag in build_failure_flags:
if flag in response.content:
_LogBisectInfraFailure(bug_id, 'Build failure.', build_url)
return
def _GetBisectScriptStepIndex(build_steps):
"""Gets the index of step that run bisect script in build step data."""
index = 0
for step in build_steps:
if step['name'] in ['slave_steps', 'Running Bisection']:
return index
index += 1
return None
def _LogBisectInfraFailure(bug_id, failure_message, stdio_url):
"""Adds infrastructure related bisect failures to log."""
comment = failure_message + '\n'
comment += ('<a href="https://chromeperf.appspot.com/group_report?'
'bug_id=%s">%s</a>\n' % (bug_id, bug_id))
comment += 'Buildbot stdio: <a href="%s">%s</a>\n' % (stdio_url, stdio_url)
formatter = quick_logger.Formatter()
logger = quick_logger.QuickLogger('bisect_failures', 'infra', formatter)
logger.Log(comment)
logger.Save()
def _GetCommitHashCacheKey(results_output):
"""Gets a commit hash cache key for the given bisect results output.
Args:
results_output: The bisect results output.
Returns:
A string to use as a layered_cache key, or None if we don't want
to merge any bugs based on this bisect result.
"""
if not _BisectResultIsPositive(results_output):
return None
commits_list = re.findall(r'Commit : (.*)', results_output)
if len(commits_list) != 1:
return None
return _COMMIT_HASH_CACHE_KEY % commits_list[0].strip()
def _BisectResultIsPositive(results_output):
"""Returns True if the bisect found a culprit with high confidence."""
return 'Status: Positive' in results_output
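# Behavior sketch (the sample string below is made up, not real bisect output):
#   sample = 'Status: Positive\nCommit : abc123'
#   _BisectResultIsPositive(sample)   # -> True
#   _GetCommitHashCacheKey(sample)    # -> _COMMIT_HASH_CACHE_KEY % 'abc123'
# A non-positive status, or more than one 'Commit :' line, yields None.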
def _GetAuthorsToCC(results_output):
"""Makes a list of email addresses that we want to CC on the bug.
TODO(qyearsley): Make sure that the bisect result bot doesn't cc
non-googlers on Restrict-View-Google bugs. This might be done by making
a request for labels for the bug (or by making a request for alerts in
the datastore for the bug id and checking the internal-only property).
Args:
results_output: The bisect results output.
Returns:
A list of email addresses, possibly empty.
"""
author_lines = re.findall(r'Author : (.*)', results_output)
unique_emails = set()
for line in author_lines:
parts = line.split(',')
unique_emails.update(p.strip() for p in parts if '@' in p)
emails = sorted(unique_emails)
# Avoid CCing issue to multiple authors when bisect finds multiple
# different authors for culprits CLs.
if len(emails) > 1:
emails = []
if len(emails) == 1:
# In addition to the culprit CL author, we also want to add reviewers
# of the culprit CL to the cc list.
emails.extend(_GetReviewersFromBisectLog(results_output))
return emails
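# Behavior sketch (hypothetical log lines): a single 'Author : [email protected]'
# line yields ['[email protected]'] plus any reviewers parsed from the bisect log;
# two or more distinct author emails yield an empty list.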
def _GetReviewersFromBisectLog(results_output):
"""Parse bisect log and gets reviewers email addresses from Rietveld issue.
Note: This method doesn't get called when bisect reports multiple CLs by
different authors, but will get called when there are multiple CLs by the
same owner.
Args:
results_output: Bisect results output.
Returns:
List of email addresses from the committed CL.
"""
reviewer_list = []
revisions_list = re.findall(r'Link : (.*)', results_output)
revisions_links = {rev.strip() for rev in revisions_list}
  # Sometimes the revision page content consists of multiple "Review URL"
  # strings due to reverted CLs; such CLs are prefixed with ">" (&gt;) symbols.
  # We should only parse the CL link corresponding to the revision found by the bisect.
link_pattern = (r'(?<!>\s)Review URL: <a href=[\'"]'
r'https://codereview.chromium.org/(\d+)[\'"].*>')
for link in revisions_links:
# Fetch the commit links in order to get codereview link
response = _FetchURL(link)
if not response:
continue
rietveld_issue_ids = re.findall(link_pattern, response.content)
for issue_id in rietveld_issue_ids:
# Fetch codereview link, and get reviewer email addresses from the
# response JSON.
issue_response = _FetchURL(
'https://codereview.chromium.org/api/%s' % issue_id)
if not issue_response:
continue
issue_data = json.loads(issue_response.content)
reviewer_list.extend([str(item) for item in issue_data['reviewers']])
return reviewer_list
def _BeautifyContent(response_data):
"""Strip lines begins with @@@ and strip leading and trailing whitespace."""
pattern = re.compile(r'@@@.*@@@.*\n')
response_str = re.sub(pattern, '', response_data)
new_response = [line.strip() for line in response_str.split('\n')]
response_str = '\n'.join(new_response)
delimiter = '---bisect results start here---'
if delimiter in response_str:
response_str = response_str.split(delimiter)[1]
return response_str.rstrip()
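# Behavior sketch (made-up input): lines matching '@@@...@@@' are removed, every
# remaining line is stripped, and only the text after the
# '---bisect results start here---' delimiter (if present) is kept.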
def _FetchURL(request_url, skip_status_code=False):
"""Wrapper around URL fetch service to make request.
Args:
request_url: URL of request.
skip_status_code: Skips return code check when True, default is False.
Returns:
Response object return by URL fetch, otherwise None when there's an error.
"""
logging.info('URL being fetched: ' + request_url)
try:
response = urlfetch.fetch(request_url)
except urlfetch_errors.DeadlineExceededError:
logging.error('Deadline exceeded error checking %s', request_url)
return None
except urlfetch_errors.DownloadError as err:
# DownloadError is raised to indicate a non-specific failure when there
# was not a 4xx or 5xx status code.
logging.error(err)
return None
if skip_status_code:
return response
elif response.status_code != 200:
logging.error(
'ERROR %s checking %s', response.status_code, request_url)
return None
return response
def _FetchRietveldIssueJSON(job):
server = rietveld_service.RietveldService(internal_only=job.internal_only)
path = 'api/%d/%d' % (job.rietveld_issue_id, job.rietveld_patchset_id)
return server.MakeRequest(path, method='GET')
def _RietveldIssueURL(job):
config = rietveld_service.GetDefaultRietveldConfig()
host = config.internal_server_url if job.internal_only else config.server_url
return '%s/%d' % (host, job.rietveld_issue_id)
def _BuildbucketStatusToStatusConstant(status, result):
"""Converts the string status from buildbucket to a numeric constant."""
# TODO(robertocn): We might need to make a difference between
# - Scheduled and Started
# - Failure and Cancelled.
if status == 'COMPLETED':
if result == 'SUCCESS':
return SUCCESS
return FAILURE
return STARTED
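# Mapping sketch: ('COMPLETED', 'SUCCESS') -> SUCCESS,
# ('COMPLETED', <any other result>) -> FAILURE, and any other status -> STARTED.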
def _CheckFYIBisectJob(job, issue_tracker):
bisect_results = _GetBisectResults(job)
if not bisect_results:
logging.info('Bisect FYI: [%s] No bisect results, job might be pending.',
job.job_name)
return
logging.info('Bisect FYI: [%s] Bisect job status: %s.',
job.job_name, bisect_results['status'])
try:
if bisect_results['status'] == 'Completed':
_PostSuccessfulResult(job, bisect_results, issue_tracker)
# Below in VerifyBisectFYIResults we verify whether the actual
# results matches with the expectations; if they don't match then
# bisect_results['status'] gets set to 'Failure'.
bisect_fyi.VerifyBisectFYIResults(job, bisect_results)
# Verify whether the issue is updated with bisect results, if not
# then mark the results status='Failure'.
bisect_fyi.VerifyBugUpdate(job, issue_tracker, bisect_results)
elif 'Failure' in bisect_results['status']:
_PostFailedResult(
job, bisect_results, issue_tracker, add_bug_comment=True)
bisect_results['errors'] = 'Bisect FYI job failed:\n%s' % bisect_results
except BugUpdateFailure as e:
bisect_results['status'] = 'Failure'
bisect_results['error'] = 'Bug update Failed: %s' % e
finally:
_SendFYIBisectEmail(job, bisect_results)
job.key.delete()
def _SendFYIBisectEmail(job, results):
"""Sends an email to auto-bisect-team about FYI bisect results."""
  # Don't send email when the test case passes.
if results.get('status') == 'Completed':
logging.info('Test Passed: %s.\n Results: %s', job.job_name, results)
return
email_data = email_template.GetBisectFYITryJobEmail(job, results)
if not email_data:
logging.error('Failed to create "email_data" from results for %s.\n'
' Results: %s', job.job_name, results)
return
mail.send_mail(sender='[email protected]',
to='[email protected]',
subject=email_data['subject'],
body=email_data['body'],
html=email_data['html'])
| [
"[email protected]"
] | |
13c0080c9a926d91ea9068b73bd41eb76d621568 | 591feda5a13fc2e80cbe5b95a81b04b12b9f2ab0 | /ediel/filename.py | ed7a2438ae4e2d8c101d59c3abf8d7a725143408 | [
"MIT"
] | permissive | EnergieID/ediel | 19d2d09a130db831ecab146d630af4bc25b20bfd | ba900d0121af7e6f125515d0de190a48738bb9d9 | refs/heads/master | 2023-06-25T05:24:43.387939 | 2023-06-14T12:15:32 | 2023-06-14T12:15:32 | 84,204,819 | 2 | 0 | null | 2017-10-30T15:37:58 | 2017-03-07T13:53:38 | Python | UTF-8 | Python | false | false | 1,136 | py | """
Module to do MIG filename parsing and regex matching
"""
import glob
import re
from typing import Optional, Iterator
NPS_PATTERN = r'(?P<path>(?:.*\/)?(?P<filename>(?P<sender>[0-9]{13})\.(?P<receiver>[0-9]{13})\.(?P<sequence>[0-9]*)\.(?P<export>EXPORT(?P<export_no>[0-9]{2})[^\.]*)\.(?P<mig>MIG[^\.]*)\.csv))'
def match_filename(filename: str) -> Optional[dict]:
"""
Match the filename to the NPS Pattern
Parameters
----------
filename : str
Returns
-------
dict
A dict with the full file path, filename, and components
If no match is found, None is returned
"""
r = re.match(string=filename, pattern=NPS_PATTERN, flags=re.I)
if r:
return r.groupdict()
else:
return None
def find_files(pathname: str) -> Iterator[dict]:
"""
Finds all files that match the NPS Pattern in a given directory
Parameters
----------
pathname : str
directory to search in
Yields
-------
dict
"""
for filename in glob.iglob(f'{pathname}/*'):
r = match_filename(filename=filename)
if r:
yield r | [
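# Minimal usage sketch; the filename below is a made-up example that happens to
# match NPS_PATTERN, not a real NPS export file.
if __name__ == '__main__':
    example = '1234567890123.9876543210987.42.EXPORT01FOO.MIG7.csv'
    print(match_filename(example))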
"[email protected]"
] | |
695a439a5eff644df7f057083ab7d8912a4bff8a | a6e82e7394fd537f89d79a27067b4380a2d89230 | /sistemaRM/forms.py | 48277a7d514817111a788349ae634ca6a8d34abf | [] | no_license | jesustr52/proyecto_ReynaMadre | 504a38b890356e6ded99c2738aeb4993a24422ae | 567dc13198fad5ac873c3430b2d14faae532b6be | refs/heads/main | 2023-08-16T13:13:47.065170 | 2021-10-02T05:02:00 | 2021-10-02T05:02:00 | 412,693,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | from django import forms
from .models import *
class EmpleadoForm(forms.ModelForm):
class Meta:
model = Empleado
fields = ['nombreE', 'fechaNacimiento', 'email', 'genero', 'telefono', 'celular', 'empresa', 'departamento', 'fechaIngreso']
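# Hypothetical view-side usage sketch (not part of the original file):
#   form = EmpleadoForm(request.POST or None)
#   if form.is_valid():
#       form.save()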
| [
"[email protected]"
] | |
3e6a2db3fe62dd58d8dc55dfde63937829daed4c | 9162e68bc67facb6108973ab1f3c79cc0b8b3680 | /forms/run_form.py | 6f9f5dd7db5abff5f202eca4336183707a9fd07c | [] | no_license | ghamarian/tf3 | 0570e0d1911083333614acad9fc0a52822a69770 | 4873906669ee9bde30c32da0abb09b2a2821c165 | refs/heads/master | 2020-03-16T16:08:29.714764 | 2018-08-01T08:44:24 | 2018-08-01T08:44:24 | 132,774,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from flask_wtf import FlaskForm
from wtforms import SubmitField, BooleanField
from wtforms.widgets import HTMLString, html_params, Input
class RunForm(FlaskForm):
submit = SubmitField("Run")
| [
"[email protected]"
] | |
1d9116e8334293b95c02095e5236e4c85762a45e | 2649bdbeb09a9efcd37162cacb58d8bc4727d8e7 | /userLog/apps.py | 97a26772c654d38ef910fd63e30ca699922deb1c | [] | no_license | vishwajeet1/testapp | 04fbf1091ab6b5875acd1d44ac86f5a29ee296f8 | 71d803bc11be29633cc6662c87365fdfe3fbd4c8 | refs/heads/master | 2021-01-14T19:28:15.129707 | 2020-02-27T09:55:18 | 2020-02-27T09:55:18 | 242,730,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89 | py | from django.apps import AppConfig
class UserlogConfig(AppConfig):
name = 'userLog'
| [
"[email protected]"
] | |
4d386ab887ac7ca42972e38bdea98c5820ab2b7f | b342ee1cb11dccc608aea374c2311b94dc54e79e | /askcode/migrations/versions/1926ca1f1d65_fakeuser.py | 1c3d39325880173beafe011ad759bc287aa0f00f | [] | no_license | mcnigno/askcode | d7c614d23437e66c5beacb2a7f38aa6764ecc872 | f74a886817cf4973ada7e44be94141c0f154465c | refs/heads/master | 2020-04-14T12:02:02.013367 | 2019-01-28T07:14:08 | 2019-01-28T07:14:08 | 163,828,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | """fakeuser
Revision ID: 1926ca1f1d65
Revises: d9480694d971
Create Date: 2019-01-01 11:09:19.171518
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1926ca1f1d65'
down_revision = 'd9480694d971'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('ab_user', 'group')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('ab_user', sa.Column('group', sa.VARCHAR(length=256), nullable=True))
# ### end Alembic commands ###
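# Invocation sketch (standard Alembic workflow, assuming a configured
# alembic.ini):
#   alembic upgrade 1926ca1f1d65
#   alembic downgrade d9480694d971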
| [
"[email protected]"
] | |
840752b1ffa1b4e8ebc61b443df2cab733c939cf | 787bac92000d751d30a8c8fe8dad99f6b1110f5b | /setup.py | 73eeb1703bd70197004a12c47437e4eb0e4c3d40 | [
"MIT"
] | permissive | inglesp/kith | 844ee598d033d353c05d5a1ac374899f90423fcc | 9f783020adc167287a581f6140e42a8b1854f2ca | refs/heads/master | 2021-01-09T20:41:41.651244 | 2016-07-18T22:47:55 | 2016-07-18T22:47:55 | 63,642,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 789 | py | from setuptools import find_packages, setup
import os
import re
def read(*parts):
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, *parts)) as f:
return f.read()
VERSION = re.search(
"^__version__ = '(.*)'$",
read('src', 'kith', '__init__.py'),
re.MULTILINE
).group(1)
if __name__ == '__main__':
setup(
name='kith',
version=VERSION,
description='A little relational algebra engine',
long_description=read('README.rst'),
packages=find_packages(where='src'),
package_dir={'': 'src'},
url='http://github.com/inglesp/kith',
author='Peter Inglesby',
author_email='[email protected]',
license='License :: OSI Approved :: MIT License',
)
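# Build/install sketch (standard setuptools workflow, nothing project-specific):
#   pip install -e .          # editable install for development
#   python setup.py sdist     # source distribution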
| [
"[email protected]"
] | |
2296afa25f0b11755c3fde80f5eb65bfb16c1674 | 054bc8696bdd429e2b3ba706feb72c0fb604047f | /python/stats/FisherExactTest/FisherExactTestScipy.py | f608232c82c5f8b4cc40b6722b4003e21f40407b | [] | no_license | wavefancy/WallaceBroad | 076ea9257cec8a3e1c8f53151ccfc7c5c0d7200f | fbd00e6f60e54140ed5b4e470a8bdd5edeffae21 | refs/heads/master | 2022-02-22T04:56:49.943595 | 2022-02-05T12:15:23 | 2022-02-05T12:15:23 | 116,978,485 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,623 | py | #!/usr/bin/env python3
"""
FisherExactTest for 2by2 table.
@Author: [email protected]
Usage:
FisherExactTestScipy.py -c cols -t cols [-a alternative] [--pd int]
FisherExactTestScipy.py -h | --help | -v | --version | -f | --format
Notes:
1. Read results from stdin, and output results to stdout,
*** add two columns for pvalue and odds ratio
        *** Note: if some cells are 0, scipy reports an odds ratio of 0 or inf;
        *** the 0.5 continuity correction is present only as commented-out code.
2. See example by -f.
Options:
-c cols Column indexes for treat1, eg: 1,2. Index started from 1.
-t cols Column indexes for treat2, eg: 3,4. Index started from 1.
    -a int [1|2] Alternative for the test, default: 'two-sided'.
1: 'less', test depletion of the first element in treat1.
2: 'greater', test enrichment of the first element in treat1.
--pd int Print int decimal points.
-h --help Show this screen.
-v --version Show version.
-f --format Show input/output file format example.
"""
import sys
from docopt import docopt
from signal import signal, SIGPIPE, SIG_DFL
from scipy import stats
signal(SIGPIPE,SIG_DFL) #prevent IOError: [Errno 32] Broken pipe. If pipe closed by 'head'.
def ShowFormat():
'''Input File format example:'''
print('''
#example input
-----------------------------
x1 7 0 186 95
x2 31 10 3183 1731
x3 x x x x
# cat test.txt | python3 FisherExactTestScipy.py -c 2,3 -t 4,5
-----------------------------
x1 7 0 186 95 9.9849e-02 inf
x2 31 10 3183 1731 1.8838e-01 1.6859e+00
x3 x x x x NA NA
''');
if __name__ == '__main__':
args = docopt(__doc__, version='1.0')
#print(args)
if(args['--format']):
ShowFormat()
sys.exit(-1)
#api:
#http://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.fisher_exact.html
# 'two-sided', 'less', 'greater'
alternative = 'two-sided'
if args['-a']:
if args['-a'] == '1':
alternative = 'less'
elif args['-a'] == '2':
alternative = 'greater'
pdecimal = int(args['--pd']) if args['--pd'] else 4
t1Index = [int(x) -1 for x in args['-c'].split(',')]
t2Index = [int(x) -1 for x in args['-t'].split(',')]
INF = float('inf')
fstring = '%.'+str(pdecimal)+'f'
estring = '%.'+str(pdecimal)+'e'
# print(fstring)
for line in sys.stdin:
line = line.strip()
if line:
ss = line.split()
try:
c1 = [int(ss[x]) for x in t1Index]
c2 = [int(ss[x]) for x in t2Index]
                oddsratio, pvalue = stats.fisher_exact([c1, c2], alternative=alternative)
# if out[1] == 0 or out[1] == INF:
# oddsratio = (c1[0]+0.5) / (c1[1] + 0.5) / ((c2[0] + 0.5) / (c2[1] + 0.5))
# out[1] = oddsratio
sys.stdout.write('%s\t'%(line))
sys.stdout.write('%s\t'%(estring%(pvalue)))
sys.stdout.write('%s'%(estring%(oddsratio)))
# sys.stdout.write('\t'.join([fstring%(x) for x in out[1:]]))
sys.stdout.write('\n')
except ValueError:
# sys.stderr.write('WARNING: parse int error for line(skipped): %s\n'%(line))
sys.stderr.write('%s\tNA\tNA\n'%(line))
sys.stdout.flush()
sys.stdout.close()
sys.stderr.flush()
sys.stderr.close()
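# Worked sketch for the first --format row: stats.fisher_exact([[7, 0], [186, 95]])
# gives p ~ 9.98e-02 with an odds ratio of inf, since the zero cell makes the
# sample odds ratio (7*95)/(0*186) undefined; the commented-out 0.5 correction
# above would yield a finite estimate instead.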
| [
"[email protected]"
] | |
df2eff0642c75f5be09e49609c2ae4ff9a7b0156 | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_api_service_status.py | 8859f1942cf458e5bd1ae12c8f6107c50730d798 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 2,933 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1APIServiceStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {'conditions': 'list[V1APIServiceCondition]'}
attribute_map = {'conditions': 'conditions'}
def __init__(self, conditions=None):
"""
V1APIServiceStatus - a model defined in Swagger
"""
self._conditions = None
self.discriminator = None
if conditions is not None:
self.conditions = conditions
@property
def conditions(self):
"""
Gets the conditions of this V1APIServiceStatus.
Current service state of apiService.
:return: The conditions of this V1APIServiceStatus.
:rtype: list[V1APIServiceCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""
Sets the conditions of this V1APIServiceStatus.
Current service state of apiService.
:param conditions: The conditions of this V1APIServiceStatus.
:type: list[V1APIServiceCondition]
"""
self._conditions = conditions
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1APIServiceStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
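# Usage sketch (hypothetical empty condition list):
#   status = V1APIServiceStatus(conditions=[])
#   status.to_dict()  # -> {'conditions': []}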
| [
"[email protected]"
] | |
aeb5bfff15260d043fd102e5bef6fe4304e669f8 | d3dc72ab96ddf3c201237884f7722cc6bf6b44d9 | /dezero/core.py | 3f08161235443506e3b420f84f5d99f295c725b9 | [] | no_license | lacrosse91/my_tf | f767ce76de6b2dd4657d003ae947d0dc39ef5354 | 5f384ba31264c1b783e2a2c494c80c0b8f7995f4 | refs/heads/main | 2023-01-02T08:14:12.487976 | 2020-10-26T02:09:28 | 2020-10-26T02:09:28 | 304,539,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,692 | py | import weakref
import numpy as np
import contextlib
import dezero
class Config:
enable_backprop = True
@contextlib.contextmanager
def using_config(name, value):
old_value = getattr(Config, name)
setattr(Config, name, value)
try:
yield
finally:
setattr(Config, name, old_value)
def no_grad():
return using_config('enable_backprop', False)
class Variable:
__array_priority__ = 200
def __init__(self, data, name=None):
if data is not None:
if not isinstance(data, np.ndarray):
raise TypeError('{} is not supported'.format(type(data)))
self.data = data
self.name = name
self.grad = None
self.creator = None
self.generation = 0
@property
def shape(self):
return self.data.shape
@property
def ndim(self):
return self.data.ndim
@property
def size(self):
return self.data.size
@property
def dtype(self):
return self.data.dtype
@property
def T(self):
return dezero.functions.transpose(self)
def __len__(self):
return len(self.data)
def __repr__(self):
if self.data is None:
return 'variable(None)'
p = str(self.data).replace('\n', '\n' + ' ' * 9)
return 'variable(' + p + ')'
def set_creator(self, func):
self.creator = func
self.generation = func.generation + 1
def cleargrad(self):
self.grad = None
def backward(self, retain_grad=False,
create_graph=False):
if self.grad is None:
self.grad = Variable(np.ones_like(self.data))
funcs = []
seen_set = set()
def add_func(f):
if f not in seen_set:
funcs.append(f)
seen_set.add(f)
funcs.sort(key=lambda x: x.generation)
add_func(self.creator)
while funcs:
f = funcs.pop()
gys = [output().grad for output in f.outputs] # output is weakref
with using_config('enable_backprop', create_graph):
gxs = f.backward(*gys)
if not isinstance(gxs, tuple):
gxs = (gxs,)
for x, gx in zip(f.inputs, gxs):
if x.grad is None:
x.grad = gx
else:
x.grad = x.grad + gx
if x.creator is not None:
add_func(x.creator)
if not retain_grad:
for y in f.outputs:
y().grad = None # y is weakref
def reshape(self, *shape):
if len(shape) == 1 and isinstance(shape[0], (tuple, list)):
shape = shape[0]
return dezero.functions.reshape(self, shape)
def transpose(self):
return dezero.functions.transpose(self)
def as_variable(obj):
if isinstance(obj, Variable):
return obj
return Variable(obj)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
class Function:
def __call__(self, *inputs):
inputs = [as_variable(x) for x in inputs]
xs = [x.data for x in inputs]
ys = self.forward(*xs)
if not isinstance(ys, tuple):
ys = (ys,)
outputs = [Variable(as_array(y)) for y in ys]
if Config.enable_backprop:
self.generation = max([x.generation for x in inputs])
for output in outputs:
output.set_creator(self)
self.inputs = inputs
self.outputs = [weakref.ref(output) for output in outputs]
return outputs if len(outputs) > 1 else outputs[0]
def forward(self, xs):
raise NotImplementedError()
def backward(self, gys):
raise NotImplementedError()
class Add(Function):
def forward(self, x0, x1):
y = x0 + x1
return y
def backward(self, gy):
return gy, gy
def add(x0, x1):
x1 = as_array(x1)
return Add()(x0, x1)
class Mul(Function):
def forward(self, x0, x1):
y = x0 * x1
return y
def backward(self, gy):
x0, x1 = self.inputs
return gy * x1, gy * x0
def mul(x0, x1):
x1 = as_array(x1)
return Mul()(x0, x1)
class Neg(Function):
def forward(self, x):
return -x
def backward(self, gy):
return -gy
def neg(x):
return Neg()(x)
class Sub(Function):
def forward(self, x0, x1):
y = x0 - x1
return y
def backward(self, gy):
return gy, -gy
def sub(x0, x1):
x1 = as_array(x1)
return Sub()(x0, x1)
def rsub(x0, x1):
x1 = as_array(x1)
return sub(x1, x0)
class Div(Function):
def forward(self, x0, x1):
y = x0 / x1
return y
def backward(self, gy):
x0, x1 = self.inputs
gx0 = gy / x1
gx1 = gy * (-x0 / x1 ** 2)
return gx0, gx1
def div(x0, x1):
x1 = as_array(x1)
return Div()(x0, x1)
def rdiv(x0, x1):
x1 = as_array(x1)
return div(x1, x0)
class Pow(Function):
def __init__(self, c):
self.c = c
def forward(self, x):
y = x ** self.c
return y
def backward(self, gy):
x, = self.inputs
c = self.c
gx = c * x ** (c - 1) * gy
return gx
def pow(x, c):
return Pow(c)(x)
def setup_variable():
Variable.__add__ = add
Variable.__radd__ = add
Variable.__mul__ = mul
Variable.__rmul__ = mul
Variable.__neg__ = neg
Variable.__sub__ = sub
Variable.__rsub__ = rsub
Variable.__truediv__ = div
Variable.__rtruediv__ = rdiv
Variable.__pow__ = pow
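# Minimal smoke-test sketch (assumes setup_variable() has been called first):
#   setup_variable()
#   x = Variable(np.array(2.0))
#   y = x ** 3 + 2 * x
#   y.backward()
#   print(x.grad)  # -> variable(14.0)  (dy/dx = 3*x**2 + 2 at x = 2)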
| [
"[email protected]"
] | |
44be647875cf30289b42eb843dacd132d0c8a7a5 | 9ddb6f63d49d8a84203c212407c6c9b0014a8a64 | /HIPRFISH_probe_design/Snakefile | aa9212e3593f3e8f2636207ba4ffe769c932e284 | [] | no_license | proudquartz/hiprfish-probe-design | c723f25da9b2b66bd8b2fdfd598a1ed9e56e9225 | 64e3c44d38f3079bad755362c51ccf4fbbae7710 | refs/heads/master | 2020-07-07T00:35:10.635087 | 2019-09-14T19:17:03 | 2019-09-14T19:17:03 | 203,187,836 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,707 | """
Collect HiPRFISH probe design results
Hao Shi 2019
De Vlaminck Lab
Cornell University
"""
###############################################################################################################
# HiPR-FISH: Simulation of HiPR-FISH probes with different design parameters
###############################################################################################################
import pandas as pd
from Bio import SeqIO
from Bio import Seq
import re
import glob
import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
###############################################################################################################
# Helper functions
###############################################################################################################
def get_simulation_summary_filenames(sim_input_filename):
sim_output_filename = re.sub('.csv', '_results.csv', sim_input_filename)
return(sim_output_filename)
def get_simulation_list(sim_input_filename, data_dir):
sim_tab = pd.read_csv(sim_input_filename)
design_list = sim_tab.DESIGN_ID.tolist()
return(design_list, sim_tab)
def get_sample_input_fasta_filename(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
input_fasta_filename = data_dir + '/%s/input/%s.fasta' % (sample, sample)
return(input_fasta_filename)
def get_sample_directory(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
sample_dir = data_dir + '/%s' % (sample)
return(sample_dir)
def get_sim_sample(sim_tab, design_id):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
return(sample)
def get_sim_target_rank(sim_tab, design_id):
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
return(target_rank)
def get_sim_similarity(sim_tab, design_id):
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
return(similarity)
def get_sim_mch(sim_tab, design_id):
mch = sim_tab[sim_tab.DESIGN_ID == design_id].MAX_CONTINUOUS_HOMOLOGY.values[0]
return(mch)
def get_sim_mintm(sim_tab, design_id):
mintm = sim_tab[sim_tab.DESIGN_ID == design_id].MIN_TM.values[0]
return(mintm)
def get_sim_maxtm(sim_tab, design_id):
maxtm = sim_tab[sim_tab.DESIGN_ID == design_id].MAX_TM.values[0]
return(maxtm)
def get_sim_gc(sim_tab, design_id):
gc = sim_tab[sim_tab.DESIGN_ID == design_id].GC.values[0]
return(gc)
def get_sim_otu(sim_tab, design_id):
otu = sim_tab[sim_tab.DESIGN_ID == design_id].OTU.values[0]
return(otu)
def get_sim_probe_selection_method(sim_tab, design_id):
probe_selection_method = sim_tab[sim_tab.DESIGN_ID == design_id].PROBE_SELECTION_METHOD.values[0]
return(probe_selection_method)
def get_sim_tpn(sim_tab, design_id):
tpn = sim_tab[sim_tab.DESIGN_ID == design_id].TPN.values[0]
return(tpn)
def get_oriented_fasta_filename(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
oriented_fasta_filename = data_dir + '/%s/utilities/%s.oriented.fasta' % (sample, sample)
return(oriented_fasta_filename)
def get_taxon_probes_output_directory(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
taxon_probes_output_directory = data_dir + '/%s/%s/s_%s/blast' % (sample, target_rank, str(similarity))
return(taxon_probes_output_directory)
def get_blast_output_directory(sim_tab, design_id, data_dir):
    sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
    target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
    similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
    taxon_blast_output_directory = data_dir + '/%s/%s/s_%s/primer3' % (sample, target_rank, str(similarity))
    return(taxon_blast_output_directory)
def get_consensus_directory(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
consensus_directory = data_dir + '/%s/%s/s_%s/consensus' % (sample, target_rank, str(similarity))
return(consensus_directory)
def get_cluster_lookup_filename(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
cluster_lookup_filename = data_dir + '/%s/%s/s_%s/consensus/cluster_lookup.tab' % (sample, target_rank, str(similarity))
return(cluster_lookup_filename)
def get_evaluation_directory(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
evaluation_directory = data_dir + '/%s/%s/s_%s/evaluation' % (sample, target_rank, str(similarity))
return(evaluation_directory)
def get_selection_directory(sim_tab, design_id, data_dir):
    sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
    target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
    similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
    selection_directory = data_dir + '/simulation/%s' % (design_id)
    return(selection_directory)
def get_output_probes_filename(sim_tab, design_id, data_dir):
    sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
    target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
    similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
    output_probes_filename = data_dir + '/simulation/%s_taxon_best_probes.csv' % (design_id)
    return(output_probes_filename)
def get_output_probes_summary_filename(sim_tab, design_id, data_dir):
    sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
    target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
    similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
    output_probes_summary_filename = data_dir + '/simulation/%s_taxon_best_probes_summary.csv' % (design_id)
    return(output_probes_summary_filename)
def get_probe_directory(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
probe_directory = data_dir + '/%s/%s/s_%s/primer3' % (sample, target_rank, str(similarity))
return(probe_directory)
def get_design_taxon_evaluation_filename_list(sim_tab, design_id, data_dir):
    sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
    target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
    similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
    evaluation_directory = data_dir + '/%s/%s/s_%s/evaluation' % (sample, target_rank, str(similarity))
    taxon_evaluation_filename_list = glob.glob(evaluation_directory + '/*_probe_evaluation.csv')
    return(taxon_evaluation_filename_list)
def get_taxon_consensus_filename(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
taxon_consensus_filename = data_dir + '/%s/%s/s_%s/consensus/taxon_consensus.fasta' % (sample, target_rank, str(similarity))
return(taxon_consensus_filename)
def get_probe_blast_directory(sim_tab, design_id, taxon, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
probe_blast_directory = data_dir + '/%s/%s/s_%s/blast/%s/' % (sample, target_rank, str(similarity), taxon)
return(probe_blast_directory)
def get_taxon_probe_evaluation_filename(sim_tab, design_id, taxon):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
taxon_probe_evaluation_filename = data_dir + '/%s/%s/s_%s/blast/%s.probe.evaluation.h5' % (sample, target_rank, str(similarity), taxon)
return(taxon_probe_evaluation_filename)
def get_taxon_probe_evaluation_complete_filename(sim_tab, design_id,taxon):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
target_rank = sim_tab[sim_tab.DESIGN_ID == design_id].TARGET_RANK.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
taxon_probe_evaluation_filename = data_dir + '/%s/%s/s_%s/blast/%s.probe.evaluation.complete.txt' % (sample, target_rank, str(similarity), taxon)
return(taxon_probe_evaluation_filename)
def get_freq_ll(sim_tab, design_id):
freq_ll = sim_tab[sim_tab.DESIGN_ID == design_id].FREQLL.values[0]
return(freq_ll)
def get_sim_bot(sim_tab, design_id):
bot = sim_tab[sim_tab.DESIGN_ID == design_id].BOT.values[0]
return(bot)
def get_sim_bplc(sim_tab, design_id):
bplc = sim_tab[sim_tab.DESIGN_ID == design_id].BPLC.values[0]
return(bplc)
def get_sim_barcode_selection(sim_tab, design_id):
barcode_selection = sim_tab[sim_tab.DESIGN_ID == design_id].BARCODESELECTION.values[0]
return(barcode_selection)
def get_sim_primerset(sim_tab, design_id):
primerset = sim_tab[sim_tab.DESIGN_ID == design_id].PRIMERSET.values[0]
return(primerset)
def get_full_length_probes_filename(sim_tab, design_id):
primerset = sim_tab[sim_tab.DESIGN_ID == design_id].PRIMERSET.values[0]
barcode_selection = sim_tab[sim_tab.DESIGN_ID == design_id].BARCODESELECTION.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
full_length_probes_filename = data_dir + '/simulation/{}_primerset_{}_barcode_selection_{}_full_length_probe_sequences.txt'.format(design_id, primerset, barcode_selection, )
return(full_length_probes_filename)
def get_full_length_blocking_probes_filename(sim_tab, design_id):
primerset = sim_tab[sim_tab.DESIGN_ID == design_id].PRIMERSET.values[0]
barcode_selection = sim_tab[sim_tab.DESIGN_ID == design_id].BARCODESELECTION.values[0]
similarity = sim_tab[sim_tab.DESIGN_ID == design_id].SIMILARITY.values[0]
full_length_probes_filename = data_dir + '/simulation/{}_primerset_{}_barcode_selection_{}_full_length_blocking_probe_sequences.txt'.format(design_id, primerset, barcode_selection, )
return(full_length_probes_filename)
def get_blast_database(sample, data_dir):
blast_db = data_dir + '/{}/input/{}.oriented.fasta'.format(sample, sample)
return(blast_db)
def get_blast_database_by_design_id(sim_tab, design_id, data_dir):
sample = sim_tab[sim_tab.DESIGN_ID == design_id].SAMPLE.values[0]
blast_db = data_dir + '/{}/input/{}.oriented.fasta'.format(sample, sample)
return(blast_db)
def get_full_length_probe_sequences_list(sim_tab):
full_length_probe_sequences_list = []
for i in range(sim_tab.shape[0]):
design_id = sim_tab.DESIGN_ID.values[i]
primerset = sim_tab.PRIMERSET.values[i]
barcode_selection = sim_tab.BARCODESELECTION.values[i]
f = data_dir + '/simulation/{}/{}_primerset_{}_barcode_selection_{}_full_length_probe_sequences.txt'.format(design_id, design_id, primerset, barcode_selection)
full_length_probe_sequences_list.append(f)
return(full_length_probe_sequences_list)
def get_full_length_blocking_probe_sequences_list(sim_tab):
full_length_blocking_probe_sequences_list = []
for i in range(sim_tab.shape[0]):
design_id = sim_tab.DESIGN_ID.values[i]
primerset = sim_tab.PRIMERSET.values[i]
barcode_selection = sim_tab.BARCODESELECTION.values[i]
f = data_dir + '/simulation/{}/{}_primerset_{}_barcode_selection_{}_full_length_blocking_probe_sequences.txt'.format(design_id, design_id, primerset, barcode_selection)
full_length_blocking_probe_sequences_list.append(f)
return(full_length_blocking_probe_sequences_list)
def get_full_length_probe_summary_list(sim_tab):
full_length_probe_summary_list = []
for i in range(sim_tab.shape[0]):
design_id = sim_tab.DESIGN_ID.values[i]
primerset = sim_tab.PRIMERSET.values[i]
barcode_selection = sim_tab.BARCODESELECTION.values[i]
f = data_dir + '/simulation/{}/{}_full_length_probes_summary.txt'.format(design_id, design_id, primerset, barcode_selection)
full_length_probe_summary_list.append(f)
return(full_length_probe_summary_list)
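# Illustrative sketch of the simulation table consumed by these accessors
# (column names come from the functions above; the row values are made up):
#   DESIGN_ID,SAMPLE,TARGET_RANK,SIMILARITY,MAX_CONTINUOUS_HOMOLOGY,BOT,PRIMERSET,BARCODESELECTION,...
#   DSGN0001,sample_a,genus,0.9,14,0.99,A,random,...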
###############################################################################################################
# Useful variables
###############################################################################################################
data_dir = config['__default__']['DATA_DIR']
sim_input_filename = config['simulations']['simulation_table']
sim_output_filename = get_simulation_summary_filenames(sim_input_filename)
design_list, sim_tab = get_simulation_list(sim_input_filename, data_dir)
full_length_probe_summary_list = get_full_length_probe_summary_list(sim_tab)
usearch_path = config['usearch']['path']
primer3_program_path = config['primer3']['path']
design_dir = [data_dir + '/simulation/' + dsgn for dsgn in design_list]
for dsgn_dir in design_dir:
if not os.path.exists(dsgn_dir):
os.makedirs(dsgn_dir)
###############################################################################################################
# Snake rules
###############################################################################################################
rule all:
input:
sim_output_filename
rule design_probes:
input:
fasta_filename = data_dir + '/{sample}/input/{sample}.fasta',
sample_dir = data_dir + '/{sample}'
output:
design_probe_filename = dynamic(data_dir + '/{sample}/{target_rank}/s_{similarity}/primer3/{taxon}_consensus.int')
params:
sample = "{sample}",
target_rank = "{target_rank}",
similarity = "{similarity}",
ud = usearch_path,
p3d = primer3_program_path
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_design_probes.py "
"{input.fasta_filename} {input.sample_dir} "
"-db {config[blast][16s_db]} "
"-t {params.target_rank} "
"-s {params.similarity} "
"-ud {params.ud} "
"-p3d {params.p3d}"
rule blast_probes:
input:
design_probe_filename = data_dir + '/{sample}/{target_rank}/s_{similarity}/primer3/{taxon}_consensus.int'
output:
probe_blast_complete_filename = data_dir + '/{sample}/{target_rank}/s_{similarity}/primer3/{taxon}.probe.blast.complete.txt'
params:
blast_database = lambda wildcards: get_blast_database(wildcards.sample, data_dir)
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_blast_probes.py "
"{params.blast_database} {input.design_probe_filename}"
rule evaluate_taxon_probes:
input:
design_probe_filename = data_dir + '/{sample}/{target_rank}/s_{similarity}/primer3/{taxon}_consensus.int',
probe_blast_complete_filename = data_dir + '/{sample}/{target_rank}/s_{similarity}/primer3/{taxon}.probe.blast.complete.txt'
output:
probe_evaluation_complete_filename = data_dir + '/{sample}/{target_rank}/s_{similarity}/blast/{taxon}.probe.evaluation.complete.txt'
shell:
"python3 -W ignore {config[__default__][SCRIPTS_PATH]}/hiprfish_evaluate_probes.py "
"{input.design_probe_filename}"
rule select_taxon_probes:
input:
probe_evaluation_complete_filename = lambda wildcards: get_taxon_probe_evaluation_complete_filename(sim_tab, wildcards.design_id, wildcards.taxon)
output:
taxon_design_evaluation_filename = data_dir + '/simulation/{design_id}/{taxon}_probe_selection.csv'
params:
tpn = lambda wildcards: get_sim_tpn(sim_tab, wildcards.design_id),
freqll = lambda wildcards: get_freq_ll(sim_tab, wildcards.design_id),
bot = lambda wildcards: get_sim_bot(sim_tab, wildcards.design_id),
probe_selection_method = lambda wildcards: get_sim_probe_selection_method(sim_tab, wildcards.design_id),
target_rank = lambda wildcards: get_sim_target_rank(sim_tab, wildcards.design_id),
otu = lambda wildcards: get_sim_otu(sim_tab, wildcards.design_id),
min_tm = lambda wildcards: get_sim_mintm(sim_tab, wildcards.design_id),
max_tm = lambda wildcards: get_sim_maxtm(sim_tab, wildcards.design_id),
gc = lambda wildcards: get_sim_gc(sim_tab, wildcards.design_id),
mch = lambda wildcards: get_sim_mch(sim_tab, wildcards.design_id)
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_select_probes.py "
"{input.probe_evaluation_complete_filename} {wildcards.design_id} {output.taxon_design_evaluation_filename} "
"-c {params.probe_selection_method} "
"-t {params.target_rank} "
"-o {params.otu} "
"-tmin {params.min_tm} "
"-tmax {params.max_tm} "
"-m {params.mch} "
"-tpn {params.tpn} "
"-freqll {params.freqll} "
"-gc {params.gc} "
"-bot {params.bot}"
rule collect_selected_probe_information:
input:
sim_filename = sim_input_filename,
design_directory = data_dir + '/simulation/{design_id}',
taxon_evaluation_filename_list = dynamic(data_dir + '/simulation/{design_id}/{taxon}_probe_selection.csv')
output:
taxon_best_probes_filename = data_dir + '/simulation/{design_id}/taxon_best_probes.csv',
taxon_best_probes_filtered_filename = data_dir + '/simulation/{design_id}/taxon_best_probes_filtered.csv',
probes_summary_filename = data_dir + '/simulation/{design_id}/taxon_best_probes_summary.csv'
params:
bot = lambda wildcards: get_sim_bot(sim_tab, wildcards.design_id)
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_collect_taxon_best_probes.py "
"{input.design_directory} {input.sim_filename} "
"{output.taxon_best_probes_filename} {output.taxon_best_probes_filtered_filename} {output.probes_summary_filename} {params.bot}"
rule generate_full_probes:
input:
taxon_best_probes_filename = data_dir + '/simulation/{design_id}/taxon_best_probes.csv',
output:
full_length_probe_summary_filename = data_dir + '/simulation/{design_id}/{design_id}_full_length_probes_summary.txt'
params:
design_dir = data_dir + '/simulation/{design_id}',
consensus_directory = lambda wildcards: get_consensus_directory(sim_tab, wildcards.design_id, data_dir),
blast_database = lambda wildcards: get_blast_database_by_design_id(sim_tab, wildcards.design_id, data_dir),
bot = lambda wildcards: get_sim_bot(sim_tab, wildcards.design_id),
mch = lambda wildcards: get_sim_mch(sim_tab, wildcards.design_id),
bplc = lambda wildcards: get_sim_bplc(sim_tab, wildcards.design_id),
primerset = lambda wildcards: get_sim_primerset(sim_tab, wildcards.design_id),
target_rank = lambda wildcards: get_sim_target_rank(sim_tab, wildcards.design_id),
barcode_selection = lambda wildcards: get_sim_barcode_selection(sim_tab, wildcards.design_id)
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_generate_full_probes.py "
"{params.design_dir} {params.consensus_directory} {params.blast_database} "
"{params.bot} {params.mch} {params.bplc} "
"-ps {params.primerset} "
"-t {params.target_rank} "
"-bs {params.barcode_selection}"
rule collect_probe_coverage_results:
input:
full_length_probe_summary_list,
sim_input_filename = sim_input_filename,
data_dir = data_dir
output:
sim_output_filename
shell:
"python3 {config[__default__][SCRIPTS_PATH]}/hiprfish_collect_simulation_results.py "
"{input.data_dir} {input.sim_input_filename} {output}"
| [
"[email protected]"
] | ||
e88077c34739ecedaa32ccd0a5e4b145d929f309 | f725bade01b86baf57509c716fba7d4a0d8cbe0f | /python/projecteuler/p124.py | 0f56042d19fddc12a3240a634bde11f249be39e9 | [] | no_license | guoea/sample-code | 07b9ba8a5006f0143238a7b4aefb69fbb64042b7 | 1ecdcdd6548edffe7e3e19c94b7ba4f0ea59526a | refs/heads/master | 2023-02-03T07:27:30.035284 | 2020-12-23T23:38:51 | 2020-12-23T23:38:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | import math
import numpy as np
def get_all_primes(n):
arr = [True for n in range(n+1)]
arr[0], arr[1] = False, False
arr[2] = True
index = 2
while index < len(arr):
for j in range(index * 2, n+1, index):
arr[j] = False
index += 1
while index < len(arr) and not arr[index]:
index += 1
return [i for i in range(n+1) if arr[i]]
all_primes = get_all_primes(1000000)
len(all_primes)
def prime_factors(num):
f = {}
remaining = num
for p in all_primes:
while remaining % p == 0:
if p in f:
f[p] += 1
else:
f[p] = 1
remaining /= p
if remaining == 1:
break
return f
prime_factors(504)
def rad(n):
f = prime_factors(n)
return np.product([prime for prime, _ in f.items()], dtype=np.int64)
assert rad(504) == 42
limit = 100001
rad_list = []
for i in range (1, limit):
rad_list.append((rad(i), i))
rad_list.sort()
print(rad_list[9999])
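# Sanity sketch: rad_list is sorted by (rad(n), n), so rad_list[k-1] is E(k) in
# Project Euler 124 terms; rad_list[9999] corresponds to E(10000) and its
# second element is the answer n.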
| [
"[email protected]"
] | |
8621950ead90a0d32bd5496e2f26acae2d603d95 | 4866d3058dfe4cd3f3e9d842eb9554736559b5d4 | /String/5.py | 043101c5e3286857963f37a49e0223f98ba44f9d | [] | no_license | KIMGEEK/Python-Ruby_training | bd94b1b5d3e6c962a47e80b3be8571e20143c450 | c614b3e7aceda1b63141abde79b67fb35ea3712d | refs/heads/main | 2023-06-10T14:27:27.820791 | 2021-06-30T08:43:53 | 2021-06-30T08:43:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | print(10+5)
print("10"+"5") | [
"[email protected]"
] | |
0576b2d56d6ef55155bf670ff37e0e8fcb7d9db3 | bf7640c56a0d73660be76633842c1d7aa599cf4b | /PersonReg.py | 5774e93d47a646eb75ea91b7069a6c74000c073b | [] | no_license | Senethys/5DA000 | 509131ccdbfe5f4ade7a3acedfb778703de8d724 | cff2d6f496f4ca20f0d4ecb29852a4601d92ffe3 | refs/heads/master | 2020-03-27T22:45:48.303142 | 2018-09-03T22:12:25 | 2018-09-03T22:12:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | # -*- coding: latin-1 -*-
import sqlite3
from Person import Person
import cgitb
cgitb.enable()
class PersonReg:
"""The most important part pf the assigment, handles all interactions
with the SQL database. Most often called from the the cgi files
and in rare cases in the page objects to fetch notes."""
def __init__(self, dbname):
'''
        Checks if there are tables named "Agents" and "Notes",
        and creates a .db file with the given name, containing tables
        of those names.
        "Agents" will hold information about a person and, when queried,
        is used to create a Person object with the information contained
        in the database table, while the "Notes" table will contain the id
        of the Person to which each note belongs.
'''
self.db=sqlite3.connect(dbname)
cursor=self.db.cursor()
cursor.execute("""CREATE TABLE if not exists `Agents` ( `id` INTEGER PRIMARY KEY AUTOINCREMENT,
`name` TEXT,
`lastname` TEXT,
`email` TEXT);""")
cursor.execute("""CREATE TABLE if not exists `Notes` ( `id` INTEGER PRIMARY KEY AUTOINCREMENT,
`person_id` TEXT,
`title` TEXT,
`note` TEXT);""")
self.db.commit()
def search(self, searchTerm):
'''
Return ID numbers in a list if the given string matches anything
in the database.
'''
cursor=self.db.cursor()
result=[]
        # Match the search term against name, lastname and email with a
        # parameterized query.
        pattern='%'+searchTerm+'%'
        for idnr in cursor.execute("select id from Agents where name like ? or lastname like ? or email like ?", (pattern, pattern, pattern)):
            result.append(idnr)
return result
def getAgent(self, agent_id):
'''
Returns all information about an agent from the database with
the given id number, contained in a Person object.
'''
cursor=self.db.cursor()
res = cursor.execute("select id, name, lastname, email from Agents where id = %s" % agent_id).fetchone()
agent = Person(res[0],res[1],res[2],res[3])
return agent
def addAgent(self, name, lastname, email):
        '''
        Adds a new agent with the given name, last name and email address
        to the database.
        NOTE: the stored copy of this file is truncated here; the body
        below is a minimal plausible sketch, not the original code.
        '''
        cursor=self.db.cursor()
        cursor.execute("insert into Agents (name, lastname, email) values (?, ?, ?)",
                       (name, lastname, email))
        self.db.commit()
"[email protected]"
] | |
dafbbb61ab430fc0f860ce175aa8b937c7053a1a | 0cd932581091540eb5e37e07ce982a70a24955d3 | /externalModels/python/3dPosition/3dPositionYaml.py | 32502c02f4dc09c6ec8a7fe69eeb0730e22034da | [
"MIT"
] | permissive | boubinjg/SoftwarePilot | 4af165ebef6092ca29d2781184feaebd5253572f | 89c1106f48cda7cf37b2af23ac64052fbbf08bb8 | refs/heads/master | 2022-12-10T08:57:59.252982 | 2022-10-04T19:01:54 | 2022-10-04T19:01:54 | 176,774,502 | 39 | 11 | MIT | 2021-10-02T14:19:53 | 2019-03-20T16:29:47 | HTML | UTF-8 | Python | false | false | 647 | py | import sys
#path_name = '/Users/naveentr/Drone_codebase/feature_extraction/DJI_1957.JPG'
path_name = sys.argv[1] #get the image name/path_to_image from the user.
X = "1"
Y = "1"
Z = "1"
with open(path_name, 'r') as fp:
for line in fp:
if line.strip().split('=')[0] == "X":
X = line.strip().split('=')[1]
elif line.strip().split('=')[0] == "Y":
Y = line.strip().split('=')[1]
elif line.strip().split('=')[0] == "Z":
Z = line.strip().split('=')[1]
#meta_data = ImageMetaData(path_name)
#latlng = meta_data.get_lat_lng()
print("X="+X)
print("Y="+Y)
print("Z="+Z)
#print(meta_data)
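# Input sketch: the target file is expected to contain lines such as
#   X=1.5
#   Y=2.0
#   Z=0.5
# (values made up here); lines in any other form are ignored.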
| [
"[email protected]"
] | |
056d47e226f6ed1321d50431ee6f8b427d8d5631 | e61713688e0a7b6bbd4d9bf18c8838c45dbd3d01 | /FWCore/Integration/test/testSwitchProducerProvenanceAnalyzer_cfg.py | 60ff3e0cbb4ed8b224e362971e4a6ba60a7fcac7 | [
"Apache-2.0"
] | permissive | FlorianBury/cmssw | 21042a1d7823bc99234006e15b0b2a67c34999da | 6406d33feab56ab2af79b00b533f62b5368ac33e | refs/heads/master | 2021-06-09T23:30:59.378761 | 2019-01-21T12:54:56 | 2019-01-21T12:54:56 | 166,834,745 | 0 | 0 | Apache-2.0 | 2019-01-21T15:15:31 | 2019-01-21T15:15:31 | null | UTF-8 | Python | false | false | 389 | py | import FWCore.ParameterSet.Config as cms
import sys
process = cms.Process("ANA1")
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring("file:"+sys.argv[-1])
)
process.analyzer = cms.EDAnalyzer("SwitchProducerProvenanceAnalyzer",
src1 = cms.InputTag("intProducer"),
src2 = cms.InputTag("intProducer", "other")
)
process.p = cms.Path(process.analyzer)
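# Invocation sketch: cmsRun forwards the config's command-line arguments via
# sys.argv, so sys.argv[-1] above is the input file name, e.g.
#   cmsRun testSwitchProducerProvenanceAnalyzer_cfg.py myInput.root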
| [
"[email protected]"
] | |
321dacc443a8cdc2e1183237e44c6917993fec8d | cf16bb3eebe2d8c1683243085b24cb51432bde56 | /meredith/__log__.py | 24c3db9b81db5c85e399dbce45d432d594f05032 | [] | no_license | kwarwp/carol | 0ed3c8da736e60dcf2e85c68f93bb7238edc1cc9 | b540639dcbd1769adc99e318a4477166d44e4a6b | refs/heads/master | 2020-03-15T01:45:20.269113 | 2018-10-24T21:27:57 | 2018-10-24T21:27:57 | 131,901,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py |
{'date': 'Fri Oct 05 2018 11:19:48.164 GMt-0300 (Horário Padrão de Brasília) -X- SuPyGirls -X-',
'error': '''Traceback (most recent call last):
module _core.main line 160
dialog.action(lambda *_: self.start()
module _core.supygirls_factory line 135
self.act(self, lambda *_: self.hide() or extra()) if self.act else None
module _core.supygirls_factory line 306
return self._first_response(lambda: self._executa_acao(), self.extra, self.error)
module _core.supygirls_factory line 278
traceback.print_exc(file=sys.stderr)
module _core.supygirls_factory line 295
exec(self.code, glob) # dict(__name__="__main__"))
module <module> line 9
Historia()
module <module> line 7
PRAIA = Cena(img = "http://www.pousadazemaria.com.br/system/images/pousada-ze-maria-fernando-de-noronha-t237.jpg")
TypeError: 'module' object is not callable
'''},
{'date': 'Fri Oct 05 2018 11:55:05.995 GMt-0300 (Horário Padrão de Brasília) -X- SuPyGirls -X-',
'error': '''
module <string> line 8
golfinho = Elemento(img=GOLFINHO, tit="Golfinho", style=dict(left=150, top=60, width=60, height=200)
^
SyntaxError: invalid syntax
'''}, | [
"[email protected]"
] | |
517e47525a81267dd94fe9a75cd79194e7cdca85 | 9f8067e557f44b4f28dde6381ff65d7e1f550e4d | /reservations/views.py | fd3c002e1066d2389545795ab9a463f0bfb58184 | [] | no_license | Alisher007/spa2 | e56aae341b927a39170b3a9a40badc0643fff414 | 04a611abbcd22bf977602bdf85675ea2e3f92b0d | refs/heads/master | 2021-09-29T12:15:10.658713 | 2020-03-29T12:42:30 | 2020-03-29T12:42:30 | 251,028,673 | 0 | 0 | null | 2021-09-22T18:48:39 | 2020-03-29T12:38:58 | JavaScript | UTF-8 | Python | false | false | 7,978 | py | from django.shortcuts import render, redirect, get_object_or_404
from django.template.loader import render_to_string
from django.http import JsonResponse
from django.db.models import Q
from datetime import date
from products.models import Product
from customers.models import Customer
from products.models import Product
from reservations.forms import ReservationForm, TestForm
from reservations.models import Res, Timelist, Room
import json
from django.http import HttpResponseRedirect
from django.contrib import messages
def home(request):
return render(request, 'reservation/home.html')
def res_update(request,id):
res_target = get_object_or_404(Res, pk=id)
form = ReservationForm(request.POST or None , instance=res_target)
if form.is_valid():
res_time_in = list(range(int(request.POST.get('starttime')), int(request.POST.get('endtime')) + 1 ))
        # A clash exists when another reservation's start or end falls inside the
        # requested range, or when it fully contains the requested range.
        res_duplicate = Res.objects.filter(
            Q(arrdate=request.POST.get('arrdate')),
            Q(roomid=request.POST.get('roomid')),
            Q(starttime__in=res_time_in) |
            Q(endtime__in=res_time_in) |
            Q(starttime__id__lte=res_time_in[0], endtime__id__gte=res_time_in[-1])
        ).exclude(pk=id).count()
        if res_duplicate >= 1:
            messages.error(request, 'A reservation already exists in that time slot')
            return HttpResponseRedirect(request.path_info)
        else:
            messages.info(request, 'Reservation has been updated')
            form.save()
return redirect('res:list', slug=res_target.arrdate)
    # Comma-separated product ids for the template (the original string
    # carried a trailing comma; join() avoids that).
    productsid = ','.join(str(pk) for pk in res_target.products.values_list('id', flat=True))
context = {
'form': form,
'res_id': id,
'customer': res_target.customerid.name,
'customerid': res_target.customerid.id,
'products': ','.join(list(res_target.products.values_list('name', flat=True))),
        'productsid': productsid,
}
return render(request, 'reservation/res_update.html', context)
def res_create_test(request,):
form = TestForm(request.POST)
if form.is_valid():
        if int(request.POST.get('starttime')) >= int(request.POST.get('endtime')):
            messages.error(request, 'The start time must be before the end time')
            return HttpResponseRedirect(request.path_info)
        instance = form.save(commit=False)
        instance.user = request.user
        res_time_in = list(range(int(request.POST.get('starttime')), int(request.POST.get('endtime')) + 1))
        # Same conflict test as in res_update: a clash also occurs when an
        # existing reservation fully contains the requested range.
        res_duplicate = Res.objects.filter(
            Q(arrdate=request.POST.get('arrdate')),
            Q(roomid=request.POST.get('roomid')),
            Q(starttime__in=res_time_in) |
            Q(endtime__in=res_time_in) |
            Q(starttime__id__lte=res_time_in[0], endtime__id__gte=res_time_in[-1])
        ).count()
if res_duplicate >= 1:
            messages.error(request, 'A reservation already exists in that time slot')
return HttpResponseRedirect(request.path_info)
else:
instance.save()
context = {
'form': form,
}
return redirect('res:list', slug=request.POST.get('arrdate') )
context = {
'form': form,
}
return render(request, 'reservation/res_test.html', context)
def res_create(request,):
initial_data = {}
initial_data['starttime'] = request.GET.get('time')
initial_data['roomid'] = request.GET.get('table')
initial_data['arrdate'] = request.GET.get('date')
form = ReservationForm(request.POST or None, initial = initial_data)
if form.is_valid():
if int(request.POST.get('starttime')) >= int(request.POST.get('endtime')):
            messages.error(request, 'The end time must be later than the start time')
return HttpResponseRedirect(request.path_info)
        instance = form.save(commit=False)
        instance.user = request.user
        res_time_in = list(range(int(request.POST.get('starttime')), int(request.POST.get('endtime')) + 1))
        # Same conflict test as in res_update, including full containment.
        res_duplicate = Res.objects.filter(
            Q(arrdate=request.POST.get('arrdate')),
            Q(roomid=request.POST.get('roomid')),
            Q(starttime__in=res_time_in) |
            Q(endtime__in=res_time_in) |
            Q(starttime__id__lte=res_time_in[0], endtime__id__gte=res_time_in[-1])
        ).count()
        if res_duplicate >= 1:
            messages.error(request, 'The room is occupied at that time')
        else:
            messages.info(request, 'Reservation has been created')
            instance.save()
            form.save_m2m()
context = {
'form': form,
}
return redirect('res:list', slug=request.POST.get('arrdate') )
context = {
'form': form,
}
return render(request, 'reservation/res_create.html', context)
def res_delete(request, id):
    res = Res.objects.get(id=id)
messages.info(request,f'{res} has been deleted')
res.delete()
return JsonResponse({'data':'ok'}, safe=False)
def res_list(request,slug=None):
today_ = str(date.today())
if slug == None:
slug = today_
# getting the reservations for selected date
res = list(Res.objects.filter(arrdate=slug).prefetch_related('products'))
table = list(Room.objects.all())
time_list = list(Timelist.objects.all())
# creating availibility table
avail_list = [[{
'table':str(ta),'table_id':ta.id,'time':str(ti),'time_id':ti.id, 'status':'vacant', 'date':slug
} for ta in table] for ti in time_list]
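    # Illustrative cell shape (field values here are placeholders): the grid
    # holds one dict per (time slot, room) pair, e.g.
    #   {'table': 'Room 1', 'table_id': 1, 'time': '10:00', 'time_id': 3,
    #    'status': 'vacant', 'date': '2020-03-29'}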
# inserting queryset data into availability table
if res:
for avail_list1 in avail_list:
for avail_list2 in avail_list1:
for re in res:
if re.roomid.pk == avail_list2.get('table_id') and re.starttime.pk <= avail_list2.get('time_id') and re.endtime.pk >= avail_list2.get('time_id'):
avail_list2.update({
'table':avail_list2.get('table'),
'time':avail_list2.get('time'),
'customer': re.customerid.name,
'res_id':re.pk,
'products': ','.join(re.products.values_list('name', flat=True)),
                            'products_id': ','.join(str(pk) for pk in re.products.values_list('id', flat=True)),
'status':'' ,
'occupied':'occupied'})
context = {
'avail': avail_list,
'table': table,
'time_list': time_list,
'date': today_,
}
return render(request, 'reservation/res_list.html', context)
def res_ajax(request,):
room_json = list(Room.objects.values())
time_list_json = list(Timelist.objects.values())
context = {
'room': room_json,
'time': time_list_json,
}
return JsonResponse(context, safe=False)
def customer_search(request):
ctx = {}
url_parameter = request.GET.get("q")
if url_parameter:
customers = Customer.objects.filter(name__icontains=url_parameter)
else:
customers = Customer.objects.all()[:5]
ctx["customers"] = customers
data_dict = {"html_customers-search": 'test'}
if request.is_ajax():
html = render_to_string(
template_name="reservation/customers-search.html", context={"customers": customers}
)
data_dict = {"html_customers-search": html}
return JsonResponse(data=data_dict, safe=False)
def product_search(request):
ctx = {}
url_parameter = request.GET.get("q")
if url_parameter:
products = Product.objects.filter(name__icontains=url_parameter)
else:
products = Product.objects.all()[:5]
ctx["products"] = products
data_dict = {"html_products-search": 'test'}
if request.is_ajax():
html = render_to_string(
template_name="reservation/products-search.html", context={"products": products}
)
data_dict = {"html_products-search": html}
return JsonResponse(data=data_dict, safe=False)
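# Compatibility note for the search views above: request.is_ajax() was
# deprecated in Django 3.1 and removed in 4.0; on newer Django the equivalent
# check is request.headers.get('x-requested-with') == 'XMLHttpRequest'.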
| [
"[email protected]"
] | |
e64570e001fad63d2ab380a3b13e537c0eb9e84d | 6c2b07d29685f097f552f59eac07b56d2fcc7a2f | /middleChinesePronunciator/pronLogic/apps.py | 36eebd7fc7ec01d26875b1ce43f0385b4d6da267 | [] | no_license | caw051/middleChinesePronunciator | 1de1bcd5e007ecaee5181e22816cacb89d0d98d0 | d959dea54f9f377822f29a0f3aa40eef378bc64c | refs/heads/master | 2020-07-23T16:41:08.696611 | 2019-09-29T08:03:17 | 2019-09-29T08:03:17 | 207,633,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | from django.apps import AppConfig
class PronlogicConfig(AppConfig):
name = 'pronLogic'
| [
"[email protected]"
] | |
c93190617ff7ebeb56f51f3c6bba7316c0acd771 | bad52a828d8995ef3e80bedec63a4b0fa14945e8 | /wishful_module_srslte/__init__.py | fda9b00c8902ed1434b7dd84df30db6813cd1167 | [] | no_license | luwangg/module_srslte | 1a306a90d32f7e1f3f397fe438691ff7c329d213 | b4819386836ad3b92617913fe65864a15c2a471b | refs/heads/master | 2020-04-08T00:08:56.931441 | 2018-10-24T10:11:15 | 2018-10-24T10:11:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | from .module_srslte import *
| [
"[email protected]"
] | |
52b518dfdc67baed00845dc1a5a7ca931bf771b3 | 51bab5c90567f2c7b2b8708a33e444774954f26c | /logging/callee.py | 10ed72fc5de446d990bf303f98935facadb96e20 | [] | no_license | Lohengrinnn/pytrick | d55edf095f530484670869f0b43734b24c53f7fd | 61b2dce9fc762fffd39ac3a61d80e998d7c5e5bf | refs/heads/main | 2023-04-11T13:41:44.458570 | 2021-04-20T19:23:52 | 2021-04-20T19:23:52 | 359,897,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 640 | py | import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def func():
s_handler = logging.StreamHandler()
f_handler = logging.FileHandler("file.log")
s_handler.setLevel(logging.DEBUG)
f_handler.setLevel(logging.WARNING)
logger.addHandler(s_handler)
logger.addHandler(f_handler)
    # A record passes only if its level clears the logger's level first and
    # then each handler's own level; DEBUG < INFO, so debug() below is dropped.
    print("debug won't print since logger.level = INFO")
    # s_handler (level DEBUG) prints whatever the logger lets through to the
    # console; f_handler keeps only WARNING and above in file.log. basicConfig
    # is never called here, so these two are the only handlers attached.
logger.debug("debug")
logger.info("info")
    logger.warning("warn")  # warning(); warn() is a deprecated alias
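# Usage sketch (assumption: this file is importable as `callee`):
#   import callee
#   callee.func()   # INFO+ shows on the console; WARNING+ also lands in file.log
# Note: every call to func() attaches fresh handlers, so repeated calls
# duplicate output; guarding with `if not logger.handlers:` avoids that.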
| [
"[email protected]"
] | |
f87af5f39d4fe133be0da1a76d9c4a04108cb547 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02715/s294971061.py | ee4f079a5487c6d3efdcbf1a97f1275c22a6cc49 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | N,K = list(map(int,input().split()))
MOD = 10**9+7
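# My reading of the approach, stated as an assumption: arr[i-1] starts as
# (K//i)**N, the number of N-tuples over {1..K} whose elements are all
# divisible by i. The loop below subtracts the counts of all multiples of i,
# leaving the number of tuples whose gcd is exactly i; the last line weights
# each count by its gcd value before summing.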
arr = [pow(K//i, N, MOD) for i in range(1,K+1)]
for i in range(K//2,0,-1):
arr[i-1] -= sum(arr[2*i-1:K:i]) % MOD
arr[i-1] %= MOD
arr = [(i+1)*j%MOD for i,j in enumerate(arr)]
print(sum(arr)%MOD) | [
"[email protected]"
] | |
dcfb6853d0e3395861a0c32e7f2e03dcde75a5f6 | b4e3ce1f752e3470e05b1a8ccea00da31250dcf9 | /minimumOfThree.py | a4de9a29d69d0dba9e0bd4997cd90da98df634a0 | [] | no_license | brayanestive18/CodeAbbey | 29851772a469368359ee8244a82dce820c86ff03 | 0d6127828159cdfaf02ea381a583b2a381100c3f | refs/heads/master | 2020-03-06T20:46:51.932684 | 2019-09-01T22:46:08 | 2019-09-01T22:46:08 | 127,062,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,545 | py | #!/usr/bin/env python
""" Minimum Of Three
python minimumOfThree.py
Enter the total number of data rows:
input 28
Enter the numbers:
Input:
-2324803 -9546536 -7755899
7790004 -9096052 8883763
-3718911 9095374 -8995681
5882345 -14932 -1171013
-4752924 3820231 3333289
2591187 6860972 -5716512
-1187736 -1618721 3761545
5421937 8773194 4438018
7196063 3878242 -1336884
-5851795 -6020252 -1247609
8963805 1654944 -794145
-8792094 -555052 109803
-9908331 5726037 -794823
-8904012 1608382 9190245
-75026 6855457 3010476
-6741737 -553355 -128552
-2458249 8258908 8252727
-8696703 3680845 7025922
5741315 876908 904164
-5595569 5025113 4883912
3156822 3988918 -3461144
-7637322 5196823 5983804
2472480 5288492 1709841
-8322343 6384479 -6681777
-9132098 -3690546 -9826319
3878378 -432284 -379675
-6250174 7109467 -2120767
-7997446 8412763 -8439921
Output
-9546536 -9096052 -8995681 -1171013 -4752924 -5716512 -1618721 4438018 -1336884 -6020252 -794145 -8792094
-9908331 -8904012 -75026 -6741737 -2458249 -8696703 876908 -5595569 -3461144 -7637322 1709841 -8322343
-9826319 -432284 -6250174 -8439921
"""
mini = []  # one minimum per input row (renamed from the misleading 'maxi')
cnt = raw_input("Enter the total number of data rows: ")
print "Enter the numbers: "
for i in range(0, int(cnt)):
data = raw_input().split()
    # Pick the smallest of the three; <= fixes the original branching, which
    # appended nothing (or the wrong value) when two values were equal.
    if int(data[0]) <= int(data[1]) and int(data[0]) <= int(data[2]):
        mini.append(data[0])
    elif int(data[1]) <= int(data[2]):
        mini.append(data[1])
    else:
        mini.append(data[2])
print "Output: "
print ' '.join(mini) | [
"[email protected]"
] | |
fcca2d613c388b57dfdde7b60777cb95156d8533 | 9e6129645e4ec7e14b65736c72147450de7583b1 | /rotate-string/rotate-string.py | 8e193ce842bc0031e11d5785d45638a39eba0a4a | [] | no_license | hamazumi/Leetcode | 97c09c22e5d55d5c8c29f4abdce1ae72faf7781c | f34f99e9e28634ea593363392b494bf1c5cff44e | refs/heads/main | 2023-08-16T22:01:51.035655 | 2021-10-09T19:04:12 | 2021-10-09T19:04:12 | 387,860,885 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | class Solution:
def rotateString(self, s: str, goal: str) -> bool:
        s2 = s * 2  # doubling s makes every rotation of s appear as a substring
        if len(s) == len(goal) and goal in s2:
            return True
        return False
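# Why the doubling works (illustrative): for s = "abcde", s2 = "abcdeabcde"
# contains every rotation of s, e.g. "cdeab"; the length check rejects goals
# that are substrings of s2 without being full rotations.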
"[email protected]"
] | |
1537ab99bfa2d022007be7a7f72d856fabdc4865 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/passlib/handlers/scram.py | 69211c05d5190fe2a53a8d8e5bf7e2d2aa550a65 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:c01b28060d2a2d6f0703936c81c9ddd5b33b64360415b6a9006a0fd3fe38379f
size 22539
| [
"[email protected]"
] | |
2f497e81c59a8fa590681166bc75ba3aac1dc2fc | 80a80ddd08e57d28896e4a8f2bd6053913db5157 | /setup.py | a69e9e19519488934d1755471bc5c56bccf2f280 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | tonygeomoraes/pyres | e162dac3f6035b5af1dd9b3e096b306c23490278 | b0b8bc37d4d88fc407c15c3c8d07aa8c20ee86b7 | refs/heads/master | 2021-05-07T22:26:22.545190 | 2017-07-05T15:00:37 | 2017-07-05T15:00:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | import sys
from setuptools import setup
# To use:
# python setup.py bdist --format=wininst
# trap someone trying to install pyres with something other
# than python 2 or 3
if not sys.version_info[0] in [2, 3]:
print('Sorry, pyres not supported in your Python version')
print(' Supported versions: 2 and 3')
print(' Your version of Python: {}'.format(sys.version_info[0]))
sys.exit(1) # return non-zero value for failure
long_description = ''
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except Exception:
    # Fall back to the raw Markdown if pypandoc is unavailable or fails.
    with open("README.md", 'r') as f:
        long_description = f.read()
setup(name='pyres',
description='pyres is a Python package to create, run, and post-process R2 electrical resistivity forward and inverse models.',
long_description=long_description,
author='Kevin M. Befus',
author_email='[email protected]',
url='https://bitbucket.org/kbefus/pyres/',
license='New BSD',
platforms='Windows, Mac OS-X, Unix',
install_requires=['numpy>=1.7'],
packages=['pyres'],
version='1.0',
keywords='geophysics resistivity inversion')
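# Install sketch (standard setuptools workflow, not specific to pyres):
#   python setup.py install
# or, per the note at the top of this file, build a Windows installer with
#   python setup.py bdist --format=wininst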
| [
"[email protected]"
] |