blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec23a853f5ceb288a79ccc61e24ce866e8b343d7 | c8fface11c98413c17c1a839fae323be76bf00d2 | /4Dobble Game.py | a31cafdd301e2cd7cdcc377e1e0a1100b3d21b0a | []
| no_license | praveen-95572/Nptel-the-joy-of-computing-using-python- | 6dd75bf669bdd3493f0ab6b1a993399662eb18c1 | 46c34f3e303fa3fb46e0c9f2f2dc5f61ea22ac8e | refs/heads/master | 2023-06-24T08:29:50.225493 | 2021-07-26T13:04:34 | 2021-07-26T13:04:34 | 389,632,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import string
import random
#print(string.ascii_letters)
symbols=list(string.ascii_letters)   # pool of 52 candidate symbols (a-z, A-Z)
card1=[0]*5
card2=[0]*5
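# Dobble rule: the finished cards must share exactly one symbol.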
pos1=random.randint(0,4)
pos2=random.randint(0,4) #pos1 and pos2 are same symbol position in card1 and card2 respectively
samesymbol=random.choice(symbols)
symbols.remove(samesymbol)
if(pos1==pos2):
card2[pos1]=samesymbol
card1[pos1]=samesymbol
else:
card2[pos2]=samesymbol
card1[pos1]=samesymbol
card1[pos2]=random.choice(symbols)
symbols.remove(card1[pos2])
card2[pos1]=random.choice(symbols)
symbols.remove(card2[pos1])
i=0
while(i<5):
if(i!=pos1 and i!=pos2):
alphabet1=random.choice(symbols)
symbols.remove(alphabet1)
alphabet2=random.choice(symbols)
symbols.remove(alphabet2)
card1[i]=alphabet1
card2[i]=alphabet2
i=i+1
print(card1)
print(card2)
ch=input("Spot the similar symbol")
if(ch==samesymbol):
print("right")
else:
print("wrong")
| [
"[email protected]"
]
| |
a1be103c453678ec3adbbd37563c923bc80349e7 | 913ffcf29991e57c504bc639cfabe471dfd41782 | /Draw Chat/menu_inicial.py | 6f4b787d6202c659479626f3793eb14b1510374c | []
| no_license | JaimeGo/PyQt-Projects | ef30761c5c2c025b9f98db7ed7e7d66b32d9b535 | c54eeaff69424ab463d64391422005bba3ceabd7 | refs/heads/master | 2020-03-18T13:52:40.067614 | 2018-05-25T07:01:57 | 2018-05-25T07:01:57 | 134,814,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,052 | py | from PyQt4 import QtGui, uic, QtCore
from sys import exit
from chat_grupal import ChatGrupal
formulario_1 = uic.loadUiType("menu_inicio.ui")
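# uic.loadUiType returns (form_class, qt_base_class); the menus below inherit from both.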
class MenuInicial(formulario_1[0], formulario_1[1]):
def __init__(self, cliente):
super().__init__()
self.cliente = cliente
self.setupUi(self)
self.pushButton.clicked.connect(self.registrarse)
self.pushButton_2.clicked.connect(self.ingresar)
self.pushButton_3.clicked.connect(self.salir)
self.menu_ingreso = MenuIngreso(self.cliente, self)
self.menu_registro = MenuRegistro(self.cliente, self)
self.chat_grupal = ChatGrupal(self.cliente, self)
self.seleccion_sala = SeleccionSala(self.cliente, self)
def registrarse(self):
self.hide()
self.menu_registro.show()
def ingresar(self):
self.hide()
self.menu_ingreso.show()
def salir(self):
self.cliente.disconnect()
self.hide()
exit(0)
def closeEvent(self, event):
self.hide()
self.cliente.disconnect()
formulario_2 = uic.loadUiType("ingreso.ui")
class MenuIngreso(formulario_2[0], formulario_2[1]):
def __init__(self, cliente, menu_inicial):
super().__init__()
self.cliente = cliente
self.menu_inicial = menu_inicial
self.setupUi(self)
self.pushButton.clicked.connect(self.continuar)
def continuar(self):
usuario = self.lineEdit.text()
contraseña = self.lineEdit_2.text()
seguir_escuchando = True
primer_largo = len(self.cliente.current_list)
self.cliente.send_message_to_server("antiguo_usuario:" + usuario + "," + contraseña)
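        # Poll the client's incoming-message list until the server appends a
        # reply; note that this busy-wait blocks the Qt event loop meanwhile.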
while seguir_escuchando:
if len(self.cliente.current_list) > primer_largo:
if self.cliente.current_list[-1] == "contraseña_aceptada":
print("SE ACEPTÓ!!!")
seguir_escuchando = False
self.hide()
self.menu_inicial.seleccion_sala.show()
self.cliente.usuario = usuario
self.cliente.menu.chat_grupal.rellenar_amigos()
elif self.cliente.current_list[-1] == "contraseña_rechazada":
print("SE RECHAZÓ!!!")
seguir_escuchando = False
self.label_3.setText("Error: Datos incorrectos")
self.label_3.setStyleSheet("QLabel {color:red}")
self.lineEdit.clear()
self.lineEdit_2.clear()
def closeEvent(self, event):
self.hide()
self.cliente.disconnect()
formulario_3 = uic.loadUiType("registro.ui")
class MenuRegistro(formulario_3[0], formulario_3[1]):
def __init__(self, cliente, menu_inicial):
super().__init__()
self.cliente = cliente
self.menu_inicial = menu_inicial
self.setupUi(self)
self.pushButton.clicked.connect(self.continuar)
def continuar(self):
usuario = self.lineEdit.text()
contraseña = self.lineEdit_2.text()
confirmacion = self.lineEdit_3.text()
if len(usuario) == 0 or len(contraseña) == 0 or len(confirmacion) == 0:
self.label_4.setText("Error: Falta información")
self.label_4.setStyleSheet("QLabel {color:red}")
self.lineEdit.clear()
self.lineEdit_2.clear()
self.lineEdit_3.clear()
elif contraseña != confirmacion:
self.label_4.setText("Error: Contraseñas no coinciden")
self.label_4.setStyleSheet("QLabel {color:red}")
self.lineEdit.clear()
self.lineEdit_2.clear()
self.lineEdit_3.clear()
else:
self.lineEdit_2.clear()
self.lineEdit_3.clear()
self.cliente.send_message_to_server("nuevo_usuario:" + usuario + "," + contraseña)
self.hide()
self.menu_inicial.show()
def closeEvent(self, event):
self.hide()
self.cliente.disconnect()
formulario_4 = uic.loadUiType("seleccion_sala.ui")
class SeleccionSala(formulario_4[0], formulario_4[1]):
def __init__(self, cliente, menu_inicial):
super().__init__()
self.cliente = cliente
self.menu_inicial = menu_inicial
self.setupUi(self)
self.pushButton.clicked.connect(self.entrar_1)
self.pushButton_2.clicked.connect(self.entrar_2)
self.pushButton_3.clicked.connect(self.entrar_3)
def entrar_1(self):
self.cliente.sala = "sala_1:::"
self.hide()
self.menu_inicial.chat_grupal.show()
self.cliente.send_message_to_server("empieza_partida:")
def entrar_2(self):
self.cliente.sala = "sala_2:::"
self.hide()
self.menu_inicial.chat_grupal.show()
self.cliente.send_message_to_server("empieza_partida:")
def entrar_3(self):
self.cliente.sala = "sala_3:::"
self.hide()
self.menu_inicial.chat_grupal.mostrar_minichat_grup()
| [
"[email protected]"
]
| |
7dc6e352a48eddfe374a1dd70cd502f92368190a | 3b376850dd360e0eaeb4f710bbec30cd2e4f8f4a | /users/urls.py | b898aae8777a3f975e96f05e2fde286668f9b9ab | []
| no_license | nielHNIA/Crm-project | 409dfc28e0086a7ade36972d4043e92ac29badf7 | 6f342a83652fe210ad76ba6322dc11e1df710689 | refs/heads/master | 2023-02-05T04:58:51.572334 | 2021-01-02T02:57:06 | 2021-01-02T02:57:06 | 326,096,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | from django.urls import path, include
from . import views

app_name = 'users'
urlpatterns = [
path('', include('django.contrib.auth.urls')),
path('register/', views.register, name='register'),
]
| [
"[email protected]"
]
| |
d793aff26fdb5850f8ce22d236f87142a4d570d6 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/word-count/67602c62088e448aa6899a2feae08bb8.py | 9742b7715ac57210a724a124215699112573a4b4 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 256 | py | #
# word-count exercise
# iter. 2
#
def word_count(phrase):
wordlist = phrase.split()
    # output in dictionary format to pass the test
    output = {}
    for x in wordlist:
        if x not in output:
            output[x] = 1
        else:
            output[x] += 1
return output
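# Note: collections.Counter(phrase.split()) would build the same tally in one call.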
| [
"[email protected]"
]
| |
9195a76592f3dbac3dc38ac9aa7b3815503c4e68 | db7601406ea38e0b361d9a1c54ba640ae9b132eb | /10494 If We Were a Child Again.py | e91933043b3c91fcfde5c909009fb177d10bb283 | []
| no_license | FalseF/Algorithms-and-Problem-Solving-with-Python | c06c049d7499df76795eac8b82d8f5aebe126109 | d53ee80da5ff865eef05bbe280bdc68dae4f275d | refs/heads/master | 2023-07-17T06:24:47.918286 | 2021-09-06T16:32:30 | 2021-09-06T16:32:30 | 403,690,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py |
n=12
ck=95.123
r=1
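# Multiply ck by itself n times to get ck**n; float arithmetic loses precision
# for large powers, so exact-output problems need decimal.Decimal instead.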
while(n):
n=n-1
r*=ck
print(r)
| [
"[email protected]"
]
| |
61cfada2daa7561decdbe434fcd476886f36a64b | a884525000ecec4e14e51ffa2a3f7ddc7d0e516e | /Intermediate/Day 39/Calling constructor from outside.py | a6f477b71a38316d634ee5311103b5d78d421ff8 | []
| no_license | Dong2Yo/Learning-Python | 0fac4437126da0b2351a4212f6bb1355df87724e | 8599c3a97afa2dc3fcb4e72c654e90f4b1848fe6 | refs/heads/main | 2023-08-25T03:46:08.660780 | 2021-10-17T10:50:22 | 2021-10-17T10:50:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | class sample:
def __init__(self):
print("Class instantiated")
mysample=sample()
mysample.__init__()
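# __init__ already ran once automatically during sample(); the explicit call
# runs it again, so "Class instantiated" is printed twice.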
| [
"[email protected]"
]
| |
8f77c9a2fe285d78dd8306f572f66615be551d8d | 0bfe2290ecb7212f03c5a36d6de2a0441423fbd2 | /weekly-stats.py | dd9241143a82f6ab66e7a36a98d6f5fb88029e46 | [
"MIT"
]
| permissive | ca4ti/call-stats | 5e4bd482cf883378c6e1c82c809c410415d2a891 | 7413636918f438043a00158bd26a777002acdbfc | refs/heads/master | 2023-02-25T05:23:45.462700 | 2016-08-15T11:20:10 | 2016-08-15T11:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | from asternic_email import CallStats
import local_settings
import optparse
import arrow
last_week = arrow.get().replace(days=-7).format('YYYY-MM-DD')
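# arrow.get() is "now"; shifting it back 7 days yields a date inside last week
# for the example shown in the help text.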
help_txt = (
"Date inside of the week you wish to create a"
" report for. For example for last week use {}").format(last_week)
parser = optparse.OptionParser()
parser.add_option(
'-s', dest='start_of_week', default=arrow.get().format('YYYY-MM-DD'),
nargs=1, help=help_txt)
opts, remainder = parser.parse_args()
stats = CallStats(local_settings.EXTENSIONS)
stats.set_week(opts.start_of_week)
stats.connect_smtp(
local_settings.SMTP_SERVER,
local_settings.SMTP_USER,
local_settings.SMTP_PASSWORD)
stats.fetch_stats()
print(stats.stats)
stats.generate_emails('Your weekly call stats are as follows:')
print(">>> DONE!")
| [
"[email protected]"
]
| |
83f0a7ab3cbfab9a992ced7a1692165995ce681f | 3ea7513732b5c38d485a2be3d327a7c079329331 | /crudapi/views.py | bbfff5dd59057df93a1d62902cb712355050c851 | []
| no_license | manojpraveen101/api | 285e4a992419333ead98f33c333b47975926dfe6 | 46defb852bf013c86b73889b16ebfca36fe9eb5a | refs/heads/master | 2023-03-16T16:37:58.938862 | 2021-03-08T13:16:46 | 2021-03-08T13:16:46 | 345,570,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,620 | py | import logging
from django.core.exceptions import ObjectDoesNotExist, EmptyResultSet
from django.http.response import JsonResponse, HttpResponse
from django.shortcuts import render
from rest_framework.decorators import api_view
from rest_framework.parsers import JSONParser
from crudapi.models import Employee
from crudapi.serializers import EmployeeSerializer
logger = logging.getLogger(__name__)
def setcookie(request):
response = HttpResponse("Cookie Set")
response.set_cookie('firstname', 'praveen')
return response
def getcookie(request):
    value = request.COOKIES.get('firstname', 'not set')
return HttpResponse("firstname is : "+ value)
@api_view(['GET', 'POST', 'DELETE', 'PUT'])
def employee_list(request):
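    # Single endpoint for all four verbs: GET lists (optionally filtered by
    # firstname), POST creates, DELETE removes by firstname, PUT updates.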
if request.method == 'GET':
logger.debug("inside get method")
employee = Employee.objects.all()
firstname = request.GET.get('firstname',None)
if firstname is not None:
logger.debug("firstname is {}".format(firstname))
employee = employee.filter(firstname=firstname)
employee_serializer = EmployeeSerializer(employee, many=True)
return JsonResponse(employee_serializer.data, safe=False)
elif request.method == 'POST':
logger.debug("inside post method")
employee_data = JSONParser().parse(request)
employee_serializer = EmployeeSerializer(data=employee_data)
if employee_serializer.is_valid():
employee_serializer.save()
return JsonResponse({"message": "valid"})
else:
return JsonResponse({ "message":"not valid"})
elif request.method == 'DELETE':
logger.debug("inside delete method")
        firstname = request.GET.get('firstname', None)
        if firstname is not None:
            Employee.objects.filter(firstname=firstname).delete()
            return JsonResponse({"message": "deleted successfully"})
        else:
            return JsonResponse({"message": "deletion not successful"})
elif request.method == 'PUT':
logger.debug("inside put method")
employee_data = JSONParser().parse(request)
firstname = employee_data.get("firstname")
if firstname is not None:
employee = Employee.objects.filter(firstname=firstname)
employee.update(**employee_data)
return JsonResponse({"message":"updated successful"})
else:
return JsonResponse({"message":"update failed"})
# try:
# employee = TEmployee.objects.get(id=10)
# except TEmployee.DoesNotExist:
# return HttpResponse("EXCEPTION") | [
"[email protected]"
]
| |
47eee066e6b2436acfd0b25a8c700664ca98fd5c | a4deea660ea0616f3b5ee0b8bded03373c5bbfa2 | /executale_binaries/register-variants/pshufb_xmm_xmm.gen.vex.py | e4c89ba1279d236c1e25139f95a869d267887f3b | []
| no_license | Vsevolod-Livinskij/x86-64-instruction-summary | 4a43472e26f0e4ec130be9a82f7e3f3c1361ccfd | c276edab1b19e3929efb3ebe7514489f66087764 | refs/heads/master | 2022-02-02T18:11:07.818345 | 2019-01-25T17:19:21 | 2019-01-25T17:19:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 158 | py | import angr
proj = angr.Project('pshufb_xmm_xmm.exe')
print(proj.arch)
print(proj.entry)
print(proj.filename)
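# Lift the basic block at the entry point to VEX IR and pretty-print it.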
irsb = proj.factory.block(proj.entry).vex
irsb.pp() | [
"[email protected]"
]
| |
c152fe84a4529c8caaabf63186e0a65476cfe50a | 7e963835ac072178b415c992cccc24c161dad32f | /venv/bin/sqlformat | 338a7f966051794ea80430265b4b37042139f480 | []
| no_license | urmi6750/new-djnago | cf47d5796007c7e1d15de71765fb1fd76fc44dfb | 3840ba2e4f2e2790698b0d3537734d1daa7e051f | refs/heads/master | 2021-05-17T16:06:09.610551 | 2020-04-01T20:13:32 | 2020-04-01T20:13:32 | 250,861,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/home/fariya/PycharmProjects/untitled/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
a1b304081c5ddb28ccb5c07f0347c0e5a72b93f7 | 1ec86f822bb30ea18f992bfc0badc818423a2161 | /venv/Scripts/pip3-script.py | 21c1769b7a28819993aaa80e796fd701f1350e65 | []
| no_license | urbrob/strona | 58ad912529a4148855fa171b0d0966c090f691f4 | 4c5962fe05e1460197510ce38b820495bcf067b9 | refs/heads/master | 2020-04-27T08:38:04.563497 | 2019-03-06T07:55:04 | 2019-03-06T07:55:04 | 174,178,786 | 0 | 0 | null | 2019-03-06T16:17:06 | 2019-03-06T16:17:05 | null | UTF-8 | Python | false | false | 412 | py | #!C:\Users\sylwi\PycharmProjects\strona\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
]
| |
396d25013a696593483857993a2a2f54c29edbf4 | ae3ddb20e33ea0e6b3af8d738532b1b8c21e2e74 | /scripts/genome_assembly_annotate.py | 9dee024eb9a39f6215b58f3bfe217da1364bcb11 | []
| no_license | TonyMannion/Microbial-Comparative-Genomics | dded0e8e5a1cf4101bf76ca0fff2ddfc0c764d39 | 33067214fa248badc0d2a3e0bace417163c1d6a7 | refs/heads/master | 2021-07-14T12:35:01.463328 | 2021-02-15T14:51:15 | 2021-02-15T14:51:15 | 236,011,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,811 | py | import os
import time
import glob
import argparse
import numpy as np
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--username', dest='username', help='Provide username for PATRIC account. Prompt to enter password will appear.')
parser.add_argument('-m','--metadata_file',dest='metadata_file',help='Specify metadata file.')
parser.add_argument('-f', '--upload_files', dest='upload_files', default = 'yes', help='Upload read and/or contig files? Enter "yes" or "no". Default is "yes". If file with same name has already been upload to PATRIC, it will be overwritten by file upload here.')
parser.add_argument('-a', '--assembly_annotate', dest='assembly_annotate', default = 'yes', help='Execute assembly and annotate pipeline? Enter "yes" or "no". Default is "yes".')
parser.add_argument('-c', '--check_job', dest='check_job', default = 'yes', help='Check status of assemlby/annotation job? Enter "yes" or "no". Default is "yes". When job is complete, genome reports, contigs, and annotations data will be downloaded to output folder.')
parser.add_argument('-d', '--download_reports', dest='download_reports', default = 'yes', help='Download genome reports, contigs, and annotations data for assembled/annotated genomes? Enter "yes" or "no". Default is "no". Use this flag to download data from previously completed jobs.')
parser.add_argument('-o', '--output_folder', dest='output_folder', help='Specify output folder for downloaded data.')
args=parser.parse_args()
#login
print 'Enter password to log into PATRIC...'
os.system('p3-login ' + str(args.username) + ' > patric_domain_temp_genome_assemlby_annotation.txt')
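# The second line of the p3-login output reads "Logged in with username <name>";
# strip that prefix to recover the workspace domain.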
patric_domain = open('patric_domain_temp_genome_assemlby_annotation.txt', "rb").readlines()[1].replace('Logged in with username ', '').rstrip()
#upload data
if str(args.upload_files) == 'yes':
df_reads = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['R1','R2','genome_name_reads'])
R1_list = df_reads['R1'].dropna().tolist()
R2_list = df_reads['R2'].dropna().tolist()
df_contigs = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['contigs','genome_name_contigs'])
contigs_list = df_contigs['contigs'].dropna().tolist()
for R1 in R1_list:
print 'Uploading ' + str(R1) + ' to PATRIC...'
os.system('p3-cp ' + str(R1) + ' ws:/' + str(patric_domain) + '/home/AssemblyJob -f')
for R2 in R2_list:
print 'Uploading ' + str(R2) + ' to PATRIC...'
os.system('p3-cp ' + str(R2) + ' ws:/' + str(patric_domain) + '/home/AssemblyJob -f')
for contigs in contigs_list:
print 'Uploading ' + str(contigs) + ' to PATRIC...'
os.system('p3-cp ' + str(contigs) + ' ws:/' + str(patric_domain) + '/home/AssemblyJob -f')
#assembly annotate
if str(args.assembly_annotate) == 'yes':
#reads
df_reads = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['R1','R2','genome_name_reads']).replace(' ','_', regex=True)
R1_list = df_reads['R1'].dropna().tolist()
R2_list = df_reads['R2'].dropna().tolist()
genome_name_list_reads = df_reads['genome_name_reads'].dropna().tolist()
zip(R1_list,R2_list,genome_name_list_reads)
for R1, R2, genome_name in zip(R1_list,R2_list,genome_name_list_reads):
in_file = open('params_reads.json', "rb")
out_file = open('params_reads_out_temp_genome_assemlby_annotation.json', "wb")
reader = in_file.read()
repls1= (('R1', '/' + str(patric_domain) + '/home/AssemblyJob/' + str(R1)),('R2', '/' + str(patric_domain) + '/home/AssemblyJob/' + str(R2)),('Genome_name_path', '/' + str(patric_domain) + '/home/AssemblyJob'),('Genome_name',str(genome_name)),)
writer1 = reduce(lambda a, kv: a.replace(*kv), repls1, reader)
writer2 = out_file.write(writer1)
in_file.close()
out_file.close()
os.system('appserv-start-app ComprehensiveGenomeAnalysis params_reads_out_temp_genome_assemlby_annotation.json \"[email protected]/home/\"'+ ' > ' + str(genome_name) + '_job_ID_temp_genome_assemlby_annotation.txt')
job_id = open(str(genome_name) + '_job_ID_temp_genome_assemlby_annotation.txt', "rb").readline().replace('Started task ', '').rstrip()
print "Comprehensive Genome Analysis job sent for " + str(genome_name) + ' as job id ' + job_id
#contigs
df_contigs = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['contigs','genome_name_contigs']).replace(' ','_', regex=True)
contigs_list = df_contigs['contigs'].dropna().tolist()
genome_name_list_contigs = df_contigs['genome_name_contigs'].dropna().tolist()
zip(contigs_list,genome_name_list_contigs)
for contigs, genome_name in zip(contigs_list,genome_name_list_contigs):
in_file = open('params_contigs.json', "rb")
out_file = open('params_contigs_out_temp_genome_assemlby_annotation.json', "wb")
reader = in_file.read()
repls1= (('contigs_path', '/' + str(patric_domain) + '/home/AssemblyJob/' + str(contigs)),('out_path', '/' + str(patric_domain) + '/home/AssemblyJob'),('Genome_name',str(genome_name)))
writer1 = reduce(lambda a, kv: a.replace(*kv), repls1, reader)
writer2 = out_file.write(writer1)
in_file.close()
out_file.close()
os.system('appserv-start-app ComprehensiveGenomeAnalysis params_contigs_out_temp_genome_assemlby_annotation.json \"[email protected]/home/\"'+ ' > ' + str(genome_name) + '_job_ID_temp_genome_assemlby_annotation.txt')
job_id = open(str(genome_name) + '_job_ID_temp_genome_assemlby_annotation.txt', "rb").readline().replace('Started task ', '').rstrip()
print "Comprehensive Genome Analysis job sent for " + str(genome_name) + ' as job id ' + job_id
#check job
if str(args.check_job) == 'yes':
df_reads = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['R1','R2','genome_name_reads']).replace(' ','_', regex=True)
genome_name_list_reads = df_reads['genome_name_reads'].dropna().tolist()
df_contigs = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['contigs','genome_name_contigs']).replace(' ','_', regex=True)
genome_name_list_contigs = df_contigs['genome_name_contigs'].dropna().tolist()
genome_name_list = genome_name_list_reads + genome_name_list_contigs
for genome_name in genome_name_list:
job_id2 = open(str(genome_name) + '_job_ID_temp_genome_assemlby_annotation.txt', "rb").readline().replace('Started task ', '').rstrip()
while True:
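            # Poll PATRIC until the status line becomes "<job id>: completed",
            # sleeping 5 minutes between checks.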
os.system('p3-job-status' + ' ' + job_id2 + ' > ' + str(genome_name) + '_job_status_temp_genome_assemlby_annotation.txt')
job_id_status = open(str(genome_name) + '_job_status_temp_genome_assemlby_annotation.txt', "rb").readline().rstrip()
print 'Checking status of ' + str(genome_name) + ' as job id ' + job_id_status
t = time.localtime()
current_time = time.strftime('%H:%M:%S', t)
print 'Current time: ' + current_time
if job_id_status == job_id2 + ': completed':
break
time.sleep(300) #check status of first jobs every 300 seconds (ie 5 minutes)
print 'Comprehensive Genome Analysis done for ' + str(genome_name)
#download data
if not os.path.exists(str(args.output_folder)):
os.mkdir(str(args.output_folder))
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/FullGenomeReport.html\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_FullGenomeReport.html')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.contigs.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_contigs.fasta')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.txt\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.feature_protein.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_protein.fasta')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.feature_dna.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_DNA.fasta')
#add column with genome name
df = pd.read_csv(str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt', sep='\t')
df['genome_name']=str(genome_name)
column_order = ['genome_name','contig_id','feature_id','type','location','start','stop','strand','function','aliases','plfam','pgfam','figfam','evidence_codes','nucleotide_sequence','aa_sequence']
df[column_order].to_csv(str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt', sep='\t', index=False)
#download data
if str(args.download_reports) == 'yes':
df_reads = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['R1','R2','genome_name_reads']).replace(' ','_', regex=True)
genome_name_list_reads = df_reads['genome_name_reads'].dropna().tolist()
df_contigs = pd.read_csv(str(args.metadata_file), sep='\t', usecols=['contigs','genome_name_contigs']).replace(' ','_', regex=True)
genome_name_list_contigs = df_contigs['genome_name_contigs'].dropna().tolist()
genome_name_list = genome_name_list_reads + genome_name_list_contigs
for genome_name in genome_name_list:
if not os.path.exists(str(args.output_folder)):
os.mkdir(str(args.output_folder))
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/FullGenomeReport.html\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_FullGenomeReport.html')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.contigs.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_contigs.fasta')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.txt\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.feature_protein.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_protein.fasta')
os.system('p3-cp ws:\"/' + str(patric_domain) + '/home/AssemblyJob/.' + str(genome_name) + '/.annotation/annotation.feature_dna.fasta\"' + ' ' + str(args.output_folder) +'/'+str(genome_name) + '_DNA.fasta')
#add column with genome name
df = pd.read_csv(str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt', sep='\t')
df['genome_name']=str(genome_name)
column_order = ['genome_name','contig_id','feature_id','type','location','start','stop','strand','function','aliases','plfam','pgfam','figfam','evidence_codes','nucleotide_sequence','aa_sequence']
df[column_order].to_csv(str(args.output_folder) +'/'+str(genome_name) + '_annotation.txt', sep='\t', index=False)
#delete temp files
temp_filter_files=glob.glob('*_temp_genome_assemlby_annotation.txt')
for temp_file in temp_filter_files:
os.remove(str(temp_file))
| [
"[email protected]"
]
| |
d83ee3ab1824a8146fa04d80c14a0b011af7d09e | 047031e749f95f385e45629e540b002e132d8520 | /PMT/projects/migrations/0003_project_slug.py | fff1ac6e753b1efd4e5570652bdc7de0ab95156a | []
| no_license | carsonalexander14/project-management-tool | 907e968a8c2a7a2914d992f81cfe54ae83294b8a | 8987b22364e366f033ee739690598206aab00ad6 | refs/heads/master | 2022-11-26T16:37:02.772473 | 2021-04-15T17:40:47 | 2021-04-15T17:40:47 | 243,101,283 | 0 | 0 | null | 2022-11-22T07:54:47 | 2020-02-25T20:59:11 | CSS | UTF-8 | Python | false | false | 391 | py | # Generated by Django 3.0.3 on 2021-01-21 02:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0002_remove_position_related_skill'),
]
operations = [
migrations.AddField(
model_name='project',
name='slug',
field=models.SlugField(null=True),
),
]
| [
"[email protected]"
]
| |
1a7942bfa09a42c5b1e7c442e32d5b0be847dcc1 | cbb805d9efd3bd5a03a32a234c84eca06297e21c | /main.py | 6099667cb777dfff3ef82c4a1085fb2d5b133bbf | []
| no_license | kaungmyatthumdy/FlaskWithJinja | 4cac0dad018135b5165fb93aaaf8137be5552569 | 8dc403a1074b28bd500b7daa5ccef7dd68cad38f | refs/heads/master | 2020-03-22T02:53:58.546975 | 2018-07-02T08:03:58 | 2018-07-02T08:03:58 | 139,400,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | from flask import Flask, render_template
myapp = Flask(__name__)
@myapp.route("/")
def hello():
return render_template("index.html")
if __name__ == "__main__":
myapp.run(debug=True)
| [
"[email protected]"
]
| |
a06d251656d70de9cf2669ad98288f791db8af37 | 150464efa69db3abf328ef8cd912e8e248c633e6 | /_4.python/__code/Python大數據特訓班(第二版)/ch03/ch03_all.py | 5b3de4b523557a5a8656e835bb3ba0eb10899289 | []
| no_license | bunshue/vcs | 2d194906b7e8c077f813b02f2edc70c4b197ab2b | d9a994e3afbb9ea84cc01284934c39860fea1061 | refs/heads/master | 2023-08-23T22:53:08.303457 | 2023-08-23T13:02:34 | 2023-08-23T13:02:34 | 127,182,360 | 6 | 3 | null | 2023-05-22T21:33:09 | 2018-03-28T18:33:23 | C# | UTF-8 | Python | false | false | 10,672 | py | # filewrite1.py
content='''Hello Python
中文字測試
Welcome'''
f=open('file1.txt', 'w' ,encoding='utf-8', newline="")
f.write(content)
f.close()
# filewrite2.py
content='''Hello Python
中文字測試
Welcome'''
with open('file1.txt', 'w' ,encoding='utf-8', newline="") as f:
f.write(content)
# fileread1.py
with open('file1.txt', 'r', encoding='utf-8') as f:
output_str=f.read(5)
print(output_str) # Hello
# fileread2.py
with open('file1.txt', 'r', encoding ='UTF-8') as f:
print(f.readline())
print(f.readline(3))
# fileread3.py
with open('file1.txt', 'r', encoding='utf-8') as f:
content=f.readlines()
print(type(content))
print(content)
# fileread4.py
with open('file2.txt', 'r', encoding ='UTF-8') as f:
print(f.readlines())
# csv_read.py
import csv
# Open the csv file
with open('school.csv', newline='') as csvfile:
    # Read the csv file contents
    rows = csv.reader(csvfile)
    # Display each row with a loop
for row in rows:
print(row)
# csv_read_dict.py
import csv
# Open the csv file
with open('school.csv', newline='') as csvfile:
    # Read the csv contents, converting each row to a dictionary
    rows = csv.DictReader(csvfile)
    # Display each row with a loop
for row in rows:
print(row['座號'],row['姓名'],row['國文'],row['英文'],row['數學'])
# csv_write_list1.py
import csv
with open('test1.csv', 'w', newline='') as f:
    # Create a csv writer object
    writer = csv.writer(f)
    # Write the header fields and the data
writer.writerow(['座號', '姓名', '國文', '英文', '數學'])
writer.writerow([1, '葉大雄', 65, 62, 40])
writer.writerow([2, '陳靜香', 85, 90, 87])
writer.writerow([3, '王聰明', 92, 90, 95])
# csv_write_list2.py
import csv
# Build the csv data as a two-dimensional list
csvtable = [
['座號', '姓名', '國文', '英文', '數學'],
[1, '葉大雄', 65, 62, 40],
[2, '陳靜香', 85, 90, 87],
[3, '王聰明', 92, 90, 95]
]
# Write to the csv file
with open('test2.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(csvtable)
# csv_write_dict.py
import csv
with open('test3.csv', 'w', newline='') as csvfile:
    # Define the fields
    fieldnames = ['座號', '姓名', '國文', '英文', '數學']
    # Write dictionaries to the csv file
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    # Write the field names as the header
    writer.writeheader()
    # Write the data
writer.writerow({'座號': 1, '姓名': '葉大雄', '國文': 65, '英文': 62, '數學': 40})
writer.writerow({'座號': 2, '姓名': '陳靜香', '國文': 85, '英文': 90, '數學': 87})
writer.writerow({'座號': 3, '姓名': '王聰明', '國文': 92, '英文': 90, '數學': 95})
# jsonload1.py
import json
class_str = """
{
"一年甲班": [
{
"座號": 1,
"姓名": "葉大雄",
"國文": 65,
"英文": 62,
"數學": 40
},
{
"座號": 2,
"姓名": "陳靜香",
"國文": 85,
"英文": 90,
"數學": 87
},
{
"座號": 3,
"姓名": "王聰明",
"國文": 92,
"英文": 90,
"數學": 95
}
]
}
"""
datas = json.loads(class_str)
print(type(datas))
for data in datas["一年甲班"]:
print(data, data['姓名'])
# jsonload2.py
import json
with open('class_str.json', 'r', encoding='utf-8') as f:
datas = json.load(f)
print(type(datas))
for data in datas["一年甲班"]:
print(data, data['姓名'])
# jsondump1.py
import json
with open('class_str.json', 'r', encoding='utf-8') as f:
datas = json.load(f)
print(datas, type(datas))
dumpdata = json.dumps(datas, ensure_ascii=False)
print(dumpdata, type(dumpdata))
# jsondump2.py
import json
with open('class_str.json', 'r', encoding='utf-8') as f:
datas = json.load(f)
with open('new_class_str.json', 'w', encoding='utf-8') as f:
dumpdata = json.dump(datas, f, ensure_ascii=False)
# xlsx_write.py
import openpyxl
# Create a workbook
workbook=openpyxl.Workbook()
# Get the first worksheet
sheet = workbook.worksheets[0]
# Write data by cell position
sheet['A1'] = '一年甲班'
# Write data from lists
listtitle=['座號', '姓名', '國文', '英文', '數學']
sheet.append(listtitle)
listdatas=[[1, '葉大雄', 65, 62, 40],
[2, '陳靜香', 85, 90, 87],
[3, '王聰明', 92, 90, 95]]
for listdata in listdatas:
sheet.append(listdata)
# Save the file
workbook.save('test.xlsx')
# xlsx_read.py
import openpyxl
# Read the file
workbook = openpyxl.load_workbook('test.xlsx')
# Get the first worksheet
sheet = workbook.worksheets[0]
# Get a specific cell
print(sheet['A1'], sheet['A1'].value)
# Get the total number of rows and columns
print(sheet.max_row, sheet.max_column)
# Display each cell's data
for i in range(1, sheet.max_row+1):
for j in range(1, sheet.max_column+1):
print(sheet.cell(row=i, column=j).value,end=" ")
print()
sheet['A1'] = '二年甲班'
workbook.save('test.xlsx')
# sqlite_cursor.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
cursor = conn.cursor() # create a cursor object
# Create a table
sqlstr='''CREATE TABLE IF NOT EXISTS scores \
("id" INTEGER PRIMARY KEY NOT NULL,
"name" TEXT NOT NULL,
"chinese" INTEGER NOT NULL,
"english" INTEGER NOT NULL,
"math" INTEGER NOT NULL
)
'''
cursor.execute(sqlstr)
# Insert records
cursor.execute('insert into scores values(1, "葉大雄", 65, 62, 40)')
cursor.execute('insert into scores values(2, "陳靜香", 85, 90, 87)')
cursor.execute('insert into scores values(3, "王聰明", 92, 90, 95)')
conn.commit() # commit the changes
conn.close() # close the database connection
# sqlite_crud1.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
# Create a table
sqlstr='''CREATE TABLE IF NOT EXISTS scores \
("id" INTEGER PRIMARY KEY NOT NULL,
"name" TEXT NOT NULL,
"chinese" INTEGER NOT NULL,
"english" INTEGER NOT NULL,
"math" INTEGER NOT NULL
)
'''
conn.execute(sqlstr)
conn.commit() # commit the changes
conn.close() # close the database connection
# sqlite_crud2.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
# Define the data list
datas = [[1,'葉大雄',65,62,40],
[2,'陳靜香',85,90,87],
[3,'王聰明',92,90,95]]
# Insert the data
for data in datas:
conn.execute("INSERT INTO scores (id, name, chinese, english, math) VALUES \
({}, '{}', {}, {}, {})".format(data[0], data[1], data[2], data[3], data[4]))
conn.commit() # commit the changes
conn.close() # close the database connection
# sqlite_crud3.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
# Update data
conn.execute("UPDATE scores SET name='{}' WHERE id={}".format('林胖虎', 1))
conn.commit() # commit the changes
conn.close() # close the database connection
# sqlite_crud4.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
# Delete data
conn.execute("DELETE FROM scores WHERE id={}".format(1))
conn.commit() # commit the changes
conn.close() # close the database connection
# fetchall.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
cursor = conn.execute('select * from scores')
rows = cursor.fetchall()
# Display the raw data
print(rows)
# Display each record in turn
for row in rows:
print(row[0],row[1])
conn.close() # close the database connection
# fetchone.py
import sqlite3
conn = sqlite3.connect('school.db') # open the database connection
cursor = conn.execute('select * from scores')
row = cursor.fetchone()
print(row[0], row[1])
conn.close() # close the database connection
# mysqltable.py
import pymysql
conn = pymysql.connect('localhost',port=3306,user='root',passwd='1234',charset='utf8', db='pythondb') # connect to the database
with conn.cursor() as cursor:
sql = """
CREATE TABLE IF NOT EXISTS Scores (
ID int NOT NULL AUTO_INCREMENT PRIMARY KEY,
Name varchar(20),
Chinese int(3),
English int(3),
Math int(3)
);
"""
    cursor.execute(sql) # execute the SQL statement
conn.commit() # commit to the database
conn.close()
# mysqlinsert.py
import pymysql
conn = pymysql.connect('localhost',port=3306,user='root',passwd='1234',charset='utf8', db='pythondb') # connect to the database
with conn.cursor() as cursor:
sql = """
insert into scores (Name, Chinese, English, Math) values
('葉大雄',65,62,40),
('陳靜香',85,90,87),
('王聰明',92,90,95)
"""
cursor.execute(sql)
conn.commit() # commit to the database
conn.close()
# mysqlquery.py
import pymysql
conn = pymysql.connect('localhost',port=3306,user='root',passwd='1234',charset='utf8', db='pythondb') # connect to the database
with conn.cursor() as cursor:
sql = "select * from scores"
cursor.execute(sql)
    datas = cursor.fetchall() # fetch all records
    print(datas)
    print('-' * 30) # draw a separator line
sql = "select * from scores"
cursor.execute(sql)
    data = cursor.fetchone() # fetch the first record
print(data)
conn.close()
# mysqlupdate.py
import pymysql
conn = pymysql.connect('localhost',port=3306,user='root',passwd='1234',charset='utf8', db='pythondb') # connect to the database
with conn.cursor() as cursor:
sql = "update scores set Chinese = 98 where ID = 3"
cursor.execute(sql)
conn.commit()
sql = "select * from scores where ID = 3"
cursor.execute(sql)
data = cursor.fetchone()
print(data)
conn.close()
# mysqldelete.py
import pymysql
conn = pymysql.connect('localhost',port=3306,user='root',passwd='1234',charset='utf8', db='pythondb') # connect to the database
with conn.cursor() as cursor:
sql = "delete from scores where ID = 3"
cursor.execute(sql)
conn.commit()
sql = "select * from scores"
cursor.execute(sql)
data = cursor.fetchall()
print(data)
conn.close()
# LinkGoogleSheet.py
import gspread
from oauth2client.service_account import ServiceAccountCredentials as sac
# Set the key file path and the authorization scope
auth_json = 'PythonConnectGsheet1-6a6086d149c5.json'
gs_scopes = ['https://spreadsheets.google.com/feeds']
# Connect to the spreadsheet service
cr = sac.from_json_keyfile_name(auth_json, gs_scopes)
gc = gspread.authorize(cr)
# Open the spreadsheet
spreadsheet_key = '1OihpM657yWo1lc3RjskRfZ8m75dCPwL1IPwoDXSvyzI'
sheet = gc.open_by_key(spreadsheet_key)
# Open the worksheet
wks = sheet.sheet1
# Clear all contents
wks.clear()
# Append rows
listtitle=['座號', '姓名', '國文', '英文', '數學']
wks.append_row(listtitle) # header row
listdatas=[[1, '葉大雄', 65, 62, 40],
[2, '陳靜香', 85, 90, 87],
[3, '王聰明', 92, 90, 95]]
for listdata in listdatas:
    wks.append_row(listdata) # data rows
| [
"[email protected]"
]
| |
199aa3a40db1995b68f6f7db614edf3060fc6fa3 | 925293c229ea4e51ede2f00964e1d474693ed726 | /neuralnet.py | bd27014f8dd8db09adb7dafa5205eeb3186dfdd2 | []
| no_license | jessprim/IntroNeuralNet | 4041a53fc07c9c7dfedbb4d74f21f827f570a0d0 | e7693445d1e9aa7b05e9ad96e4f57175545b8181 | refs/heads/master | 2021-04-30T00:07:10.068851 | 2018-02-15T04:42:05 | 2018-02-15T04:42:05 | 121,570,225 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | # Load Dependency:
import numpy as np
# Activation Function:
def activate(x,deriv=False):
if(deriv==True):
return x*(1-x)
return 1/(1+np.exp(-x))
# Input Dataset:
_input = np.array([ [0,0,1],
[0,1,1],
[1,0,1],
[1,1,1] ])
# Output Dataset
output = np.array([[0,0,1,1]]).T
# Set Seed - For reproducibility
np.random.seed(5)
# Synaptic Random Weights With Mean 0
synapse = 2*np.random.random((3,1)) - 1
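# Weights are drawn uniformly from [-1, 1): 2*rand(0..1) - 1 centres them on zero.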
# Putting It Together:
for j in range(70000):
# Layers
# Forward propagation
layer1 = _input
layer2 = activate(np.dot(layer1,synapse))
# Calculate Error (Actual - Predicted Values)
error = output - layer2
# Multiply The Error By Slope of The Sigmoids (Here we calculate the Sigmoid's derivative)
delta = error * activate(layer2,True)
# Update Weights
synapse += np.dot(layer1.T,delta)
print "Output After Training:"
print layer2
| [
"[email protected]"
]
| |
f4b7beae000280cf87847699e43e1a68ed71e434 | c8dbd229195a384ae8e4e671a6a59b67352de167 | /python_rest/venv/bin/easy_install | 8abed8dd358f7d3df9082c4db730064e18483fd6 | []
| no_license | shinde-prasad/booklist-oops-js | a97990c9eb4234929538c11b8831359a1853fc8b | 99b4a5c9c12a4bc08728f8805215a27a53e1646e | refs/heads/master | 2020-03-29T15:20:54.691354 | 2018-09-27T11:49:18 | 2018-09-27T11:49:18 | 150,058,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 313 | #!/mnt/D0A2B33FA2B328BC/Frontend-Workspace/VS-workspace/oop-book-list/python_rest/venv/bin/python2
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
453bf4ec36013630bd101b835594ecaf92567b0e | 0146c1c427453f170ba0cb1b76a8ac1f1d2b194c | /juggle.py | 0bde64e97df1c342e13eedbf82485daa1d5a2d81 | []
| no_license | ismarou/isaacgym_sandbox | ee7d71e5702cfdc1eda7d57b2cce8c8d6c7ea7de | 30368cb37d07bfdce45b2fbb3ba3ec86d66cdc95 | refs/heads/master | 2023-04-28T06:46:50.570236 | 2021-05-18T08:53:08 | 2021-05-18T08:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,097 | py | """
[email protected]
"""
import matplotlib.pyplot as plt
import numpy as np
import time
from policies import CubicMP, PiecewiseMP
from envs import IsaacSim, IsaacJugglingWam4
from ereps import eREPS
# stroke based movement primitive to get going
poss_stroke = np.array([[- 0.10, 1.20, 0.0, 1.25],
[- 0.09, 1.01, 0.0, 0.99],
[+ 0.08, 1.16, 0.0, 1.19],
[+ 0.112, 0.95, 0.0, 1.09],
[- 0.08, 1.18, 0.0, 1.22]])
vels_stroke = np.zeros_like(poss_stroke)
times_stroke = np.array([0.095, 0.475, 0.57, 0.95])
# cyclic movement primitive
poss_cyclic = np.array([[- 0.08, 1.19, 0.0, 1.22],
[- 0.12, 1.03, 0.0, 0.95],
[+ 0.08, 1.19, 0.0, 1.22],
[+ 0.12, 1.03, 0.0, 0.95]])
vels_cyclic = np.zeros_like(poss_cyclic)
times_cyclic = np.array([0.095, 0.475, 0.57, 0.95])
# visible parameters to be trained
param_names = ['pos1_act0_s', 'pos1_act1_s', 'pos1_act3_s',
'pos2_act0_s', 'pos2_act1_s', 'pos2_act3_s',
'pos3_act0_s', 'pos3_act1_s', 'pos3_act3_s',
't1_s', 't2_s', 't3_s',
'pos0_act0_c', 'pos0_act1_c', 'pos0_act3_c',
'pos1_act0_c', 'pos1_act1_c', 'pos1_act3_c',
'pos2_act0_c',
'pos3_act0_c',
't1_c']
# cosntraints on hidden parameters
constraints = {'equal': [['pos4_act0_s', 'pos0_act0_c'],
['pos4_act1_s', 'pos0_act1_c'],
['pos4_act3_s', 'pos0_act3_c'],
['pos2_act1_c', 'pos0_act1_c'],
['pos2_act3_c', 'pos0_act3_c'],
['pos3_act1_c', 'pos1_act1_c'],
['pos3_act3_c', 'pos1_act3_c']],
'offset': [['t3_c', 't1_c', 0.475]],
'mirror': []}
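# Reward: each environment earns +1 per simulation step while all of its
# balls stay above 0.5 m.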
def reward(sim):
rewards = np.zeros(len(sim.envs))
for i in range(len(sim.envs)):
env = sim.envs[i]
x_balls = env.get_ball_positions()
heigt_balls = np.array([x.z for x in x_balls])
if min(heigt_balls) > 0.5:
rewards[i] = 1
return rewards
class Func:
""" interface for eREPS to evaluate a batch of parameters """
def __init__(self, sim, policies):
self.sim = sim
self.policies = policies
self.dm_act = len(policies[0].get_params())
self.nb_envs = len(self.sim.envs)
self.dt = self.sim.envs[0].dt
def eval(self, params_batch):
q = []
dq = []
tau = []
for i in range(self.nb_envs):
self.policies[i].set_params(params_batch[i])
q0, _, _ = self.policies[i].get_action(0)
self.sim.reset(q=q0)
kt = 0
returns = np.zeros(self.nb_envs)
for _ in range(1000):
for k_envs in range(self.nb_envs):
q_des, dq_des, tau_des = self.policies[k_envs].get_action(kt*self.dt)
self.sim.envs[k_envs].apply_action(q_des, dq_des, tau_des)
kt = self.sim.step()
q.append(self.sim.envs[0].pos)
dq.append(self.sim.envs[0].vel)
returns += reward(self.sim)
return returns
def main():
nb_envs = 30
policies = []
for _ in range(nb_envs):
policy_stroke = CubicMP(poss_stroke, vels_stroke, times_stroke, cyclic=False, id='s')
policy_cyclic = CubicMP(poss_cyclic, vels_cyclic, times_cyclic, cyclic=True, id='c')
intervals = np.array([policy_stroke.duration, np.inf])
policy = PiecewiseMP([policy_stroke, policy_cyclic], intervals, visible_params=param_names, constraints=constraints)
policies.append(policy)
sim = IsaacSim(IsaacJugglingWam4, num_envs=nb_envs)
func = Func(sim, policies)
mu0 = policies[0].get_params()
# reps = eREPS(func=func, n_episodes=nb_envs, kl_bound=20, mu0=mu0, cov0=5e-4)
# reps.run(10, verbose=True)
func.eval(np.tile(mu0, (nb_envs, 1)))
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
28c7b91ebf60af1184a4ec46854dbac4312490ea | a62b4b44572f6551db6ea3498e38f57476e3e911 | /demo02.py | 47a32a3ce85fc82b8d78b233062c09dfad626889 | []
| no_license | lmjdas/exe | 6da004ee661cf06ff0c3dce1c7970f3dfee67b3e | 6bd5472b8869dbbd1a2b34a04c9ca23061693deb | refs/heads/main | 2023-08-23T01:51:32.144750 | 2021-10-15T13:15:35 | 2021-10-15T13:15:35 | 416,738,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20 | py | 2+2
print("智障")
| [
"[email protected]"
]
| |
5ab5444b4709afc95701728675b9a2b1d60faf77 | 165dcb74cd7c489fa65d2c02e2421a5928f95a02 | /find_path.py | 22221e79a664f4ec1ab0bf3b5a44af60c6f556c5 | []
| no_license | TongBaoTran/Graph-Theory-Programming | 6e972cc47a10dbfcba6ae53bbce825ef7ee0e622 | e3fb2ee518aed3c1e8cd8126843d92285b56378d | refs/heads/master | 2022-06-05T17:26:37.506384 | 2020-03-06T18:50:27 | 2020-03-06T18:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 6 19:41:44 2020
@author: baotr
"""
class Graph(object):
def __init__(self, graph_dict=None):
if graph_dict == None:
graph_dict={}
self.graph_dict=graph_dict
def neighbors(self, node):
return self.graph_dict[node]
def add_vertex(self, vertex):
if vertex not in self.graph_dict.keys():
self.graph_dict[vertex]=[]
def vertices(self):
return list(self.graph_dict.keys())
def add_edge(self, edge):
vert1, vert2 = edge[0], edge[1]
if vert1 in self.graph_dict.keys():
self.graph_dict[vert1].append(vert2)
else:
self.graph_dict[vert1]=[vert2]
## All paths between two nodes
def find_path(self, start_vertex, end_vertex, path=None, paths=None):
""" find a path from start_vertex to end_vertex in graph """
if path == None:
path = []
if paths==None:
paths=[]
self.paths=paths
if start_vertex not in self.graph_dict.keys():
return None
path = path + [start_vertex]
if start_vertex == end_vertex:
return path
for vertex in self.graph_dict[start_vertex]:
if vertex not in path:
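                # A recursive call returns either a finished path (a list of
                # vertex names) or the shared list of paths; the isinstance
                # check below keeps only finished paths.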
new_path = self.find_path(vertex, end_vertex, path, paths)
if new_path and isinstance(new_path[0], str):
self.paths.append(new_path)
return self.paths
graph_dict = { "a" : ["b", "c"],
"b" : ["a"],
"c" : ["b", "a", "e", "d"],
"d" : ["b", "c","e"],
"e" : ["d", "b"],
"f" : [] }
graph = Graph(graph_dict)
print(graph.find_path("a", "d")) | [
"[email protected]"
]
| |
8e4deeb02622dcbe1ed430ad3b1918bef54bf16d | 0d3ab7354bf54ddd78ee2d3a48796691eb41bb68 | /mnist.py | c665b59b4ae71dd6c91400d972f65b888837a7b4 | []
| no_license | WinstonQian7/ml-learning-repo | 89ce2e7d7b5bd83ffa2f363687722c7b9272e070 | 8d8eb392e6b62a30ff4d8b30c9a4d77ea54ea2c4 | refs/heads/main | 2023-08-19T10:44:49.326490 | 2021-10-14T15:10:25 | 2021-10-14T15:10:25 | 405,760,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# reshape to be [samples][width][height][channels]
X_train = X_train.reshape((X_train.shape[0], 28, 28, 1)).astype('float32')
X_test = X_test.reshape((X_test.shape[0], 28, 28, 1)).astype('float32')
# normalize inputs from 0-255 to 0-1
X_train = X_train / 255
X_test = X_test / 255
# one hot encode outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
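# Pixels are scaled to [0, 1] and labels one-hot encoded; num_classes is 10 for MNIST.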
# define the larger model
def larger_model():
# create model
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=(28, 28, 1), activation='relu'))
model.add(MaxPooling2D())
model.add(Conv2D(15, (3, 3), activation='relu'))
model.add(MaxPooling2D())
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
#T1 0.73%
#T2 0.64%
#T3 0.56%
#T4 0.68%
#T5 0.70%
#T6 0.63%
#T7 0.67%
#T8 0.71%
#T9 0.72%
#T10 0.81%
#TA
def test_model():
model = Sequential()
model.add(Conv2D(32, (5,5), input_shape=(28,28,1), activation = 'relu'))
model.add(MaxPooling2D())
model.add(Conv2D(64, (5,5), activation = 'relu'))
model.add(MaxPooling2D())
model.add(BatchNormalization())
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
#Error ~0.54%
# build the model
model = test_model()
# Fit the model
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=300)
# Final evaluation of the model
scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Error: %.2f%%" % (100-scores[1]*100))
import numpy as np
from keras.preprocessing import image
img = image.load_img | [
"[email protected]"
]
| |
7a06523870d885350bcd34d61515636f04dbade5 | a2e8e2d72bc084c2fd6edc9d0d4fcfab83b0dc41 | /src/normalAnalysis.py | e752f584036d7f0c3533a0f7152524fe2e7d0259 | []
| no_license | kingjin94/enhanced_simulation | 6899df1968a8e5d4be5b2881494f8d0a1b7f8549 | 45021f9691e5f772ef4840d69b0d504a6ee79441 | refs/heads/master | 2020-06-13T13:14:48.414887 | 2019-11-13T14:27:25 | 2019-11-13T14:27:25 | 194,667,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | import rospy
import numpy as np
from geometry_msgs.msg import WrenchStamped
from gazebo_msgs.msg import ContactsState
rospy.init_node("Test", anonymous=True)
# get state
msg = rospy.wait_for_message("/panda/ft/tool", WrenchStamped)
F = np.asarray((msg.wrench.force.x, msg.wrench.force.y, msg.wrench.force.z))
M = np.asarray((msg.wrench.torque.x, msg.wrench.torque.y, msg.wrench.torque.z))
# get ground truth, normal is published in world frame it seems
msg_contact = rospy.wait_for_message("/panda/bumper/panda_probe_ball", ContactsState)
# minimize force and torque errors wrt. f_n, f_t1/2, phi, theta
from sympy import * # apt install python-sympy
theta, phi = symbols('theta phi', real=True)
r = 0.03
L = 0.12 + 0.7
p_coll = Matrix([[r*sin(theta)*cos(phi)],
[r*sin(theta)*sin(phi)],
[r*cos(theta)]])
P_hand = Matrix([[0],[0],[-L]])
Normal = Matrix([[sin(theta)*cos(phi)],
[sin(theta)*sin(phi)],
[cos(theta)]])
T_1 = 1/r * Matrix([[-r*sin(theta)*sin(phi)],
[r*sin(theta)*cos(phi)],
[0]])
T_2 = 1/r * Matrix([[r*cos(theta)*cos(phi)],
[r*cos(theta)*sin(phi)],
[r*(-sin(theta))]])
# global force balance
f_n, f_t1, f_t2 = symbols('f_n f_t1 f_t2')
F_coll = f_n*(-Normal) + f_t1*T_1 + f_t2*T_2
f_obs_x, f_obs_y, f_obs_z = symbols('f_obs_x f_obs_y f_obs_z')
F_obs = Matrix([[f_obs_x], [f_obs_y], [f_obs_z]])
equ_F = F_coll + F_obs
# global torque balance about contact point
m_obs_x, m_obs_y, m_obs_z = symbols('m_obs_x m_obs_y m_obs_z')
M_obs = Matrix([[m_obs_x], [m_obs_y], [m_obs_z]])
equ_M = M_obs + (P_hand-p_coll).cross(F_obs)
equ_M = simplify(equ_M)
# apt install python-scipy
from scipy.optimize import minimize
# does not work; approach too dumb, or just wrong?
x = (f_n, f_t1, f_t2, phi, theta)
lambda_F = lambdify(x, equ_F.subs(f_obs_x, F[0]).subs(f_obs_y, F[1]).subs(f_obs_z, F[2]), "numpy")
lambda_M = lambdify(x, equ_M.subs(m_obs_x, M[0]).subs(m_obs_y, M[1]).subs(m_obs_z, M[2]).subs(f_obs_x, F[0]).subs(f_obs_y, F[1]).subs(f_obs_z, F[2]), "numpy")
x0 = np.array([0.,0.,0.,0.,0.])
def residuum(x):
return np.sum(lambda_F(*x))+np.sum(lambda_M(*x))
minimize(residuum, x0, method='nelder-mead')
| [
"[email protected]"
]
| |
321e910f13675a64ef9a7d4e04c7325c66131994 | 71ca21a02e5ab15715fc9b2c89ef6d2fb4cef75d | /studies_api/rest/v1/exceptions/submission.py | 3b4831900bf6aa867f45aca407a8729eafdf7e6c | []
| no_license | stephenomalley/study-api | 3da4dbce9c17ddf56bec431beeebed722557655a | 669497dbad65466b456728428f70ed8f124527f7 | refs/heads/master | 2021-05-04T07:30:06.173914 | 2016-10-13T19:27:44 | 2016-10-13T19:27:44 | 70,636,540 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 309 | py | class SubmissionLimitExceeded(Exception):
"""
Exception thrown when the maximum number of places on a study already exists.
"""
pass
class StudyNotCreated(Exception):
"""
Exception type thrown when an attempt to create a submission with an invalid study id is made.
"""
pass
| [
"[email protected]"
]
| |
47a5283f013e69c0e318103c2aab7676938afc29 | 77e9d856429373258ff98d49fbe0c585549ffda0 | /PJ1_search/search.py | 20d1fb3a755814316c92249569933bbcfa331423 | [
"Apache-2.0"
]
| permissive | beibidesr/CS188-Homework | 7dccf162ffe74ad95c22cba85eb6afdf7fca9ee8 | 6712da1b27907f4096752c379c342481927000c8 | refs/heads/master | 2022-12-22T08:38:06.276030 | 2020-09-27T15:01:20 | 2020-09-27T15:01:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,382 | py | # search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# ([email protected]) and Dan Klein ([email protected]).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel ([email protected]).
"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem.
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state.
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples, (successor,
action, stepCost), where 'successor' is a successor to the current
state, 'action' is the action required to get there, and 'stepCost' is
the incremental cost of expanding to that successor.
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions.
The sequence must be composed of legal moves.
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other maze, the
sequence of moves will be incorrect, so only use this for tinyMaze.
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s, s, w, s, w, w, s, w]
def depthFirstSearch(problem):
fringe = util.Stack()
start = [problem.getStartState(), 0, []]
fringe.push(start)
closed = []
while not fringe.isEmpty():
[state, cost, path] = fringe.pop()
if problem.isGoalState(state):
return path
        if state not in closed:
closed.append(state)
for child_state, child_action, child_cost in problem.getSuccessors(state):
new_cost = cost + child_cost
new_path = path + [child_action]
fringe.push([child_state, new_cost, new_path])
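# breadthFirstSearch below is identical except that the fringe is a FIFO
# queue, so the shallowest nodes are expanded first.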
def breadthFirstSearch(problem):
fringe = util.Queue()
start = [problem.getStartState(), 0, []]
fringe.push(start) # queue push at index_0
closed = []
while not fringe.isEmpty():
[state, cost, path] = fringe.pop()
if problem.isGoalState(state):
return path
if state not in closed:
closed.append(state)
for child_state, child_action, child_cost in problem.getSuccessors(state):
new_cost = cost + child_cost
new_path = path + [child_action]
fringe.push([child_state, new_cost, new_path])
def uniformCostSearch(problem):
fringe = util.PriorityQueue()
start = [problem.getStartState(), 0, []]
p = 0
fringe.push(start, p) # queue push at index_0
closed = []
while not fringe.isEmpty():
[state, cost, path] = fringe.pop()
if problem.isGoalState(state):
return path
if state not in closed:
closed.append(state)
for child_state, child_action, child_cost in problem.getSuccessors(state):
new_cost = cost + child_cost
new_path = path + [child_action, ]
fringe.push([child_state, new_cost, new_path], new_cost)
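# aStarSearch (below) differs from uniform-cost search only in the push
# priority: g(n) + h(n) rather than g(n) alone.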
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
fringe = util.PriorityQueue()
start = [problem.getStartState(), 0, []]
p = 0
fringe.push(start, p) # queue push at index_0
closed = []
while not fringe.isEmpty():
[state, cost, path] = fringe.pop()
# print(state)
if problem.isGoalState(state):
# print(path)
            return path  # goal reached: return the accumulated action sequence
if state not in closed:
closed.append(state)
for child_state, child_action, child_cost in problem.getSuccessors(state):
new_cost = cost + child_cost
new_path = path + [child_action, ]
fringe.push([child_state, new_cost, new_path], new_cost + heuristic(child_state, problem))
    return []  # fringe exhausted: no path to a goal state exists
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
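# Usage sketch: these functions are normally invoked through the Pac-Man
# driver rather than directly, e.g. (assuming the standard Berkeley layout):
#   python pacman.py -l mediumMaze -p SearchAgent -a fn=astar,heuristic=nullHeuristic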
| [
"[email protected]"
]
| |
8da161cd6f56e6ed705cee2eb410fc227eacde91 | 16fb4d7d9277f738463eaa0a1e5b3e0329df4a7a | /clases.py | 35848f59cd378f34a85b9894cf403b73a42b2ca4 | []
| no_license | gchapa22/python_workshop | e7267840e64980fad6adc6dc86fee521d1f29bbf | 41ef18107952af16632eb8f051463e5689765226 | refs/heads/master | 2016-09-06T18:18:21.122727 | 2014-04-03T17:55:45 | 2014-04-03T17:55:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py |
class Animal(object):
def __init__(self):
self.attr = 1
def speak(self):
return "Noise"
class Dog(Animal):
def speak(self):
return "Waw"
def bite(self):
return "chomp"
class Cat(Animal):
def speak(self):
return "Miau"
fido = Dog()
pelusa = Cat()
print(fido.speak())
print(pelusa.speak())
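# Expected output of the two calls above:
#   Waw
#   Miau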
| [
"[email protected]"
]
| |
40df7bdb6d3ff42134e00c035dac62fe565ad508 | 9181d67cdbc8ffbb8b4bdd8c7605df544fc1a37a | /venv/Scripts/easy_install-script.py | 1d25dfec4918fd0dd33f930999b9228bcb4c8757 | []
| no_license | DmitryDankov207/resender-bot | 21cfc37f007cc6efc67b9af96a78996ade497cad | 07dad8f6dd23e6b601411951af67f59225fbf2c0 | refs/heads/master | 2022-02-27T11:36:51.696951 | 2019-11-04T09:02:38 | 2019-11-04T09:02:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | #!C:\Users\12dda\PycharmProjects\bot\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
]
| |
cc58cd6f621d9910d0ad66afdebbf2d5ef6d9c08 | b2acf2dab56a42c22304b0e236b154d453175d6d | /Forex/Account.py | dd10ebe408b05f733a47a2298d40d575eaa813bd | [
"MIT"
]
| permissive | bitsalt/bitbot | 8cd14da5015598e18e65d41843ab7ecb7d3347da | 24dcc037fcc2778929cb1091f2c87f38359e50c1 | refs/heads/main | 2023-03-29T22:07:01.746273 | 2021-04-13T20:42:25 | 2021-04-13T20:42:25 | 357,523,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,200 | py | import Forex.config as fxconfig
class Account:
def __init__(self):
cfg = fxconfig.make_config_instance()
api = cfg.create_context()
response = api.account.summary(cfg.active_account)
account = response.body['account']
self.id = account.id
#
# Client-assigned alias for the Account. Only provided if the Account
# has an alias set
#
self.alias = account.alias
#
# The home currency of the Account
#
self.currency = account.currency
#
# The current balance of the Account.
#
self.balance = account.balance
#
# ID of the user that created the Account.
#
self.createdByUserID = account.createdByUserID
#
# The date/time when the Account was created.
#
self.createdTime = account.createdTime
#
# The current guaranteed Stop Loss Order mode of the Account.
#
self.guaranteedStopLossOrderMode = account.guaranteedStopLossOrderMode
#
# The total profit/loss realized over the lifetime of the Account.
#
self.pl = account.pl
#
# The total realized profit/loss for the Account since it was last
# reset by the client.
#
self.resettablePL = account.resettablePL
#
# The date/time that the Account's resettablePL was last reset.
#
self.resettablePLTime = account.resettablePLTime
#
# The total amount of financing paid/collected over the lifetime of the
# Account.
#
self.financing = account.financing
#
# The total amount of commission paid over the lifetime of the Account.
#
self.commission = account.commission
#
# The total amount of fees charged over the lifetime of the Account for
# the execution of guaranteed Stop Loss Orders.
#
self.guaranteedExecutionFees = account.guaranteedExecutionFees
#
# Client-provided margin rate override for the Account. The effective
# margin rate of the Account is the lesser of this value and the OANDA
# margin rate for the Account's division. This value is only provided
# if a margin rate override exists for the Account.
#
self.marginRate = account.marginRate
#
# The date/time when the Account entered a margin call state. Only
# provided if the Account is in a margin call.
#
self.marginCallEnterTime = account.marginCallEnterTime
#
# The number of times that the Account's current margin call was
# extended.
#
self.marginCallExtensionCount = account.marginCallExtensionCount
#
# The date/time of the Account's last margin call extension.
#
self.lastMarginCallExtensionTime = account.lastMarginCallExtensionTime
#
# The number of Trades currently open in the Account.
#
self.openTradeCount = account.openTradeCount
#
# The number of Positions currently open in the Account.
#
self.openPositionCount = account.openPositionCount
#
# The number of Orders currently pending in the Account.
#
self.pendingOrderCount = account.pendingOrderCount
#
# Flag indicating that the Account has hedging enabled.
#
self.hedgingEnabled = account.hedgingEnabled
#
# The date/time of the last order that was filled for this account.
#
self.lastOrderFillTimestamp = account.lastOrderFillTimestamp
#
# The total unrealized profit/loss for all Trades currently open in the
# Account.
#
self.unrealizedPL = account.unrealizedPL
#
# The net asset value of the Account. Equal to Account balance +
# unrealizedPL.
#
self.NAV = account.NAV
#
# Margin currently used for the Account.
#
self.marginUsed = account.marginUsed
#
# Margin available for Account currency.
#
self.marginAvailable = account.marginAvailable
#
# The value of the Account's open positions represented in the
# Account's home currency.
#
self.positionValue = account.positionValue
#
# The Account's margin closeout unrealized PL.
#
self.marginCloseoutUnrealizedPL = account.marginCloseoutUnrealizedPL
#
# The Account's margin closeout NAV.
#
self.marginCloseoutNAV = account.marginCloseoutNAV
#
# The Account's margin closeout margin used.
#
self.marginCloseoutMarginUsed = account.marginCloseoutMarginUsed
#
# The Account's margin closeout percentage. When this value is 1.0 or
# above the Account is in a margin closeout situation.
#
self.marginCloseoutPercent = account.marginCloseoutPercent
#
# The value of the Account's open positions as used for margin closeout
# calculations represented in the Account's home currency.
#
self.marginCloseoutPositionValue = account.marginCloseoutPositionValue
#
# The current WithdrawalLimit for the account which will be zero or a
# positive value indicating how much can be withdrawn from the account.
#
self.withdrawalLimit = account.withdrawalLimit
#
# The Account's margin call margin used.
#
self.marginCallMarginUsed = account.marginCallMarginUsed
#
# The Account's margin call percentage. When this value is 1.0 or above
# the Account is in a margin call situation.
#
self.marginCallPercent = account.marginCallPercent
#
# The ID of the last Transaction created for the Account.
#
self.lastTransactionID = account.lastTransactionID
    def get_account_info(self, key):
        """Return one of the account summary attributes captured in __init__."""
        if key == 'currency':
            return self.currency
        return getattr(self, key, None)
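# Minimal usage sketch (illustrative; assumes Forex/config.py supplies
# make_config_instance() wired to valid OANDA v20 credentials):
#   acct = Account()
#   print(acct.id, acct.balance, acct.get_account_info('currency'))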
| [
"[email protected]"
]
| |
33fefc0ea918910f473b074cc5dc93ae1294a282 | 225723d8311f63378b729e576aaa2c0cf977f6c9 | /myCVGenerator/myCVGenerator/wsgi.py | 2876240cf95bae39b2bfdd59b28398ed2f41c215 | []
| no_license | Acerosa/CV-Generator | f7e3625264e1242e8086a85b79717b1ac6fda73a | 389280def918d037b557a909685c3eceaeb99481 | refs/heads/main | 2023-01-04T07:34:28.042004 | 2020-11-03T09:44:33 | 2020-11-03T09:44:33 | 308,640,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for myCVGenerator project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myCVGenerator.settings')
application = get_wsgi_application()
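# Deployment sketch: any WSGI server can serve this module's `application`,
# e.g. `gunicorn myCVGenerator.wsgi:application` (assumes gunicorn is installed).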
| [
"[email protected]"
]
| |
a82572d154e3b2593d02ea5e3d02809cfbda836d | 6084cf0b13307fe7c09e2e336ceaec321f21757b | /small_tests/runner_threading_test.py | c4599ef48bcc96c1f84a3c0bf6239fe5d7a74342 | []
| no_license | asmialek/python_experiments | eae349b718e0e1c3d70f95cee2a0c6e03ba70971 | daef809564afb77b705486bb7241fae6c0940390 | refs/heads/master | 2020-03-12T15:51:02.645817 | 2019-04-03T21:51:41 | 2019-04-03T21:51:41 | 130,701,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,977 | py | import sys
import copy
import builtins
import traceback
# from queue import Queue
import queue
import threading
import time
################# look at me ################################
# http://www.rueckstiess.net/research/snippets/show/ca1d7d90
#############################################################
class ReadOnlyBuiltins(dict):
"""
Type used for a read only version of the __builtins__ dictionary.
"""
# def __hash__(self):
# return hash(repr(self))
    def clear(self):
        raise ValueError("Read only!")
    def __delitem__(self, key):
        raise ValueError("Read only!")
    def pop(self, key, default=None):
        raise ValueError("Read only!")
    def popitem(self):
        raise ValueError("Read only!")
    def setdefault(self, key, default=None):
        raise ValueError("Read only!")
    def __setitem__(self, key, value):
        raise ValueError("Read only!")
    def update(self, dict, **kw):
        raise ValueError("Read only!")
def create_read_only_builtins(builtins_dict):
"""Substitutes given dictionary with a non modifiable version.
Args:
builtins_dict (dict): Dictionary to be modified.
Returns:
(dict): Non modifiable dictionary.
"""
safe_builtins = ReadOnlyBuiltins(builtins_dict)
    def __init__(*args, **kw):
        raise ValueError("Read only!")
ReadOnlyBuiltins.__init__ = __init__
return safe_builtins
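# Example (sketch): mutation attempts on the wrapped mapping now fail fast.
#   safe = create_read_only_builtins({'len': len})
#   safe['open'] = open   # raises ValueError("Read only!")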
class SafeImport(object):
"""Creates safe replacement for builtin `__init__` function. Can import
only from modules whitelist. It is created as a class, because
`multiprocessing.Process` uses pickle for safekeeping, and you cannot
pickle nested functions. `_safe_import` function needs to be nested, to use
`module_whitelist` variable, which needs to be modified from the outside.
Returns:
(func): "Safe" import function.
"""
def __init__(self):
self.module_whitelist = ['time']
def __call__(self, *args, **kwargs):
return self._safe_import
    def _safe_import(self, module_name, globals=None, locals=None,
                     fromlist=(), level=0):
        # level=0 requests an absolute import, as Python 3's __import__
        # expects (-1 was a Python 2 convention and raises ValueError here).
        if module_name in self.module_whitelist:
            return __import__(module_name, globals, locals,
                              fromlist, level)
else:
raise ImportError('Module \'' + module_name + '\' is not on '
'the import '
'whitelist')
def _safe_open(file, mode='r', buffering=-1, encoding=None,
errors=None, newline=None, closefd=True):
"""Creates safe replacement for builtin `open` function.
Todo:
- Check for open modes, whether every destructive one will be
blocked.
"""
for char in mode:
if char in ['w', 'a', '+']:
raise IOError('Mode \'' + char + '\' is disallowed in the '
'sandbox.')
return open(file, mode, buffering, encoding,
errors, newline, closefd)
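# Example (sketch): read access passes through, destructive modes do not.
#   _safe_open('notes.txt')        # fine: read-only
#   _safe_open('notes.txt', 'w')   # raises IOError: mode 'w' is disallowed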
def create_whitelist():
"""Creates builtins whitelist for `exec` environment.
Returns:
(set): Set of names to be whitelisted.
"""
ret = set()
def recurse(item):
if item.__subclasses__():
for sub_item in item.__subclasses__():
ret.add(sub_item.__name__)
recurse(sub_item)
return
ret.add(item.__name__)
recurse(builtins.BaseException)
constants = {'False',
'None',
'True',
'__doc__',
'__name__',
'__package__',
}
types = {'basestring',
'bytearray',
'bytes',
'complex',
'dict',
'float',
'frozenset',
'int',
'long',
'object',
'set',
'str',
'tuple',
'unicode',
}
functions = {'__import__',
'abs',
'all',
'any',
'ascii',
'apply',
'bin',
'bool',
'bytearray',
'bytes',
'callable',
'chr',
'classmethod',
'complex',
'dict',
'dir',
'divmod',
'enumerate',
'filter',
'float',
'format',
'frozenset',
'getattr',
'globals',
'hasattr',
'hash',
                 'help',
'hex',
'id',
'input',
'int',
'isinstance',
'issubclass',
'iter',
'len',
'list',
'locals',
'map',
'max',
'min',
'next',
'object',
'oct',
'ord',
'pow',
'print',
'property',
'range',
'repr',
'reversed',
'round',
'set',
'setattr',
'slice',
'sorted',
'staticmethod',
'str',
'sum',
'super',
'tuple',
'type',
'vars',
'zip',
}
ret = ret | constants | types | functions
return ret
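# Example (sketch): the whitelist is a set of plain names that is later
# intersected with the real builtins in Runner.__init__:
#   wl = create_whitelist()
#   'print' in wl   # True
#   'eval' in wl    # False -- deliberately not whitelisted above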
def _run(filename, new_builtins, queue):
    try:
        exec(open(filename).read(), {'__builtins__': dict(**new_builtins)}, {})
    except Exception as e:
        queue.put(e)
    else:
        queue.put(0)  # success sentinel, consumed by Runner.run()
class Runner(object):
"""Acts as a sandbox environment for user scripts. Aims to be as safe as
possible. Uses `exec` for script execution.
Possible circumventions:
- Find `file` in a list of `object` class subclasses.
- Impose one function on another by overwriting `func_code`
"""
rtr = ''
def __init__(self, builtins_expansion=None):
self.current_run = None
self.main = sys.modules['__main__'].__dict__
self.orig_builtins = self.main['__builtins__'].__dict__
# Build new builtins dictionary, from names whitelist
self.builtins_whitelist = create_whitelist()
self.new_builtins = dict()
for item in self.orig_builtins.keys():
if item in self.builtins_whitelist:
self.new_builtins[item] = \
copy.deepcopy(self.orig_builtins[item])
if builtins_expansion:
self.new_builtins.update(builtins_expansion)
# Remove items specified in blacklist from builtins
self.builtins_blacklist = []
for item in self.builtins_blacklist:
if item in self.new_builtins.keys():
self.new_builtins.pop(item)
# Whitelist of module names, that can be imported into Runner scripts
self.module_whitelist = []
# Adding custom "safe" methods to new builtins
self.safe_import_object = SafeImport()
self.new_builtins['__import__'] = self.safe_import_object()
self.new_builtins['open'] = _safe_open
self.new_builtins = create_read_only_builtins(self.new_builtins)
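        # At this point self.new_builtins is a read-only mapping holding only
        # whitelisted names plus the guarded __import__/open wrappers.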
def run(self, filename, feedback=False, timeout=None):
"""Safely executes Python script located in user `home` directory.
Todo:
- Add return string (one way communication)
- Cleanup implementation
Args:
filename (str): Name of the file containing the script.
feedback (flag, bool): For printing query results into console
timeout (int): Timeout in seconds after which the execution stops
"""
run_queue = queue.Queue()
# manager = multiprocessing.Manager()
# new_builtins = manager.dict()
# for key in self.new_builtins:
# new_builtins[key] = self.new_builtins[key]
        self.current_run = threading.Thread(target=_run, args=(filename,
                                                                self.new_builtins,
                                                                run_queue))
        try:
            self.current_run.daemon = True
            self.current_run.start()
            # CPython cannot forcibly kill a thread, so join with a timeout
            # and abandon the (daemon) thread if the script overruns.
            self.current_run.join(timeout if timeout else 3)
            if self.current_run.is_alive():
                print('> Script timed out; abandoning daemon thread.')
                return None
            rtr_value = run_queue.get()
            if isinstance(rtr_value, BaseException):
                raise rtr_value
            elif rtr_value == 0:
                return 'return_string'
            else:
                raise RuntimeError('something happened')
except RuntimeError:
raise
except SyntaxError as e:
e.filename = filename
raise e
except Exception as e:
_, _, tb = sys.exc_info()
line_number = str(traceback.extract_tb(tb)[-1][1])
args = list(e.args)
if len(args) > 0:
args[0] = str(args[0]) + '\nIn file \"{}\", line {}' \
.format(filename, line_number)
e.args = tuple(args)
raise e
# print('> Executed')
# raise queue.get()
    def stop_current_run(self):
        # Cooperative cancellation is not implemented; see the note in run().
        pass
if __name__ == '__main__':
################################
# THIS IS FOR TESTING PURPOSES #
################################
box = Runner()
box.run('E:\\test.py')
# box.run_old('E:\\test.py')
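# A script exercising the sandbox could look like this (hypothetical
# contents of E:\test.py):
#   import time          # allowed: 'time' is on SafeImport's whitelist
#   print(sum(range(10)))
#   import os            # raises ImportError from _safe_import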
| [
"[email protected]"
]
| |
3e25deebd22289144408e2ae152663c49c07bb88 | 7ce0f3ce7dff2bf3050fca752373c39dbed41ebf | /LoanAnalyticsssss/LoanAnalytics/loan_analytics/loan_analytics/Helper.py | d84b7119b718fd306a7d9a0d454761e412a8f337 | []
| no_license | jiying9/loanAnalysis | dc079c4051d4438d872fffc73dc56af229ae8c6f | 3b12d39c00bee7ccca0e99998ccaac787093c0d5 | refs/heads/main | 2023-01-24T02:03:09.228359 | 2020-11-27T17:18:20 | 2020-11-27T17:18:20 | 316,558,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | from prettytable import PrettyTable
import matplotlib.pyplot as plt
import numpy as np
import decimal
import plotly.graph_objs as go
from plotly.graph_objs import Scatter,Layout
import plotly
import plotly.offline as py
import pandas as pd
import plotly.express as px
class Helper:
""" Helper class for printing and plotting of loan schedules.
"""
@staticmethod
def display(value, digits=2):
""" Return a displayable value with a specified number of digits.
:param value: value to display
:param digits: number of digits right of the decimal place
:return: formatted displayable value
"""
        temp = str(decimal.Decimal(str(value)))
        if '.' not in temp:
            temp += '.'  # ensure integral values still truncate correctly
        temp += '0' * digits
        return temp[:temp.find('.') + digits + 1]
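    # e.g. Helper.display(3.14159) -> '3.14'; Helper.display(2) -> '2.00'
    # (values are truncated, not rounded)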
@staticmethod
def plot(loan):
payment_number, applied_principal, applied_interest, end_principal = [], [], [], []
        # iterate over the loan schedule, collecting the columns to plot
for pay in loan.schedule.values():
payment_number.append(pay[0])
applied_principal.append(pay[4])
applied_interest.append(pay[5])
end_principal.append(pay[6])
ind = np.arange(len(payment_number))
width = 0.35
p1 = plt.bar(ind, applied_principal, width)
p2 = plt.bar(ind, applied_interest, width, bottom=applied_principal)
plt.ylabel('USD')
plt.title('Schedule')
plt.xticks(np.arange(0, max(payment_number), 12))
plt.yticks(np.arange(0, max(applied_principal + applied_interest), 500))
plt.legend((p1[0], p2[0]), ('Principal', 'Interest'), loc='lower right')
plt.show()
@staticmethod
def print(loan):
x = PrettyTable()
x.field_names = ['Payment Number', 'Begin Principal', 'Payment', 'Extra Payment',
'Applied Principal', 'Applied Interest', 'End Principal']
for field_name in x.field_names:
x.align[field_name] = "r"
for pay in loan.schedule.values():
x.add_row([pay[0],
Helper.display(pay[1]),
Helper.display(pay[2]),
Helper.display(pay[3]),
Helper.display(pay[4]),
Helper.display(pay[5]),
Helper.display(pay[6])])
print(x)
@staticmethod
def getimg(loan):
df = pd.DataFrame({
"month": [pay[0] for pay in loan.schedule.values()],
"principal": [Helper.display(pay[1]) for pay in loan.schedule.values()]
})
return px.bar(df, x="month", y="principal", barmode="group")
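# Usage sketch (assumes a loan object exposing .schedule, a dict whose values
# are rows [number, begin_principal, payment, extra_payment, applied_principal,
# applied_interest, end_principal], the indexing used above):
#   Helper.print(loan)          # ASCII amortization table
#   fig = Helper.getimg(loan)   # plotly bar chart of begin principal by month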
| [
"[email protected]"
]
| |
162955668c9881485c3760eae76a6559879c4eb3 | e48704ccf1ff1ff83163b8b7deed5c2973d70b30 | /yield/test8_except.py | 3343045c4bd566e955f6bf89c152a7b9fba9334a | []
| no_license | nilecui/nile_python_swk | e410c9ead2c6455b4c0ff55f1188f287f9fabe1b | cf39d7272821edb74c0bcca7120514e4e23a3b80 | refs/heads/master | 2021-03-12T23:01:20.038183 | 2013-04-06T09:30:38 | 2013-04-06T09:30:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
def echo(value=None):
    print("Execution starts when next() is called for the first time.")
    try:
        while True:
            try:
                value = (yield value)
            except Exception:
                value = 3
    finally:
        print("Don't forget to clean up when 'close()' is called")
generator = echo(1)
print(next(generator))    # 1
print(next(generator))    # None
print(generator.send(2))  # 2: send() resumes the generator and 2 is yielded back
generator.throw(TypeError, "spam")  # caught inside echo(); value becomes 3
generator.close()
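# Expected output (the value yielded back by throw() is not printed):
#   Execution starts when next() is called for the first time.
#   1
#   None
#   2
#   Don't forget to clean up when 'close()' is called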
| [
"[email protected]"
]
| |
cc932fa561bbdb458aab97b1b882a7a91e775d24 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /Pf2kDoCRvEL8qzKTs_1.py | ba8544a0fea52347f5454de892ffd9d16371d082 | []
| no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py |
def order_people(lst, people):
    """Seat `people` in a lst[0]-row by lst[1]-column grid, snaking
    left-to-right then right-to-left; 0 marks an empty seat."""
    if lst[0] * lst[1] < people: return 'overcrowded'
    res = []
    for row in range(lst[0]):
        direc = -1 if row % 2 else 1  # reverse every other row (boustrophedon)
        res.append([n if n <= people else 0
                    for n in range(row * lst[1] + 1, (row + 1) * lst[1] + 1)][::direc])
    return res
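# Examples (verifiable by hand):
#   order_people([2, 3], 5)  -> [[1, 2, 3], [0, 5, 4]]
#   order_people([1, 1], 5)  -> 'overcrowded'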
| [
"[email protected]"
]
| |
c6e4684f00604f3de05e374e87b459aa5bea5fce | 7aa9f792cf95a9819b974f4a83f4af9c39606b30 | /src/__init__.py | a05cc4c8be3eacec8edb2dc20c27e9b9ec489eca | []
| no_license | brolze/okex_test_project | e7b38a2638c0fa6410347c5d3003cacba0945493 | 3206e54c3990e0ae4235b8517ce9f9499ca67bc1 | refs/heads/main | 2023-03-23T11:24:39.301862 | 2021-03-20T07:42:26 | 2021-03-20T07:42:26 | 334,464,430 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 30 17:30:54 2021
@author: xujianqiao
"""
| [
"[email protected]"
]
| |
c7118b9671b22885cdb4e72acc137c952a2ac1f2 | 3413f253ba9b11fa795a57306be714d3f317e155 | /dfvfs/analyzer/qcow_analyzer_helper.py | 78a96b3447827aca49f19355ea1dda90798b3f97 | [
"Apache-2.0"
]
| permissive | ekristen/dfvfs | 5e7e87b2fa9b39c15817dc42228b0f943abbf735 | 2dcb8a3e5bdc36bcb11f133a18f718b3010dd93e | refs/heads/master | 2021-01-17T22:11:59.482625 | 2016-05-14T08:15:12 | 2016-05-14T08:15:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,179 | py | # -*- coding: utf-8 -*-
"""The QCOW format analyzer helper implementation."""
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class QCOWAnalyzerHelper(analyzer_helper.AnalyzerHelper):
"""Class that implements the QCOW analyzer helper."""
FORMAT_CATEGORIES = frozenset([
definitions.FORMAT_CATEGORY_STORAGE_MEDIA_IMAGE])
TYPE_INDICATOR = definitions.TYPE_INDICATOR_QCOW
def GetFormatSpecification(self):
"""Retrieves the format specification."""
format_specification = specification.FormatSpecification(
self.type_indicator)
# QCOW version 1 signature and version.
format_specification.AddNewSignature(b'QFI\xfb\x00\x00\x00\x01', offset=0)
# QCOW version 2 signature and version.
format_specification.AddNewSignature(b'QFI\xfb\x00\x00\x00\x02', offset=0)
# QCOW version 3 signature and version.
format_specification.AddNewSignature(b'QFI\xfb\x00\x00\x00\x03', offset=0)
return format_specification
# Register the analyzer helpers with the analyzer.
analyzer.Analyzer.RegisterHelper(QCOWAnalyzerHelper())
| [
"[email protected]"
]
| |
35196e8a2bbb42d4660f9850ec32b9729408a369 | 8931d9e5a7cab2a4363105d3bd72de629b3444af | /auth.py | 4fd9e15e6594e2f69b1fef9b3a304b4847d4ad1a | []
| no_license | kpurusho/MoneySquareBackend | de255a5995a5888298151dac97f06a8fcda7a328 | 1939317ec92b44e2161cf38d4adf249a996cfd18 | refs/heads/master | 2022-11-20T23:58:39.713941 | 2020-07-26T12:21:39 | 2020-07-26T12:21:39 | 275,732,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | import os
import cachecontrol
import google.auth.transport.requests
import requests
from functools import wraps
from google.oauth2 import id_token
class AuthenticationFailedException(Exception):
pass
def oidc_auth_required(request, email=None, audience=None):
def decorator(f):
@wraps(f)
def wrapper(*args, **kvargs):
token = request.headers.get('idToken')
session = requests.session()
cached_session = cachecontrol.CacheControl(session)
transport_request = google.auth.transport.requests.Request(session=cached_session)
decoded_token = id_token.verify_oauth2_token(token, transport_request)
if decoded_token['iss'] != 'accounts.google.com':
raise AuthenticationFailedException()
if email and decoded_token['email'] != email:
raise AuthenticationFailedException()
return f(*args, **kvargs)
return wrapper
return decorator | [
"[email protected]"
]
| |
c4f20fce595f09bdabfdbdf0d65d5b1465e45826 | 9f91fa2910d13273a50ae416c116e16385a4eb95 | /natvenv/env/bin/jp.py | d6d50fb640cee342a3af191071bf38b79dcb68ac | []
| no_license | natrayanp/mysb_v2 | cac811e7f66670f8546cccdbca386ba6ff4f8cd6 | 24dea04e2a631ca6b465b3f62077a83a5dce9758 | refs/heads/master | 2022-11-20T16:49:30.341095 | 2018-07-31T17:18:04 | 2018-07-31T17:18:04 | 116,319,931 | 0 | 1 | null | 2022-11-14T21:09:17 | 2018-01-05T00:05:13 | Python | UTF-8 | Python | false | false | 1,748 | py | #!/home/natrayan/project/AwsProject/Python/Tradingapp/tradingapp5/natvenv/env/bin/python3.6
import sys
import json
import argparse
from pprint import pformat
import jmespath
from jmespath import exceptions
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expression')
parser.add_argument('-f', '--filename',
help=('The filename containing the input data. '
'If a filename is not given then data is '
'read from stdin.'))
parser.add_argument('--ast', action='store_true',
help=('Pretty print the AST, do not search the data.'))
args = parser.parse_args()
expression = args.expression
if args.ast:
# Only print the AST
expression = jmespath.compile(args.expression)
sys.stdout.write(pformat(expression.parsed))
sys.stdout.write('\n')
return 0
if args.filename:
with open(args.filename, 'r') as f:
data = json.load(f)
else:
data = sys.stdin.read()
data = json.loads(data)
try:
sys.stdout.write(json.dumps(
jmespath.search(expression, data), indent=4))
sys.stdout.write('\n')
except exceptions.ArityError as e:
sys.stderr.write("invalid-arity: %s\n" % e)
return 1
except exceptions.JMESPathTypeError as e:
sys.stderr.write("invalid-type: %s\n" % e)
return 1
except exceptions.UnknownFunctionError as e:
sys.stderr.write("unknown-function: %s\n" % e)
return 1
except exceptions.ParseError as e:
sys.stderr.write("syntax-error: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main())
| [
"[email protected]"
]
| |
0cbe366c9fcfb220b4dc178aaae8b058b5713ff2 | 63ce91bae5eeadf885262b8fe0e769a64454d257 | /ray_torch_lightning_advanced.py | 9d62f3141c7e6f55a16c79f12c3cd7dd81faca0d | [
"Apache-2.0"
]
| permissive | Data-drone/cv_experiments | c7349e7808f7f9c1315ce1efe33be1f86f4a9f80 | d6e1d9716c03a9165e3d8a08f4cc1287323a56ca | refs/heads/master | 2021-06-26T04:33:10.079771 | 2021-01-19T11:40:30 | 2021-01-19T11:40:30 | 196,596,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,559 | py | ### test script for Ray and
import torch
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from torch.nn import functional as F
from torchvision.datasets import CIFAR100
from torchvision import transforms
from torchvision import models
from torch.nn import CrossEntropyLoss
import os
class LightningMNISTClassifier(pl.LightningModule):
"""
This has been adapted from
https://towardsdatascience.com/from-pytorch-to-pytorch-lightning-a-gentle-introduction-b371b7caaf09
adjusted for cifar100
"""
def __init__(self, config, data_dir=None):
super(LightningMNISTClassifier, self).__init__()
self.data_dir = data_dir or os.getcwd()
self.lr = config["lr"]
self.batch_size = config["batch_size"]
#self.momentum = config["momentum"]
# mnist images are (1, 28, 28) (channels, width, height)
self.model = models.resnet34(pretrained=False)
self.criterion = CrossEntropyLoss()
self.tr_accuracy = pl.metrics.Accuracy()
self.vl_accuracy = pl.metrics.Accuracy()
self.test_accuracy = pl.metrics.Accuracy()
def forward(self, x):
return self.model(x)
def training_step(self, train_batch, batch_idx):
x, y = train_batch
logits = self.forward(x)
loss = self.criterion(logits, y)
accuracy = self.tr_accuracy(logits, y)
self.log("ptl/train_loss", loss)
self.log("ptl/train_accuracy", accuracy)
return loss
def validation_step(self, val_batch, batch_idx):
x, y = val_batch
logits = self.forward(x)
loss = self.criterion(logits, y)
accuracy = self.vl_accuracy(logits, y)
return {"val_loss": loss, "val_accuracy": accuracy}
def validation_epoch_end(self, outputs):
avg_loss = torch.stack([x["val_loss"] for x in outputs]).mean()
avg_acc = torch.stack([x["val_accuracy"] for x in outputs]).mean()
self.log("ptl/val_loss", avg_loss)
self.log("ptl/val_accuracy", self.vl_accuracy.compute())
@staticmethod
def download_data(data_dir):
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
])
return CIFAR100(data_dir, train=True, download=True, transform=transform)
def prepare_data(self):
mnist_train = self.download_data(self.data_dir)
print(len(mnist_train))
self.mnist_train, self.mnist_val = random_split(
mnist_train, [45000, 5000])
def train_dataloader(self):
return DataLoader(self.mnist_train, batch_size=int(self.batch_size), num_workers=4)
def val_dataloader(self):
return DataLoader(self.mnist_val, batch_size=int(self.batch_size), num_workers=4)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
return optimizer
def train_mnist(config):
model = LightningMNISTClassifier(config)
trainer = pl.Trainer(max_epochs=50, gpus=1) #, show_progress_bar=False)
trainer.fit(model)
import shutil
import tempfile
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.utilities.cloud_io import load as pl_load
from ray import tune
from ray.tune import CLIReporter
from ray.tune.schedulers import ASHAScheduler, PopulationBasedTraining
from ray.tune.integration.pytorch_lightning import TuneReportCallback, \
TuneReportCheckpointCallback
def train_mnist_tune(config, data_dir=None, num_epochs=10, num_gpus=0):
model = LightningMNISTClassifier(config, data_dir)
trainer = pl.Trainer(
max_epochs=num_epochs,
gpus=num_gpus,
logger=TensorBoardLogger(
save_dir=tune.get_trial_dir(), name="", version="."),
progress_bar_refresh_rate=0,
callbacks=[
TuneReportCallback(
{
"loss": "ptl/val_loss",
"mean_accuracy": "ptl/val_accuracy"
},
on="validation_end")
])
trainer.fit(model)
def tune_mnist_asha(num_samples=10, num_epochs=50, gpus_per_trial=0, cpus_per_trial=4):
data_dir = os.path.join(tempfile.gettempdir(), "mnist_data_")
LightningMNISTClassifier.download_data(data_dir)
config = {
"lr": tune.loguniform(1e-4, 1e-1),
"batch_size": tune.choice([32, 64, 128]),
}
scheduler = ASHAScheduler(
max_t=num_epochs,
grace_period=1,
reduction_factor=2)
reporter = CLIReporter(
parameter_columns=["lr", "batch_size"],
metric_columns=["loss", "mean_accuracy", "training_iteration"])
analysis = tune.run(
tune.with_parameters(
train_mnist_tune,
data_dir=data_dir,
num_epochs=num_epochs,
num_gpus=gpus_per_trial),
resources_per_trial={
"cpu": cpus_per_trial,
"gpu": gpus_per_trial
},
metric="loss",
mode="min",
config=config,
num_samples=num_samples,
scheduler=scheduler,
progress_reporter=reporter,
name="tune_mnist_asha")
print("Best hyperparameters found were: ", analysis.best_config)
shutil.rmtree(data_dir)
#tune_mnist_asha(cpus_per_trial=4, gpus_per_trial=1)
single_config = {
'lr': 1e-4,
'batch_size': 64
}
# uses about 60% of gpu
#train_mnist(single_config)
#
tune_mnist_asha(num_samples=10, num_epochs=50, gpus_per_trial=1, cpus_per_trial=4) | [
"[email protected]"
]
| |
7361bd53a9be6f44f6b646cdbedc297239e87561 | eea1d77d08b3e64519e8ac86f0cabcb78a14e7de | /main.py | a8fefa41a6d891c234790f2d5bfb33d971f98bde | []
| no_license | colemai/nutritics_client | 02f93f4a42c29be9f0a4badaa33671b485bb32eb | 788a65ef5a095c750e67385888e0ffb2846a9777 | refs/heads/master | 2022-10-25T09:05:33.478644 | 2020-06-13T08:57:20 | 2020-06-13T08:57:20 | 271,804,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #!/usr/bin/env python3
"""
Author: Ian Coleman
Purpose: Call the Nutritics API
"""
import pdb
import requests
api_address = "https://iancoleman1a:Pokemon124*@www.nutritics.com/api/v1.1/"
def get_users ():
all_users = requests.get("{}LIST/&client".format(api_address))
return all_users.text
def create_user (uid):
call = requests.get("{}CREATE/&client&id={}".format(api_address, uid))
return call
if __name__ == "__main__":
create_user(333)
all_users = get_users()
print(all_users) | [
"[email protected]"
]
| |
19546a5640b4da97df0abe9a12cf7adb5607d3dd | ba6454efa22648faf4de1b8de069294d4cb7d09b | /python/meetup/meetup.py | 5ace7b4fcc4b3bf0fda4cd43dc59872c32fcfe74 | []
| no_license | GreatBahram/exercism | c876e727b0c42f625f71d09f721ed8416cbbbcd6 | d3e63dd7e5753e7c8356c10c7b7f796db0c90163 | refs/heads/master | 2020-04-17T03:34:06.188855 | 2019-08-15T11:23:28 | 2019-08-15T11:23:28 | 166,189,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | import datetime
from calendar import day_name
class MeetupDayException(Exception):
pass
def weekdays_in_month(year, month, weekday):
"""Return all 4/5 dates with given weekday."""
date = datetime.date(year, month, 1)
date += datetime.timedelta(days=(weekday - date.weekday()) % 7)
first_to_fifth = (
date + datetime.timedelta(weeks=i)
for i in range(6)
)
return [
date
for date in first_to_fifth
if date.month == month
]
def meetup_day(year, month, weekday, nth):
day_names = list(day_name)
shift_by = {'1st': 0, '2nd': 1, '3rd': 2, '4th': 3, '5th': 4, 'last': -1}
dates = weekdays_in_month(year, month, day_names.index(weekday))
if nth == 'teenth':
return next(date for date in dates if date.day > 12)
try:
date = dates[shift_by[nth]]
except IndexError:
raise MeetupDayException('Date does not exist.') from None
return date
| [
"[email protected]"
]
| |
b8b00de4949bf71e8d3937c558e0b4579c217e27 | fccaec3ce13d96d853b888e49ed4177de621f3cd | /choose_your_own/your_algorithm_rf.py | 52eecba74cf19558837127207043d7a454fde16c | []
| no_license | Lundgren/ud120-intro-ml | e83471bebc1213520c531d359f3b51add0ca992b | f36a89bee650a13263dab7ae8e9b5ff3fbb8e60c | refs/heads/master | 2021-03-19T09:32:42.073926 | 2017-02-28T18:45:16 | 2017-02-28T18:45:16 | 82,955,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,598 | py | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow"
### points mixed together--separate them so we can give them different colors
### in the scatterplot and identify them visually
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(grade_slow, bumpy_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
#plt.show()
################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
from sklearn.ensemble import RandomForestClassifier
from time import time
'''
# 1000 best at ~0.9192
for n in [1, 10, 50, 100, 500, 1000]:
sum = 0
for x in range(0, 10):
clf = RandomForestClassifier(n_estimators=n)
clf.fit(features_train, labels_train)
sum += clf.score(features_test, labels_test)
print "RandomForest n:", str(n), ", accuracy:", str(sum / 10)
'''
'''
# 10 best at ~0.92
for mss in [2, 3, 5, 7, 10]:
sum = 0
for x in range(0, 10):
clf = RandomForestClassifier(n_estimators=1000, min_samples_split=mss)
clf.fit(features_train, labels_train)
sum += clf.score(features_test, labels_test)
print "RandomForest n: 1000, min sample split:", str(mss), ", accuracy:", str(sum / 10)
'''
# Very random results (of course), 1000 & 10 probably isn't better than default
clf = RandomForestClassifier(n_estimators=1000, min_samples_split=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s" #~2.0s
t1 = time()
clf.predict(features_test)
print "prediction time:", round(time()-t1, 3), "s" #~0.45s
print "accuracy:", clf.score(features_test, labels_test) #.92
try:
prettyPicture(clf, features_test, labels_test)
plt.show()
except NameError:
pass
| [
"[email protected]"
]
| |
dce128646442ea8ce80d326e0a857c00cff81ee8 | d05abfff9c45ec8eef6f2b2dfeec2aa3662fecdf | /Courses/Phys 241/MICE.py | e8b28b0095aa449200285eb6d7daf447ffa3337f | []
| no_license | Srivastava/Hello-World | 9d695e8caa9e9117e94963a5b3a0a0ced2f5dd3c | 1f57b7b7edffd3934d75d76f495ebcab48f58911 | refs/heads/master | 2020-04-04T07:51:32.837730 | 2017-01-09T18:32:28 | 2017-01-09T18:32:28 | 51,562,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,077 | py | import sys
import time
import math
import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import csv
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.animation as animation
#GM =0.00029632889 * 10E11
GM = 4.498309551e-13
#GM=4.43E-6
#GM =1E-3
#GMm =10E-12
#GMS=8.46611639e-8
#print GM
def rotation (x,y,z,nx,ny,nz,angle):
theta=-np.pi*angle/180.0
c=np.cos(theta)
s=np.sin(theta)
a=1-c
xN=[0]*len(x)
yN=[0]*len(x)
zN=[0]*len(x)
#print a*nx*nx+c ,a*nx*ny-s*nz,a*nx*nz+s*ny
#print a*nx*ny+s*nz,a*ny*ny+c,a*ny*nz-s*nx
#print a*nx*nz-s*ny, a*ny*nz+s*nx, a*nz*nz+c
xN=[(a*nx*nx+c)*p+(a*nx*ny-s*nz)*q+(a*nx*nz+s*ny)*r for p,q,r in zip(x,y,z)]
yN=[(a*nx*ny+s*nz)*p+(a*ny*ny+c)*q+(a*ny*nz-s*nx)*r for p,q,r in zip(x,y,z)]
zN=[(a*nx*nz-s*ny)*p+(a*ny*nz+s*nx)*q+(a*nz*nz+c)*r for p,q,r in zip(x,y,z)]
'''print len(xN)
print len(yN)
print len(zN)'''
return (xN,yN,zN)
def accel(x,y,z,n):
ax=sp.zeros([n])
ay=sp.zeros([n])
az=sp.zeros([n])
'''dist11=((x[1]-x[0])*(x[1]-x[0])+(y[1]-y[0])*(y[1]-y[0])+(z[1]-z[0])*(z[1]-z[0]))**(-1.5)
#print dist11, x[1], x[0], y[1], y[0], z[1], z[0]
aX11=-GM*(x[1]-x[0])*dist11
aY11=-GM*(y[1]-y[0])*dist11
aZ11=-GM*(z[1]-z[0])*dist11
ax[0]=-aX11
ay[0]=-aY11
az[0]=-aZ11
ax[1]=aX11
ay[1]=aY11
az[1]=aZ11'''
dist11=((x[0]*x[0]+y[0]*y[0]+z[0]*z[0])+10E-6)**(-1.5)
aX11=-GM*(x[0])*dist11*0.5
aY11=-GM*(y[0])*dist11*0.5
aZ11=-GM*(z[0])*dist11*0.5
ax[0]=aX11
ay[0]=aY11
az[0]=aZ11
dist12=((x[1]*x[1]+y[1]*y[1]+z[1]*z[1])+10E-6)**(-1.5)
aX12=-GM*(x[1])*dist12*0.5
aY12=-GM*(y[1])*dist12*0.5
aZ12=-GM*(z[1])*dist12*0.5
ax[1]=aX12
ay[1]=aY12
az[1]=aZ12
for i in range (2,n):
aX=0
aY=0
aZ=0
dist=((x[i]-x[0])*(x[i]-x[0])+(y[i]-y[0])*(y[i]-y[0])+(z[i]-z[0])*(z[i]-z[0])+10E-6)**(-1.5)
dist1=((x[i]-x[1])*(x[i]-x[1])+(y[i]-y[1])*(y[i]-y[1])+(z[i]-z[1])*(z[i]-z[1])+10E-6)**(-1.5)
aX=-GM*(x[i]-x[0])*dist
aY=-GM*(y[i]-y[0])*dist
aZ=-GM*(z[i]-z[0])*dist
aX1=-GM*(x[i]-x[1])*dist1
aY1=-GM*(y[i]-y[1])*dist1
aZ1=-GM*(z[i]-z[1])*dist1
ax[i]=aX+aX1
ay[i]=aY+aY1
az[i]=aZ+aZ1
return (ax,ay,az)
def LeapState(x,y,z,vx,vy,vz,n):
dt =1E+7
ax,ay,az=accel(x,y,z,n)
vx=[a+b*0.5*dt for a,b in zip(vx,ax)]
vy=[a+b*0.5*dt for a,b in zip(vy,ay)]
vz=[a+b*0.5*dt for a,b in zip(vz,az)]
x=[a+b*dt for a,b in zip(x,vx)]
y=[a+b*dt for a,b in zip(y,vy)]
z=[a+b*dt for a,b in zip(z,vz)]
ax,ay,az=accel(x,y,z,n)
vx=[a+b*0.5*dt for a,b in zip(vx,ax)]
vy=[a+b*0.5*dt for a,b in zip(vy,ay)]
vz=[a+b*0.5*dt for a,b in zip(vz,az)]
return (x,y,z,vx,vy,vz)
def init(npart):
x=[]
y=[]
z=[]
vx=[]
vy=[]
vz=[]
Rmin=25.0
e=0.6
a=float(Rmin/(2*(1-e)))
print a
Ra=a*(1+e)
#Ra=Rmin*(1+e)*((1-e)**(-1))
#print a,Ra
#VelMass=GM*((1+e)/(a-a*e))
#VelMass=VelMass**0.5
#vel=(GM*((2.0/Ra) - (1.0/a)))**0.5
vel = (GM*0.5*Rmin*((Ra*(Ra+Rmin))**(-1)))**(0.5)
#vel =0.5*(GM*(1-e)*(1-e)*((3+e)*Rmin)**(-1))**(0.5)
vx.append(0)
vy.append(vel)
vz.append(0)
vx.append(0)
vy.append(-vel)
vz.append(0)
x.append(-Ra)
y.append(0)
z.append(0)
x.append(Ra)
y.append(0)
z.append(0)
for i in range(0,11):
r=(0.2+0.05*i)*Rmin
velocity=((GM/r))**0.5
n=12+3*i
for j in range(0,n):
x.append(r*np.cos((2*np.pi*j)/n))
y.append(r*np.sin((2*np.pi*j)/n))
z.append(0.0)
vx.append(-velocity*np.sin((2*np.pi*j)/n))
vy.append(velocity*np.cos((2*np.pi*j)/n))
vz.append(0.0)
x[2:],y[2:],z[2:]=rotation(x[2:],y[2:],z[2:],1.0,0.0,0.0,60.0)
vx[2:],vy[2:],vz[2:]=rotation(vx[2:],vy[2:],vz[2:],1.0,0.0,0.0,60.0)
#vx[0],vy[0],vz[0]=rotation([vx[0]],[vy[0]],[vz[0]],0.0,1.0,0.0,60.0)
#x[2:],y[2:],z[2:]=rotation(x[2:],y[2:],z[2:],1.0,0.0,0.0,90.0)
#vx[2:],vy[2:],vz[2:]=rotation(vx[2:],vy[2:],vz[2:],1.0,0.0,0.0,90.0)
x[2:]=[p-Ra for p in x[2:]]
vy[2:]=[p+vel for p in vy[2:]]
for i in range(0,11):
#print i
r=(0.2+0.05*i)*Rmin
#print 0.2+0.05*i
velocity=((GM/r))**0.5
n=12+3*i
#print n
for j in range(0,n):
x.append(r*np.cos((2*np.pi*j)/n))
y.append(r*np.sin((2*np.pi*j)/n))
z.append(0.0)
vx.append(-velocity*np.sin((2*np.pi*j)/n))
vy.append(velocity*np.cos((2*np.pi*j)/n))
vz.append(0.0)
x[299:],y[299:],z[299:]=rotation(x[299:],y[299:],z[299:],1.0,0.0,0.0,15.0)
vx[299:],vy[299:],vz[299:]=rotation(vx[299:],vy[299:],vz[299:],1.0,0.0,0.0,15.0)
#x[299:],y[299:],z[299:]=rotation(x[299:],y[299:],z[299:],1.0,0.0,0.0,90.0)
#vx[299:],vy[299:],vz[299:]=rotation(vx[299:],vy[299:],vz[299:],1.0,0.0,0.0,90.0)
#vx[1],vy[1],vz[1]=rotation([vx[1]],[vy[1]],[vz[1]],1.0,0.0,0.0,60.0)
#x[299:],y[299:],z[299:]=rotation(x[299:],y[299:],z[299:],0.0,0.0,1.0,90.0)
#vx[299:],vy[299:],vz[299:]=rotation(vx[299:],vy[299:],vz[299:],0.0,0.0,1.0,90.0)
x[299:]=[p+Ra for p in x[299:]]
vy[299:]=[p-vel for p in vy[299:]]
#print len(x),len(y),len(z)
return (x,y,z,vx,vy,vz)
def main():
#n,t=(raw_input('>> N, T ').split())
t=(raw_input('>>T ').split())
#n=np.int64(n)
n=596
t=np.float128(t)
x,y,z,vx,vy,vz=init(n)
N=np.int64(math.ceil(t/1))
f=open('Data.csv','w')
with open('Data.csv', 'wb') as fp:
a = csv.writer(fp)
for i in range(0,N):
for j in range(0,n):
#print i,j
values=[]
values.append([x[j],y[j],z[j]])
a.writerows(values)
x,y,z,vx,vy,vz=LeapState(x,y,z,vx,vy,vz,n)
f.close()
main()
| [
"[email protected]"
]
| |
88fcaf6f65d7eace44bd670058f01549c220a470 | d066f1ef43678f93b516534ebe3674c7b09e309b | /PyThon学习笔记+代码/update_ini_config/Module/psutil/cpu1.py | 4d5a22ab551189a4d6578d738c4b5d2accc6903c | []
| no_license | lantian316/Stone_Study_Box | daaee53c63fc35c9a33353a885ba0a7b78202863 | d13c0338c74acfffc9b4627799249561d8dda822 | refs/heads/master | 2021-04-29T09:31:54.298740 | 2018-02-05T14:35:33 | 2018-02-05T14:35:33 | 77,659,704 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import psutil
print(psutil.cpu_times()) #显示cpu的整个信息
print(psutil.cpu_times().user)
print(psutil.cpu_count()) #获取cpu的逻辑个数(核心)
print(psutil.cpu_count( logical=False ))
print(psutil.swap_memory())
print(psutil.disk_io_counters())
print(psutil.disk_partitions())
print(psutil.disk_usage('/'))
print(psutil.disk_io_counters())
print(psutil.disk_io_counters(perdisk=True))
print()
print()
print()
print()
print()
print()
#print(psutil.pids())
p=psutil.Process(10024)
print(p.name())
print(p.exe()) #进程的bin路径
print(p.cwd()) #进程的工作目录绝对路径
print(p.status()) #进程状态
print(p.create_time()) #进程创建时间
#print(p.uids()) #进程uid信息
#print(p.gids()) #进程的gid信息
print(p.cpu_times()) #进程的cpu时间信息,包括user,system两个cpu信息
print(p.cpu_affinity()) #get进程cpu亲和度,如果要设置cpu亲和度,将cpu号作为参考就好
print(p.memory_percent()) #进程内存利用率
print(p.memory_info()) #进程内存rss,vms信息
print(p.io_counters()) #进程的IO信息,包括读写IO数字及参数
#print(p.connectios()) #返回进程列表
print(p.num_threads()) #进程开启的线程数 | [
"[email protected]"
]
| |
3c4fc4b4311666ae69e4ff6c2501b873ceba975e | 31a97987247fb9c23d19e7dcccd0ff0f27be94be | /bert-sklearn/bert_sklearn/model/utils.py | 14cefab33b2ce4a0ad8856a8e895d04d5ca8cdaa | [
"Apache-2.0"
]
| permissive | uphill-ai/NAS2019 | 31302027b46ca11b41c167aacc14dc5da1cc92ab | fe356878d7c007013002e86dd3c7a83333e01084 | refs/heads/master | 2022-12-23T14:54:51.211530 | 2022-04-06T21:14:16 | 2022-04-06T21:14:16 | 202,396,842 | 2 | 2 | Apache-2.0 | 2022-12-17T06:11:36 | 2019-08-14T17:32:03 | Jupyter Notebook | UTF-8 | Python | false | false | 3,702 | py | import torch
from .pytorch_pretrained import BertTokenizer, BasicTokenizer
from .pytorch_pretrained import PYTORCH_PRETRAINED_BERT_CACHE
from .model import BertPlusMLP
def get_basic_tokenizer(do_lower_case):
"""
Get a basic tokenizer(punctuation splitting, lower casing, etc.).
"""
return BasicTokenizer(do_lower_case=do_lower_case)
def get_tokenizer(bert_model='bert-base-uncased',
bert_vocab_file=None,
do_lower_case=False):
"""
Get a BERT wordpiece tokenizer.
Parameters
----------
bert_model : string
one of SUPPORTED_MODELS i.e 'bert-base-uncased','bert-large-uncased'
bert_vocab_file: string
Optional pathname to vocab file to initialize BERT tokenizer
do_lower_case : bool
use lower case with tokenizer
Returns
-------
tokenizer : BertTokenizer
Wordpiece tokenizer to use with BERT
"""
if bert_vocab_file is not None:
return BertTokenizer(bert_vocab_file, do_lower_case=do_lower_case)
else:
return BertTokenizer.from_pretrained(bert_model, do_lower_case=do_lower_case)
def get_model(bert_model='bert-base-uncased',
bert_config_json=None,
from_tf=False,
num_labels=2,
model_type='classifier',
num_mlp_layers=0,
num_mlp_hiddens=500,
state_dict=None,
local_rank=-1):
"""
Get a BertPlusMLP model.
Parameters
----------
bert_model : string
one of SUPPORTED_MODELS i.e 'bert-base-uncased','bert-large-uncased'
num_labels : int
For a classifier, this is the number of distinct classes.
For a regressor his will be 1.
model_type : string
specifies 'classifier' or 'regressor' model
num_mlp_layers : int
The number of mlp layers. If set to 0, then defualts
to the linear classifier/regresor as in the original Google code.
num_mlp_hiddens : int
The number of hidden neurons in each layer of the mlp.
state_dict : collections.OrderedDict object
an optional state dictionnary
local_rank : (int)
local_rank for distributed training on gpus
Returns
-------
model : BertPlusMLP
BERT model plus mlp head
"""
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE/'distributed_{}'.format(local_rank)
if bert_config_json is not None:
# load from a tf checkpoint file, pytorch checkpoint file,
# or a pytorch state dict
model = BertPlusMLP.from_model_ckpt(config_file_or_dict=bert_config_json,
weights_path=bert_model,
state_dict=state_dict,
from_tf=from_tf,
num_labels=num_labels,
model_type=model_type,
num_mlp_hiddens=num_mlp_hiddens,
num_mlp_layers=num_mlp_layers)
else:
# Load from pre-trained model archive
print("Loading %s model..."%(bert_model))
model = BertPlusMLP.from_pretrained(bert_model,
cache_dir=cache_dir,
state_dict=state_dict,
num_labels=num_labels,
model_type=model_type,
num_mlp_hiddens=num_mlp_hiddens,
num_mlp_layers=num_mlp_layers)
return model
| [
"[email protected]"
]
| |
e9be198289de62167eb43eb5900f051d45b63886 | 5eb1ab885494f7952a104bcc3756235c8116b3c5 | /Module_2_Challenge/Starter_Code/qualifier/tests/test_qualifier.py | c39d8f0300f891904302b71596b08199be2ef526 | [
"MIT"
]
| permissive | abolla04/Module_2_Challenge | 24ad33d6e8e086f925fd0a11092974b7e66b7109 | 1c583e83a9bfe936a6c2b9327031affdac044600 | refs/heads/main | 2023-05-27T10:46:09.953612 | 2021-06-14T04:51:16 | 2021-06-14T04:51:16 | 376,175,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | # Import pathlib
from pathlib import Path
#Import fileio
from qualifier.utils import fileio
# Import Calculators
from qualifier.utils import calculators
# Import Filters
from qualifier.filters import credit_score
from qualifier.filters import debt_to_income
from qualifier.filters import loan_to_value
from qualifier.filters import max_loan_size
def test_save_csv():
myfile = 'hello'
csv_path = Path('Starter_code/qualifier/tests/data/output/qualifying_loans.csv')
fileio.save_csv(csv_path, myfile)
assert csv_path.exists()
# Use Path from pathlib to output the test csv to ./data/output/qualifying_loans.csv
def test_calculate_monthly_debt_ratio():
assert calculators.calculate_monthly_debt_ratio(1500, 4000) == 0.375
def test_calculate_loan_to_value_ratio():
assert calculators.calculate_loan_to_value_ratio(210000, 250000) == 0.84
#def test_filters():
bank_data = fileio.load_csv(Path('./data/daily_rate_sheet.csv'))
current_credit_score = 750
debt = 1500
income = 4000
loan = 210000
home_value = 250000
monthly_debt_ratio = 0.375
loan_to_value_ratio = 0.84
# @TODO: Test the new save_csv code!
| [
"[email protected]"
]
| |
d56decb5438d33d14d13d2c472bbc9a6720fb931 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_21210.py | 9693c1c30eb08a3645f37e8fdea9224e4e16f558 | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | # Django: Custom User form instead of built in + custom required fields
help_text=_("Enter the same password as above, for verification."))
| [
"[email protected]"
]
| |
4817b10d73911622da1ebaca2741e011f56ae9f2 | 149bffb7c3fc7e4dc38da892b7f6fb588408b0e3 | /2017/solutions/assignment1/cs231n/classifiers/k_nearest_neighbor.py | 9169ff640474f9c68d4f545ac0ab13f4d7b14890 | []
| no_license | Luvata/CS231N-2017-2018-Solutions | e4ef936d8f7f08e661e7005d90cb37f0c2dd3645 | 4a53cf9131b1822c3312a3ea105c8c88c33b3950 | refs/heads/master | 2020-03-19T04:23:07.485586 | 2018-06-18T20:06:51 | 2018-06-18T20:06:51 | 135,822,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,140 | py | import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError('Invalid value %d for num_loops' % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
for j in xrange(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension. #
#####################################################################
dists[i,j] = (np.sqrt(np.sum((X[i] - self.X_train[j])**2)))
pass
#####################################################################
# END OF YOUR CODE #
#####################################################################
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in xrange(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
#######################################################################
dists[i] = np.sqrt(np.sum((X[i] - self.X_train)**2,axis=1))
pass
#######################################################################
# END OF YOUR CODE #
#######################################################################
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy. #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
dists = np.sqrt(-2 * np.dot(X,self.X_train.T) + (X**2).sum(axis=1,keepdims=True)+ (self.X_train**2).sum(axis=1))
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
gives the distance betwen the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
d2 = np.argsort(dists)
for i in xrange(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
closest_y = self.y_train[d2[i,:k]]
pass
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
uniques, count = np.unique(closest_y,return_counts=True)
y_pred[i] = uniques[count==np.max(count)][0]
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
return y_pred
| [
"[email protected]"
]
| |
35c73627210b1a7f1a5074fa2d0e88abcd642a6c | bfa99342c8ae0a411e75ada24dd43017a4624d5e | /PushMessageTest/pushmessage.py | e014ddeaa25cd52e5729ac10b5622022a9fcbefd | []
| no_license | dumbcat/ROM-line-bot | 03969f79e0ecba3b0837e4a891c36b2a036506c7 | 89547b84f9ba132767cb7174dba50022d10201f6 | refs/heads/master | 2022-11-24T16:24:27.040068 | 2020-07-26T11:49:17 | 2020-07-26T11:49:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,385 | py | import json
import requests
import configparser
def sender(token, id, message):
"""Use line message api to push message to line user, group, or chat room.
Args:
token (string): Channel access token of message api.
id (string): Line ser, group, or chat room id.
message (string): A message which wants to push.
Returns:
tuple: A tuple include response code and message.
"""
url = 'https://api.line.me/v2/bot/message/push'
header = {'Content-Type': 'application/json',
'Authorization': token}
data = json.dumps({
"to": id,
"messages": [
{
"type": "text",
"text": message
}]
})
r = requests.post(url, headers=header, data=data)
if r.status_code == 200:
return r.status_code, 'Send Message Success'
else:
error_message = r.text.split(':', 1)[1].strip('}').strip('"')
return r.status_code, error_message
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read('config.ini')
channel_access_token = config.get('BASE', 'token')
    user_id = config.get('BASE', 'id')
    message = input('Please input message to send: ')
    code, message = sender(channel_access_token, user_id, message)
print(f'Response Code: {code}')
print(f'Response Message: {message}')
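# Editor's note: assumed config.ini layout for this script (values are
# placeholders, not from the original repo):
#   [BASE]
#   token = Bearer <channel access token>
#   id = <target user/group/room ID>
# The token must carry the "Bearer " prefix, since it is sent verbatim in
# the Authorization header above.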
| [
"[email protected]"
]
| |
d05028c308d892473fe48052245b721f6dc4a996 | ac10e9ebc0de81dcfd3090b59991400eb338b9fb | /main.py | 026cbeecf6e8401e43713f9ccb03c36bb121ad65 | []
| no_license | mack-book-review/CarGame | 78bcc309667ba07356b302165a9b1bd8becfcaef | 2e34e3528cfaff31ab6ec641e6962a9f65ab4d22 | refs/heads/master | 2023-08-30T09:49:37.609406 | 2021-11-04T02:56:11 | 2021-11-04T02:56:11 | 424,453,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | from game import Game
def print_hi(name):
# Use a breakpoint in the code line below to debug your script.
print(f'Hi, {name}') # Press ⌘F8 to toggle the breakpoint.
def main():
game = Game()
game.run()
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
main()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| [
"[email protected]"
]
| |
2ba8d89941a2e6ce76eb48b159481e5ed6b30d8a | d81501a3e150ce2c63a755988fb7da10af1a7d91 | /code/predictors/axis.py | a4643d65cf1d177b35a7122fd4381a9e5fdd9052 | [
"MIT"
]
| permissive | alexmbird/luckyhorse | b6a1c64b7368b081037b097bf95ec4f6be44ceea | 815502c06117c22456d20a5064a20e95bce4470d | refs/heads/master | 2021-10-13T11:24:19.528610 | 2021-10-12T08:47:54 | 2021-10-12T08:47:54 | 77,913,644 | 4 | 0 | MIT | 2021-10-12T08:47:20 | 2017-01-03T12:05:09 | Python | UTF-8 | Python | false | false | 6,607 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from collections import deque, defaultdict, namedtuple
import random
from operator import itemgetter
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from predictors.container import PredictionContainer
from predictors.coefficient import BaseCoefficient
from predictors.preds._base import PredictorBase
from utils.time import HORSEYTIEM
from utils.exceptions import InsufficientDataError
from predictors.factory import PredictorFactory
class Axis(PredictionContainer):
'''
Manage a coefficient by maintaining multiple versions of a predictor using
    different values. Mutate those values to converge on a good value.
Coefficients can never truly converge: the market's behaviour changes over time
so we must mutate constantly in case a new, more appropriate value emerges.
'''
# Use N parallel threads from top-level of an axis to do prediction & judging
NUM_THREADS = 4
def __init__(self, pfakt, klass, coefficients=None, target_coef=None, level=0):
'''
Setup a new axis.
`klass` - The Predictor class to work with.
`pfakt` - PredictorFactory to create Predictors with appropriate sources
`coefficients` - dict of coefs bound so far. We will add `mutate` to this for
our children.
`target_coef` - Name of the coefficient we will mutate. If not supplied a
random one will be selected.
'''
super(Axis,self).__init__()
if not isinstance(pfakt, PredictorFactory):
raise TypeError("pfakt needs to be an PredictorFactory, not %s" % type(pfakt))
self.pfakt = pfakt
if not issubclass(klass, PredictorBase):
raise TypeError("klass must descend from PredictorBase, not %s" % type(klass))
if coefficients is not None:
if set(coefficients.keys()) != set(klass.COEFFICIENTS.keys()):
raise ValueError("Supplied coefficients don't match Predictor")
self.coefficients = coefficients
if len(self._unbound()) == 0:
raise ValueError("All coefficients bound; nothing left to mutate")
else:
self.coefficients = dict.fromkeys(klass.COEFFICIENTS.keys(), None)
# Starting? pick a random coefficient to mutate. If none are left to mutate
# use 'None' to signal we'll create real Predictors as our children.
if target_coef is None:
self.target_coef = random.choice(self._unbound())
else:
self.target_coef = target_coef
if self.coefficients[self.target_coef] is not None:
raise ValueError("%s already bound, cannot mutate" % mutate)
# Top-level Axes may use threading to parallelize
# predictions & judgement.
self.level = level
if self.level == 0:
self.threadpool_ex = ThreadPoolExecutor(self.NUM_THREADS)
self.klass = klass
self.target_desc = klass.COEFFICIENTS[self.target_coef]
# Will our children be real Prediction* objects?
self.last_node = len(self._unbound()) == 1
# Store the present value of our mutable coefficient assigned to each child
self.coef_values = {}
# Populate ourself with children based on sensible values
for value in self.target_desc.seed():
ch = self._instantiate(value)
self.children.append(ch)
self.coef_values[ch] = value
def __str__(self):
fmt = "<%s mutating '%s', statics: %s>"
statics = filter(itemgetter(1), self.coefficients.items())
return fmt % (
self.__class__.__name__,
self.target_coef,
', '.join(["%s:%s" % (k,v) for k,v in statics]) if statics else 'none'
)
def dump(self, indent=0, last=True):
'''
Pretty-print a tree of containers and predictors
'''
for c in self.children:
fmt = "%s avnw:%s value:%s"
wavg = ('%.3f'%self.wrongness_avg[c]) if c in self.wrongness_avg else '-'
print( (' '*indent) + fmt % (c, wavg, self.coef_values[c]) )
if isinstance(c, PredictionContainer):
c.dump(indent+2, last=False)
if last:
print()
def _instantiate(self, value):
'''
Instantiate a child node. This will be either:
1) If no more coefficients are left to bind, a real Predictor obj OR
2) An Axis() object
'''
my_coefficients = self.coefficients.copy()
my_coefficients[self.target_coef] = value
if self.last_node:
return self.pfakt.create(self.klass, my_coefficients)
else:
next_mutate = sorted(self._unbound(my_coefficients))[0]
return Axis(
self.pfakt, self.klass,
my_coefficients, next_mutate,
level=self.level+1
)
def _unbound(self, c=None):
'''
Return a list of coefficients yet to be bound
'''
if c is None:
c = self.coefficients
return [k for k, v in c.items() if v is None]
def _bound(self, c=None):
'''
Return a dict of coefficients that are already bound
'''
if c is None:
c = self.coefficients
return {k:v for k,v in c.items() if v is not None}
    def _mutantval(self, child):
        '''
        Return the current value of the mutable coefficient assigned to `child`.
        '''
        return self.coef_values[child]
def mutate(self, coefficients=None):
'''
Mutate our mutable value by:
1) Replacing the weakest performer with a new value halfway between the best
2) Replacing the next two weakest performer's mutable values with XXX
New coefficients are passed into children as a dict with the `coefficients`
parameter.
'''
# Dict to convey changes to our children
if coefficients is None:
coefficients = {}
def mutatechild(ch, coefs, new_val=None):
coefs = coefs.copy()
if new_val is not None:
coefs[self.target_coef] = new_val
del self.coef_values[ch]
self.coef_values[ch] = new_val
self.wrongness_avg.pop(ch,None)
self.wrongness_hist.pop(ch,None)
ch.mutate(coefs)
# Only mutate our children if:
# 1) We are mutating on a mutable coef type - not all are
# 2) all have history to be judged by
if self.target_desc.IS_MUTABLE:
if len(self.wrongness_avg) == len(self.children):
# triplets of (child, value, avg normalized wrongness)
cands = [(c,self.wrongness_avg[c]) for c in self.children]
cands.sort(key=itemgetter(1))
# Create a child between the two best scorers
v0 = self.coef_values[cands[0][0]]
v1 = self.coef_values[cands[1][0]]
new_val = v1 + ((v0-v1)/2) if v0 > v1 else v0 + ((v1-v0)/2)
mutatechild(cands.pop()[0], coefficients, new_val)
# print("mutate() - finding point between v0:%s and v1:%s" % (v0,v1))
# Replace two worst performing with random values
for i in range(0,2):
mutatechild(cands.pop()[0], coefficients, self.target_desc.random())
# Finally - if other values have changed, relay them to the children
if len(coefficients):
for c in self.children:
mutatechild(c, coefficients)
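# Editor's usage sketch (hypothetical; MyPredictor and the prediction/judging
# loop supplied by PredictionContainer are defined elsewhere in this repo):
#   axis = Axis(pfakt, MyPredictor)
#   ...feed market data through the container's prediction interface...
#   axis.mutate()  # periodically replace weak coefficient values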
| [
"[email protected]"
]
| |
ba9bbd7d6df3b5dc24a42b58d5db54048471ec38 | 2eec9b249e7243277b59695499c779e5509c7d68 | /alldata/migrations/0009_announcement_announcementnotification_answerchoice_assignment_assignmentsubmission_coursepagemodule_.py | d578e2170496d6fcfaf6bb97e2e38e5cc5ab0b56 | [
"MIT"
]
| permissive | askhatkenenbay/Learning_Management_System_Backend | 04e7121cad7c46f32742d9274b0fce2cf562cd3b | 3e241a9f401623eec1c705ee5a38ad595b899c3b | refs/heads/master | 2023-04-07T10:59:12.431974 | 2021-04-18T06:47:13 | 2021-04-18T06:47:13 | 299,650,943 | 0 | 0 | null | 2021-02-25T08:40:46 | 2020-09-29T14:58:12 | Python | UTF-8 | Python | false | false | 9,322 | py | # Generated by Django 3.1.2 on 2020-10-12 14:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('alldata', '0008_advice_coursegrades_courseinstructor_studentenrollment'),
]
operations = [
migrations.CreateModel(
name='Coursepagemodule',
fields=[
('moduleid', models.AutoField(db_column='moduleID', primary_key=True, serialize=False)),
('title', models.CharField(max_length=45)),
('order', models.IntegerField()),
('coursesection_sectionid', models.ForeignKey(db_column='courseSection_sectionID', on_delete=django.db.models.deletion.CASCADE, to='alldata.coursesection')),
],
options={
'db_table': 'coursePageModule',
'unique_together': {('moduleid', 'coursesection_sectionid')},
},
),
migrations.CreateModel(
name='Discussion',
fields=[
('discussionid', models.AutoField(db_column='discussionID', primary_key=True, serialize=False)),
('title', models.CharField(max_length=45)),
('coursepagemodule_moduleid', models.ForeignKey(db_column='coursePageModule_moduleID', on_delete=django.db.models.deletion.CASCADE, to='alldata.coursepagemodule')),
],
options={
'db_table': 'discussion',
'unique_together': {('discussionid', 'coursepagemodule_moduleid')},
},
),
migrations.CreateModel(
name='File',
fields=[
('fileid', models.AutoField(db_column='fileID', primary_key=True, serialize=False)),
('placeid', models.CharField(db_column='placeID', max_length=45)),
('type', models.CharField(max_length=45)),
('url', models.CharField(max_length=45)),
],
options={
'db_table': 'file',
},
),
migrations.CreateModel(
name='Quiz',
fields=[
('quizid', models.AutoField(db_column='quizID', primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
('description', models.CharField(max_length=45)),
('open_time', models.TimeField()),
('close_time', models.TimeField()),
('time_limit', models.TimeField()),
                ('max_point', models.IntegerField()),
('coursepagemodule_moduleid', models.ForeignKey(db_column='coursePageModule_moduleID', on_delete=django.db.models.deletion.CASCADE, to='alldata.coursepagemodule')),
],
options={
'db_table': 'quiz',
'unique_together': {('quizid', 'coursepagemodule_moduleid')},
},
),
migrations.CreateModel(
name='Quizquestion',
fields=[
('questionid', models.AutoField(db_column='questionID', primary_key=True, serialize=False)),
('text', models.CharField(max_length=45)),
('is_open', models.BooleanField(default=False)),
('points', models.IntegerField()),
('quiz_quizid', models.ForeignKey(db_column='quiz_quizID', on_delete=django.db.models.deletion.CASCADE, to='alldata.quiz')),
],
options={
'db_table': 'quizQuestion',
'unique_together': {('questionid', 'quiz_quizid')},
},
),
migrations.CreateModel(
name='Assignment',
fields=[
('assignmentid', models.AutoField(db_column='assignmentID', primary_key=True, serialize=False)),
('name', models.CharField(max_length=45)),
('description', models.CharField(max_length=45)),
('start_date', models.DateTimeField()),
('due_date', models.DateTimeField()),
('max_point', models.IntegerField()),
('coursepagemodule_moduleid', models.ForeignKey(db_column='coursePageModule_moduleID', on_delete=django.db.models.deletion.CASCADE, to='alldata.coursepagemodule')),
],
options={
'db_table': 'assignment',
'unique_together': {('assignmentid', 'coursepagemodule_moduleid')},
},
),
migrations.CreateModel(
name='Answerchoice',
fields=[
('answerid', models.AutoField(db_column='answerID', primary_key=True, serialize=False)),
('text', models.CharField(max_length=45)),
('is_right', models.BooleanField(default=False)),
('quizquestion_questionid', models.ForeignKey(db_column='quizQuestion_questionID', on_delete=django.db.models.deletion.CASCADE, to='alldata.quizquestion')),
],
options={
'db_table': 'answerChoice',
'unique_together': {('answerid', 'quizquestion_questionid')},
},
),
migrations.CreateModel(
name='AnnouncementNotification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notify_object_id', models.IntegerField()),
('time', models.TimeField()),
('is_turned_on', models.BooleanField(default=False)),
('user_userid', models.ForeignKey(db_column='user_userID', on_delete=django.db.models.deletion.CASCADE, to='alldata.user')),
],
options={
'db_table': 'announcement_notification',
},
),
migrations.CreateModel(
name='Quizsubmission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answerchoice_answerid', models.OneToOneField(db_column='answerChoice_answerID', on_delete=django.db.models.deletion.CASCADE, to='alldata.answerchoice')),
('quiz_quizid', models.ForeignKey(db_column='quiz_quizID', on_delete=django.db.models.deletion.CASCADE, to='alldata.quiz')),
('quizquestion_questionid', models.OneToOneField(db_column='quizQuestion_questionID', on_delete=django.db.models.deletion.CASCADE, to='alldata.quizquestion')),
('student_studentid', models.ForeignKey(db_column='student_studentID', on_delete=django.db.models.deletion.CASCADE, to='alldata.student')),
],
options={
'db_table': 'quizSubmission',
'unique_together': {('student_studentid', 'quizquestion_questionid', 'quiz_quizid', 'answerchoice_answerid')},
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=45)),
('date', models.DateTimeField()),
('discussion_discussionid', models.ForeignKey(db_column='discussion_discussionID', on_delete=django.db.models.deletion.CASCADE, to='alldata.discussion')),
('user_userid', models.ForeignKey(db_column='user_userID', on_delete=django.db.models.deletion.CASCADE, to='alldata.user')),
],
options={
'db_table': 'post',
'unique_together': {('user_userid', 'discussion_discussionid')},
},
),
migrations.CreateModel(
name='Assignmentsubmission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(blank=True, null=True, unique=True)),
('points', models.IntegerField()),
('feedback', models.CharField(blank=True, max_length=45, null=True)),
('assignment_assignmentid', models.ForeignKey(db_column='assignment_assignmentID', on_delete=django.db.models.deletion.CASCADE, to='alldata.assignment')),
('student_studentid', models.ForeignKey(db_column='student_studentID', on_delete=django.db.models.deletion.CASCADE, to='alldata.student')),
],
options={
'db_table': 'assignmentSubmission',
'unique_together': {('assignment_assignmentid', 'student_studentid')},
},
),
migrations.CreateModel(
name='Announcement',
fields=[
('announcementid', models.AutoField(db_column='announcementID', primary_key=True, serialize=False)),
('text', models.CharField(max_length=45)),
('date', models.DateTimeField()),
('coursesection_sectionid', models.ForeignKey(db_column='courseSection_sectionID', on_delete=django.db.models.deletion.CASCADE, to='alldata.coursesection')),
],
options={
'db_table': 'announcement',
'unique_together': {('announcementid', 'coursesection_sectionid')},
},
),
]
| [
"[email protected]"
]
| |
3d898e9cd22c5c11329910a8c028e68b0b8622b6 | 0745860246fcf79ebb468d91ffee4c79ee3d4945 | /daily_odds/models.py | 65297cc30257db2fb2e2d7295c7d98439a15db88 | []
| no_license | Esschichu/socceranalyst | 7fd8aa486e8d6f6d1fb355be15960aa3d896dab9 | 45bd621fc1e65a0d8d7fd14ff404a1943063499e | refs/heads/master | 2023-03-18T03:11:14.703502 | 2018-05-03T19:38:52 | 2018-05-03T19:38:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | from django.db import models
import datetime
from django.db import models
from django.utils import timezone
class DailyMain(models.Model):
slip_date = models.DateTimeField('slip_date', default=None)
slip_name = models.CharField(max_length=40, null=True, blank=True)
    def name(self):
        return self.slip_name
def recent_slips(self):
now = timezone.now()
return now - datetime.timedelta(days=40) <= self.slip_date <= now
def showing_prev(self):
now = timezone.now()
yester = now - datetime.timedelta(1)
return now - datetime.timedelta(days=40) <= self.slip_date <= yester
class DailySub(models.Model):
daily_main = models.ForeignKey(DailyMain, on_delete=models.CASCADE, default=None)
country = models.CharField(max_length=30, default=None)
home_team = models.CharField(max_length=30)
away_team = models.CharField(max_length=30)
prediction = models.CharField(max_length=40)
outcome = models.CharField(max_length=20, null=True, blank=True)
match_date = models.DateTimeField('match date')
h2h_home = models.CharField(max_length=30, default=None)
h2h_away = models.CharField(max_length=30, default=None)
h2h_draw = models.CharField(max_length=30, default=None)
standings_home = models.CharField(max_length=30, default=None)
standings_away = models.CharField(max_length=30, default=None)
form_home = models.CharField(max_length=30, default=None)
form_away = models.CharField(max_length=30, default=None)
goals_home = models.IntegerField(default=None)
goals_away = models.IntegerField(default=None)
missing_players_home = models.CharField(max_length=300, default=None)
missing_players_away = models.CharField(max_length=300, default=None)
    def __str__(self):
        return '{} vs {}: {}'.format(self.home_team, self.away_team, self.prediction)
def match_outcome(self):
return self.outcome
def recent_matches(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.match_date <= now
| [
"[email protected]"
]
| |
7c2ff7068c4a75e9c61c48204c463f7cc357e497 | 4cb33f37e1322b5e7d4a8c2dc1dcd9867b03dd42 | /tstDates/myCal.py | ff0fc6bacc9e917f91ec3c9243dbe0412af72994 | []
| no_license | gilgamesh7/learn-python | 8190b72fed33653a98d2380a001a574942ee4f8f | 9eaf85ec992be5bc6d545a443bcf6ecd37cacb15 | refs/heads/master | 2020-04-07T10:21:10.805884 | 2018-12-11T08:31:21 | 2018-12-11T08:31:21 | 158,283,568 | 0 | 0 | null | 2018-11-20T00:40:28 | 2018-11-19T20:08:45 | Python | UTF-8 | Python | false | false | 378 | py | import calendar
def tstCal():
myCal=calendar.TextCalendar(calendar.MONDAY)
fmtMyCal=myCal.formatmonth(2018, 11, 1, 1)
print(fmtMyCal)
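    # formatmonth(year, month, w, l): w sets the per-day column width and
    # l the number of lines used for each week in the rendered calendar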
for myMonth in range(1,13):
myCal=calendar.TextCalendar(calendar.MONDAY)
fmtMyCal=myCal.formatmonth(2018,myMonth,1,1)
print(fmtMyCal)
if __name__ == '__main__':
tstCal()
exit(1) | [
"[email protected]"
]
| |
fcb3525b6b449bfe3b6d056191b980d8dad95733 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03044/s603271997.py | 359e1984b099d65a4e94c67dda0f5a26eb748584 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | from collections import deque
n = int(input())
uvw = [list(map(int, input().split())) for _ in range(n-1)]
l = [[] for _ in range(n)]
for u, v, w in uvw:
u, v = u-1, v-1
l[u].append((v, w))
l[v].append((u, w))
ans = [0] * n
parents = [-1] * n
q = deque([0])
while q:
a = q.pop()
for i, j in l[a]:
if i == parents[a]:
continue
parents[i] = a
q.append(i)
ans[i] = ans[a] if j%2 == 0 else (ans[a]+1) % 2
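# each vertex is coloured by the parity of its edge-weight distance from
# vertex 0: even-weight edges copy the parent's colour, odd-weight edges flip it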
for i in ans:
print(i) | [
"[email protected]"
]
| |
d9cd792f95700d7261bd95f404dea35e3f406bb2 | dec643e844179268a889d47172a52fd35f8a853c | /data_prep.py | beef8ae17aae0dfdd339e10a3788336601f22d86 | [
"MIT"
]
| permissive | harpreet153/Simple_Neural_Network | a26058642543d90495316057b98c0f34f6ca17c6 | ab59c19f09bd15158867cdab55243a94065b9402 | refs/heads/master | 2020-04-07T18:47:16.176234 | 2018-11-28T20:44:42 | 2018-11-28T20:44:42 | 158,623,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py |
# coding: utf-8
import numpy as np
import pandas as pd
admissions = pd.read_csv('student_data.csv')
# Make dummy variables for rank
data = pd.concat([admissions, pd.get_dummies(admissions['rank'], prefix='rank')], axis=1)
data = data.drop('rank', axis=1)
# Standarize features
for field in ['gre', 'gpa']:
mean, std = data[field].mean(), data[field].std()
data.loc[:,field] = (data[field]-mean)/std
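# gre and gpa now have zero mean and unit variance across the dataset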
# Split off random 10% of the data for testing
np.random.seed(21)
sample = np.random.choice(data.index, size=int(len(data)*0.9), replace=False)
data, test_data = data.loc[sample], data.drop(sample)
# Split into features and targets
features, targets = data.drop('admit', axis=1), data['admit']
features_test, targets_test = test_data.drop('admit', axis=1), test_data['admit']
| [
"[email protected]"
]
| |
80c8c3d3ff1516f508803938c89e42a30ca9e1dc | ad670c6a90f7ee300460934691879810914d38f6 | /backend/collectors/ucas/data.py | b78a2233ead5102567ad9a8673854e5fff07e5f1 | []
| no_license | kennydude/comparethatuni | a767bfb7d786b9e5601b2e1d2597b2a491dda7b7 | 16cef01500e91904204b9033a88087898aa21896 | refs/heads/master | 2016-08-05T12:21:50.205702 | 2012-11-10T18:37:04 | 2012-11-10T18:37:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,697 | py | data = {}
# Auto:
data["BSC"] = "Bachelor of Science"
data["BA"] = "Bachelor of Arts"
data["BACC"] = "Bachelor of Accounting"
data["BAE"] = "Bachelor of Arts and Economics"
data["BARCH"] = "Bachelor of Architecture"
data["BASC"] = "Bachelor of Applied Science"
data["BAS"] = "Bachelor of Applied Science"
data["BAPPSC"] = "Bachelor of Applied Science"
data["BBA"] = "Bachelor of Businesss Administration"
data["BCJ"] = "Bachelor of Crinimal Justice"
data["BCL"] = "Bachelor of Civil Law"
data["BCOUN"] = "Bachelor of Counseling"
data["BD"] = "Bachelor of Divinity"
data["BDES"] = "Bachelor of Design"
data["BECON"] = "Bachelor of Economics"
data["BECON&FIN"] = "Bachelor of Economics and Finance"
data["BENG"] = "Bachelor of Engineering"
data["BE"] = "Bachelor of Engineering"
data["BFIN"] = "Bachelor of Finance"
data["BFA"] = "Bachelor of Fine Art"
data["BHSC"] = "Bachelor of Health Science"
data["BLITT"] = "Bachelor of Literature"
data["LITTB"] = "Bachelor of Literature"
data["BMID"] = "Bachelor of Midwifery"
data["BMIN"] = "Bachelor of Ministry"
data["BNURS"] = "Bachelor of Nursing"
data["BN"] = "Bachelor of Nursing"
data["BPHARM"] = "Bachelor of Pharmacy"
data["BPHYS"] = "Bachelor of Physics"
data["BPHIL"] = "Bachelor of Philosophy"
data["BSC(PSYCH)"] = "Bachelor of Science in Psychology"
data["BSC(ECON)"] = "Bachelor of Science in Economics"
data["BSC(ENG)"] = "Bachelor of Science in Engineering"
data["BED"] = "Bachelor of Education"
data["EDB"] = "Bachelor of Education"
data["BDS"] = "Bachelor of Dental Surgery"
data["BCHD"] = "Bachelor of Dental Surgery"
data["BMUS"] = "Bachelor of Music"
data["BMUSB"] = "Bachelor of Music"
data["BMEDSC"] = "Bachelor of Biomedical Science"
data["BMSC"] = "Bachelor of Biomedical Science"
data["MBBS"] = "Bachelor of Medicine and Bachelor of Surgery"
data["MBCHB"] = "Bachelor of Medicine and Bachelor of Surgery"
data["BSCECON"] = "Bachelor of Economic and Social Studies"
data["BSCEC"] = "Bachelor of Economic and Social Studies"
data["BSOCSC"] = "Bachelor of Social Science"
data["BTCHG"] = "Bachelor of Teaching"
data["BTH"] = "Bachelor of Theology"
data["BTHEOL"] = "Bachelor of Teology"
data["THB"] = "Bachelor of Theology"
data["BTECH"] = "Bachelor of Technology"
data["MB"] = "Bachelor of Medicine"
data["BM"] = "Bachelor of Medicine"
data["BS"] = "Bachelor of Surgery"
data["CHB"] = "Bachelor of Surgery"
data["BCHIR"] = "Bachelor of Surgery"
data["BCH"] = "Bachelor of Surgery"
data["BVETMED"] = "Bachelor of Veterinary Medicine and Surgery"
data["VETMB"] = "Bachelor of Veterinary Medicine and Surgery"
data["BVMS"] = "Bachelor of Veterinary Medicine and Surgery"
data["BVM"] = "Bachelor of Veterinary Medicine and Surgery"
data["BVS"] = "Bachelor of Veterinary Medicine and Surgery"
data["BVMEDSC"] = "Bachelor of Veterinary Medical Science"
data["BVSC"] = "Bachelor of Veterinary Medical Science"
data["LLB"] = "Bachelor of Laws"
data["MA"] = "Master in Arts"
data["MACC"] = "Master in Accountancy"
data["MBIOCHEM"] = "Master in Biochemistry"
data["MBIOL"] = "Master in Biology"
data["MDIV"] = "Master in Divinity"
data["MDES"] = "Master in Design"
data["MEARTHSCI"] = "Master in Earth Science"
data["MESCI"] = "Master in Earth Science"
data["MENVSC"] = "Master in Environmental Science"
data["MGEOG"] = "Master in Geography"
data["MEOL"] = "Master in Geology"
data["MGEOPHYS"] = "Master in Geophysics"
data["MINF"] = "Master in Informatics"
data["MMATHCOMP"] = "Master of Computational Mathematics"
data["MMORSE"] = "Master in Mathematics, Operational Research, Statistics and Economics"
data["MNATSC"] = "Master in Natural Science"
data["MNURSSC"] = "Master in Nursing Science"
data["MOCEAN"] = "Master in Oceanography"
data["MPHRAM"] = "Master in Phramacy"
data["MPLAN"] = "Master in Planning"
data["MSC"] = "Master in Science"
data["MSTAT"] = "Masters in Statistics"
data["MTHEOL"] = "Master in Theology"
data["MCHEM"] = "Master in Chemistry"
data["MENG"] = "Master in Engineering"
data["MENV"] = "Master in Environmental Studies"
data["MCOMP"] = "Master in Computing"
data["MSCI"] = "Master in Science"
data["MMATH"] = "Master in Mathematics"
data["MLAW"] = "Master in Laws"
data["MMATHSTAT"] = "Master of Mathematics and Statistics"
data["FYR"] = "Foundation Year"
data["HND"] = "Higher National Diploma"
data["DIPHE"] = "Diploma of Higher Education"
data["FDSC"] = "Foundation Degree of Science"
data["FDA"] = "Foundation Degree of Art"
data["CERTHE"] = "Certificate of Higher Education"
data["DBA"] = "Doctor of Business Administration"
data["DCLINPSYCH"] = "Doctor of Clinical Psychology"
data["DDS"] = "Doctor of Dental Surgery"
data["DNURSSC"] = "Doctor of Nursing Science"
data["DPROF"] = "Doctor of Professional Studies"
data["EDPSYCHD"] = "Doctor of Educational Psychology"
data["DEDPSY"] = "Doctor of Educational Psychology"
data["HSCD"] = "Doctor of Health Science"
data["DHSC"] = "Doctor of Health Science"
data["MD"] = "Doctor of Medicine"
data["DM"] = "Doctor of Medicine"
data["DPT"] = "Doctor of Practical Theology"
data["EDD"] = "Doctor of Education"
data["DED"] = "Doctor of Education"
data["DMUS"] = "Doctor of Music"
data["MUSD"] = "Doctor of Music"
data["DMIN"] = "Doctor of Ministry"
data["PHD"] = "Doctor of Philosphy"
data["DPHIL"] = "Doctor of Philosophy"
data["SOCSCD"] = "Doctor of Social Science"
data["THD"] = "Doctor of Theology"
data["DD"] = "Doctor of Divinity"
data["DCL"] = "Doctor of Civil Law"
data["LLD"] = "Doctor of Laws"
data["DLITT"] = "Doctor of Letters"
data["LITTD"] = "Doctor of Letters"
data["DLIT"] = "Doctor of Literature"
data["DSC"] = "Doctor of Science"
data["SCD"] = "Doctor of Science"
data["ENGD"] = "Doctor of Engineering"
data["DUNIV"] = "Doctor of the University"
| [
"[email protected]"
]
| |
f84eea71e90ff7d2d7a6dddbf57e751ba15463fc | bec09fcb7126390f1738c4990fa08b95d0b42e95 | /CrawlerXingzhengquhuaFileTest.py | dad38273a93a3ce0e64970ef4b460c3a2cf898b1 | []
| no_license | javasqlbug/pythonProject | 5bb65807872a65c14c531027007fb3ed10c36119 | e6eb38ec7ef6e08beec343177e77f01952494746 | refs/heads/master | 2022-12-23T19:44:21.680262 | 2020-09-16T13:08:07 | 2020-09-16T13:08:07 | 295,891,536 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Adapted from http://c.biancheng.net/view/2011.html
import requests #导入requests包
from bs4 import BeautifulSoup
import re
import time
file_name = 'D:\\test\\xingzhengquhua\\xingzhengquhua.txt'
url='https://xingzhengquhua.51240.com/'
strhtml=requests.get(url)
soup=BeautifulSoup(strhtml.text,'lxml')
data = soup.select('#main_content > table > tr > td > table > tr')
print(data)
for item in data[2:-1]:
time.sleep(0.1)
result={
'title': re.findall('\D+', item.get_text()),
'ID': re.findall('\d+', item.get_text())
}
print(result)
with open(file_name, 'a') as file_obj:
#file_obj.write(str(result.get('ID')) + ',' + str(result.get('title')) + '\n')
#file_obj.write(",".join(result) + '\n')
file_obj.write(str(result['ID'][0]) + ',' + str(result['title'][0]) + ',,1' + '\n')
#file_obj.write('\r\n')
    # prefecture (city) level
url = 'https://xingzhengquhua.51240.com/' + str(re.findall('\d+', item.get_text())[0]) + '__xingzhengquhua/'
print(url)
strhtml = requests.get(url)
soup = BeautifulSoup(strhtml.text, 'lxml')
data = soup.select('#main_content > table > tr > td > table > tr')
print(data[3:])
for item in data[3:]:
time.sleep(0.1)
result = {
'title': re.findall('\D+', item.get_text()),
'ID': re.findall('\d+', item.get_text())
}
print(result)
with open(file_name, 'a') as file_obj:
file_obj.write(str(result['ID'][0]) + ',' + str(result['title'][0]) + ',,2' + '\n')
        # district / county level
url = 'https://xingzhengquhua.51240.com/' + str(re.findall('\d+', item.get_text())[0]) + '__xingzhengquhua/'
print(url)
strhtml = requests.get(url)
soup = BeautifulSoup(strhtml.text, 'lxml')
data = soup.select('#main_content > table > tr > td > table > tr')
print(data[3:])
for item in data[3:]:
time.sleep(0.1)
result = {
'title': re.findall('\D+', item.get_text()),
'ID': re.findall('\d+', item.get_text())
}
print(result)
with open(file_name, 'a') as file_obj:
file_obj.write(str(result['ID'][0]) + ',' + str(result['title'][0]) + ',,3' + '\n')
            # township / street level
url = 'https://xingzhengquhua.51240.com/' + str(re.findall('\d+', item.get_text())[0]) + '__xingzhengquhua/'
print(url)
strhtml = requests.get(url)
soup = BeautifulSoup(strhtml.text, 'lxml')
data = soup.select('#main_content > table > tr > td > table > tr')
print(data[3:])
for item in data[3:]:
time.sleep(0.1)
result = {
'title': re.findall('\D+', item.get_text()),
'ID': re.findall('\d+', item.get_text())
}
print(result)
with open(file_name, 'a') as file_obj:
file_obj.write(str(result['ID'][0]) + ',' + str(result['title'][0]) + ',,4' + '\n')
                # residents' committee (village) level
url = 'https://xingzhengquhua.51240.com/' + str(
re.findall('\d+', item.get_text())[0]) + '__xingzhengquhua/'
print(url)
strhtml = requests.get(url)
soup = BeautifulSoup(strhtml.text, 'lxml')
data = soup.select('#main_content > table > tr > td > table > tr')
print(data[3:])
for item in data[3:]:
time.sleep(0.1)
result = {
'title': re.findall('\D+', item.get_text()),
'ID': str(re.findall('\d+', item.get_text()))[2:14],
'type': str(re.findall('\d+', item.get_text()))[14:17]
}
print(result)
with open(file_name, 'a') as file_obj:
file_obj.write(result['ID'] + ',' + str(result['title'][0]) + ',' + result['type'] + ',5' + '\n')
print('Program finished')
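# Editor's note: the five identically-shaped levels above (and the reuse of
# `data`/`item` across nesting depths) could be folded into one recursive
# helper, e.g. (sketch with hypothetical helper names):
#   def crawl(code, level):
#       rows = fetch_rows('https://xingzhengquhua.51240.com/%s__xingzhengquhua/' % code)
#       for row in rows:
#           write_row(row, level)
#           if level < 5:
#               crawl(row_code(row), level + 1)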
"[email protected]"
]
| |
030e8876934a3a45463110d92ce353acf7e3978b | 564f887b3e4e81568e0088d3f4915d424213fee4 | /RE.CNN/code/cnn.py | 71ec84df1332b8c3ddcb01c2c6b9969fa1d54ca9 | []
| no_license | Minzhe/KerasDeepLearning | 9153eb836a80456c2397342236f5099c9826bb6b | 0ec0b6c0641e0f9f8a0bebc28af745de6b665b45 | refs/heads/master | 2020-03-21T09:11:02.651379 | 2018-06-30T08:36:11 | 2018-06-30T08:36:11 | 138,386,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,183 | py | ##########################################################################################################
### CNN.py ###
##########################################################################################################
# https://github.com/UKPLab/deeplearning4nlp-tutorial
"""
This is a CNN for relation classification within a sentence. The architecture is based on:
Daojian Zeng, Kang Liu, Siwei Lai, Guangyou Zhou and Jun Zhao, 2014, Relation Classification via Convolutional Deep Neural Network
Performance (without hyperparameter optimization):
Accuracy: 0.7943
Macro-Averaged F1 (without Other relation): 0.7612
Performance Zeng et al.
Macro-Averaged F1 (without Other relation): 0.789
Code was tested with:
- Python 2.7 & Python 3.6
- Theano 0.9.0 & TensorFlow 1.2.1
- Keras 2.0.5
"""
from __future__ import print_function
import numpy as np
import gzip
import sys
import pickle as pkl
import keras
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten, concatenate
from keras.layers import Embedding
from keras.layers import Convolution1D, MaxPooling1D, GlobalMaxPooling1D
from keras.regularizers import Regularizer
from keras.preprocessing import sequence
np.random.seed(1337) # for reproducibility
################## function #################
def getPrecision(pred_test, y_test, targetLabel):
# Precision for non-vague
targetLabelCount = 0
correctTargetLabelCount = 0
for idx in range(len(pred_test)):
if pred_test[idx] == targetLabel:
targetLabelCount += 1
if pred_test[idx] == y_test[idx]:
correctTargetLabelCount += 1
if correctTargetLabelCount == 0:
return 0
return float(correctTargetLabelCount) / targetLabelCount
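# note: calling getPrecision with the prediction and gold-label arguments
# swapped yields recall, which is how it is reused in the training loop below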
def predict_classes(prediction):
return prediction.argmax(axis=-1)
################## parameters #################
batch_size = 64
nb_filter = 100
filter_length = 3
hidden_dims = 100
nb_epoch = 100
pos_dims = 50
################## read data #################
print("Loading dataset ...")
f = gzip.open('data/sem-relations.pkl.gz', 'rb')
data = pkl.load(f)
f.close()
embeddings = data['wordEmbeddings']
y_train, sent_train, pos1_train, pos2_train = data['train_set']
y_test, sent_test, pos1_test, pos2_test = data['test_set']
max_pos = max(np.max(pos1_train), np.max(pos2_train)) + 1
n_out = max(y_train) + 1
# train_y_cat = np_utils.to_categorical(y_train, n_out)
max_sent_len = sent_train.shape[1]
print("Dimension sent_train: ", sent_train.shape)
print("Dimension pos1_train: ", pos1_train.shape)
print("Dimension y_train: ", y_train.shape)
print("Dimension sent_test: ", sent_test.shape)
print("Dimension pos1_test: ", pos1_test.shape)
print("Dimension y_test: ", y_test.shape)
print("Dimension Embeddings: ", embeddings.shape)
################## CNN model #################
# embedding layers
words_input = Input(shape=(max_sent_len,), dtype='int32', name='words_input')
words = Embedding(input_dim=embeddings.shape[0], output_dim=embeddings.shape[1], weights=[embeddings], trainable=False) (words_input)
dist1_input = Input(shape=(max_sent_len,), dtype='int32', name='dist1_input')
dist1 = Embedding(input_dim=max_pos, output_dim=pos_dims, trainable=True) (dist1_input)
dist2_input = Input(shape=(max_sent_len,), dtype='int32', name='dist2_input')
dist2 = Embedding(input_dim=max_pos, output_dim=pos_dims, trainable=True) (dist2_input)
output = concatenate([words, dist1, dist2])
# convolution layer
output = Convolution1D(filters=nb_filter, kernel_size=filter_length, padding='same', activation='tanh', strides=1)(output)
# we use standard max over time pooling
output = GlobalMaxPooling1D()(output)
output = Dropout(0.25)(output)
output = Dense(n_out, activation='softmax')(output)
model = Model(inputs=[words_input, dist1_input, dist2_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy', optimizer='Adam', metrics=['accuracy'])
model.summary()
################## training #################
print("Start training ...")
max_prec, max_rec, max_acc, max_f1 = 0, 0, 0, 0
for epoch in range(nb_epoch):
model.fit([sent_train, pos1_train, pos2_train], y_train, batch_size=batch_size, verbose=2, epochs=1)
pred_test = predict_classes(model.predict([sent_test, pos1_test, pos2_test], verbose=0))
dctLabels = np.sum(pred_test)
totalDCTLabels = np.sum(y_test)
acc = np.sum(pred_test == y_test) / float(len(y_test))
max_acc = max(max_acc, acc)
print("Accuracy: %.4f (max: %.4f)" % (acc, max_acc))
f1Sum = 0
f1Count = 0
for targetLabel in range(1, max(y_test)):
prec = getPrecision(pred_test, y_test, targetLabel)
recall = getPrecision(y_test, pred_test, targetLabel)
f1 = 0 if (prec+recall) == 0 else 2*prec*recall/(prec+recall)
f1Sum += f1
f1Count +=1
macroF1 = f1Sum / float(f1Count)
max_f1 = max(max_f1, macroF1)
print("Non-other Macro-Averaged F1: %.4f (max: %.4f)\n" % (macroF1, max_f1)) | [
"[email protected]"
]
| |
5784709a950d180e39c1205f1d2e1a0f2b52130e | 9e55492c1ea09b518b778a54bf513bb5ec573dcb | /Tools/python/userConfig_TEMPLATE.py | f94e387db161972b09492ec24bf6cb832ee9afc0 | []
| no_license | prafullaiitm/LatinoAnalysis | 36f5d2e1c2d4dfb08709e74aeab17cfb7201d6ce | ff71f4821f000a9f31fecfe0a66547a0d5265c5a | refs/heads/master | 2021-01-19T05:00:14.110174 | 2017-04-04T17:19:11 | 2017-04-04T17:19:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | #!/usr/bin/env python
baseDir = '/afs/cern.ch/user/x/xjanssen/cms/HWW2015/'
jobDir = baseDir+'jobs/'
workDir = baseDir+'workspace/'
| [
"[email protected]"
]
| |
08a3b367ed240d7bf2c7506b76c8e53f5a8ade3b | 339ff1f2089a085641c6b76818927f6b711c4750 | /eic/identities.py | 92ee4da245df8961e9cffdce60ed58c8ccd65e2d | []
| no_license | Muterra/muse-experiments | b670c67a2726722695487f2f77dcdddd1a86f65a | 4ef87a6279d60d5d25493266b21d830050ac1c1a | refs/heads/master | 2020-05-24T15:31:10.949179 | 2015-07-27T15:54:56 | 2015-07-27T15:54:56 | 39,783,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,529 | py | '''
LICENSING
-------------------------------------------------
pyEIC: A python library for EIC manipulation.
Copyright (C) 2014-2015 Nicholas Badger
[email protected]
nickbadger.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
USA
------------------------------------------------------
'''
# Global dependencies that aren't here because I'm being lazy
import base64
import struct
from collections import deque
import abc
# This is a universal symmetric key for public identities. It is contained
# within the EIC spec in "bootstraps".
PUBLIC_ID_SYMKEY = bytes(32)
PUBLIC_ID_PRIVKEY = bytes(512)
PUBLIC_ID_PUBKEY = bytes(512)
class IdentityProvider(metaclass=abc.ABCMeta):
''' An abstract base class for a mechanism that keeps track of
identity requirements.
'''
def __init__(self, storage_providers):
        ''' Store the storage providers this identity provider will
        resolve identities against.
        '''
# How to check any of these?
self._stores = storage_providers
@property
def storage_providers(self):
''' Read-only property returning the storage providers.
'''
return self._stores
@abc.abstractmethod
def fetch_pubkey(self, euid, cipher_suite):
''' Returns the pubkey associated with the given euid.
'''
pass
@abc.abstractmethod
def new_identity(self, pubkey):
''' Creates an identity from the pubkey, returning the euid.
'''
pass
class GenericIdentityProvider(IdentityProvider):
''' Implements an access provider solely tasked with unlocking
identities.
'''
    def fetch_pubkey(self, euid, cipher_suite):
''' Gets the public key from an euid at self's storage
providers.
'''
eics = EICs.fetch(euid, PUBLIC_ID_SYMKEY, self.storage_providers)
| [
"[email protected]"
]
| |
67f79154b5a77882ba2db7f43578b38243154c28 | 91c9f6c1c6933d21076b29fce93580a4cca63ab2 | /setup.py | c5100378629fbf20ed0894b594c98d8b44576c51 | [
"Apache-2.0"
]
| permissive | pansiyuan123/mmdetection-rocm | cb4ee8474978019963f3192e2bcdff5a46601d75 | 5fbfedd4ea73645750626d70ac8ed456759b6b4f | refs/heads/master | 2022-11-18T09:01:40.626726 | 2020-07-01T05:43:22 | 2020-07-01T05:43:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,160 | py | #!/usr/bin/env python
import os
import subprocess
import time
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_git_hash():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
sha = out.strip().decode('ascii')
except OSError:
sha = 'unknown'
return sha
def get_hash():
if os.path.exists('.git'):
sha = get_git_hash()[:7]
elif os.path.exists(version_file):
try:
from mmdet.version import __version__
sha = __version__.split('+')[-1]
except ImportError:
raise ImportError('Unable to get git version')
else:
sha = 'unknown'
return sha
def write_version_py():
content = """# GENERATED VERSION FILE
# TIME: {}
__version__ = '{}'
short_version = '{}'
version_info = ({})
"""
sha = get_hash()
with open('mmdet/VERSION', 'r') as f:
SHORT_VERSION = f.read().strip()
VERSION_INFO = ', '.join(SHORT_VERSION.split('.'))
VERSION = SHORT_VERSION + '+' + sha
version_file_str = content.format(time.asctime(), VERSION, SHORT_VERSION,
VERSION_INFO)
with open(version_file, 'w') as f:
f.write(version_file_str)
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
is_rocm_pytorch = False
if torch.__version__ >= '1.5':
from torch.utils.cpp_extension import ROCM_HOME
is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False
if (torch.cuda.is_available() or is_rocm_pytorch) or os.getenv('FORCE_CUDA', '0') == '1':
extension = CUDAExtension
sources += sources_cuda
if not is_rocm_pytorch:
define_macros += [('WITH_CUDA', None)]
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
else:
define_macros += [('WITH_HIP', None)]
nvcc_flags = []
extra_compile_args = {
'cxx': [],
'nvcc': nvcc_flags,
}
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
# raise EnvironmentError('CUDA is required to compile MMDetection!')
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
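# note: with neither CUDA nor ROCm available (and FORCE_CUDA unset), the ops
# above fall back to CPU-only builds via CppExtension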
def parse_requirements(fname='requirements.txt', with_version=True):
"""
Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import sys
from os.path import exists
import re
require_fpath = fname
def parse_line(line):
"""
Parse information from a line in a requirements text file
"""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
if __name__ == '__main__':
write_version_py()
setup(
name='mmdet',
version=get_version(),
description='Open MMLab Detection Toolbox and Benchmark',
long_description=readme(),
author='OpenMMLab',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
package_data={'mmdet.ops': ['*/*.so']},
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
license='Apache License 2.0',
setup_requires=parse_requirements('requirements/build.txt'),
tests_require=parse_requirements('requirements/tests.txt'),
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
},
ext_modules=[
make_cuda_ext(
name='compiling_info',
module='mmdet.ops.utils',
sources=['src/compiling_info.cpp']),
make_cuda_ext(
name='nms_ext',
module='mmdet.ops.nms',
sources=['src/nms_ext.cpp', 'src/cpu/nms_cpu.cpp'],
sources_cuda=[
'src/hip/nms_cuda.cpp', 'src/hip/nms_kernel.hip'
]),
make_cuda_ext(
name='roi_align_ext',
module='mmdet.ops.roi_align',
sources=[
'src/roi_align_ext.cpp',
'src/cpu/roi_align_v2.cpp',
],
sources_cuda=[
'src/hip/roi_align_kernel.hip',
'src/hip/roi_align_kernel_v2.hip'
]),
make_cuda_ext(
name='roi_pool_ext',
module='mmdet.ops.roi_pool',
sources=['src/roi_pool_ext.cpp'],
sources_cuda=['src/hip/roi_pool_kernel.hip']),
make_cuda_ext(
name='deform_conv_ext',
module='mmdet.ops.dcn',
sources=['src/deform_conv_ext.cpp'],
sources_cuda=[
'src/hip/deform_conv_cuda.cpp',
'src/hip/deform_conv_hip_kernel.hip'
]),
make_cuda_ext(
name='deform_pool_ext',
module='mmdet.ops.dcn',
sources=['src/deform_pool_ext.cpp'],
sources_cuda=[
'src/hip/deform_pool_cuda.cpp',
'src/hip/deform_pool_hip_kernel.hip'
]),
make_cuda_ext(
name='sigmoid_focal_loss_ext',
module='mmdet.ops.sigmoid_focal_loss',
sources=['src/sigmoid_focal_loss_ext.cpp'],
sources_cuda=['src/hip/sigmoid_focal_loss_hip.hip']),
make_cuda_ext(
name='masked_conv2d_ext',
module='mmdet.ops.masked_conv',
sources=['src/masked_conv2d_ext.cpp'],
sources_cuda=[
'src/hip/masked_conv2d_cuda.cpp',
'src/hip/masked_conv2d_kernel.hip'
]),
make_cuda_ext(
name='carafe_ext',
module='mmdet.ops.carafe',
sources=['src/carafe_ext.cpp'],
sources_cuda=[
'src/hip/carafe_cuda.cpp',
'src/hip/carafe_hip_kernel.hip'
]),
make_cuda_ext(
name='carafe_naive_ext',
module='mmdet.ops.carafe',
sources=['src/carafe_naive_ext.cpp'],
sources_cuda=[
'src/hip/carafe_naive_cuda.cpp',
'src/hip/carafe_naive_hip_kernel.hip'
]),
#make_cuda_ext(
# name='corner_pool_ext',
# module='mmdet.ops.corner_pool',
# sources=['src/corner_pool.cpp']),
],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| [
"[email protected]"
]
| |
e2048391927256fc2515bf8c8bfb13dd16269986 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /week/case/large_day/way/bad_thing_and_person/eye.py | 9d986299d4e6b9d77f6fec2a2c89dd8ed8aab7a9 | []
| no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
#! /usr/bin/env python
def time_or_old_fact(str_arg):
life(str_arg)
print('small_company')
def life(str_arg):
print(str_arg)
if __name__ == '__main__':
time_or_old_fact('say_high_person_up_thing')
| [
"[email protected]"
]
| |
8315cc52bffb847852532e37dd45604b0dec1350 | d904ecb1cf65ffbd2bd0332b46ae3c90a9d96158 | /dzz/dz.py | 62bb5fcb4b8ca30dc8e048af5f60992d83359f27 | []
| no_license | EraSilv/day2 | 9e1b5573017a6c3e3a4afa3cc0d19de10fcc4d34 | a1528897b150cd856d72e7ab387a1dbb5e0189ee | refs/heads/master | 2023-06-04T00:09:18.992833 | 2021-06-28T11:45:36 | 2021-06-28T11:45:36 | 368,862,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | #a = 35
#b = int(input())
#print(b > a)
#rint(b < a)
#print(b == a)
#print(b != a)
#a = int(input("первое число:"))
#b = int(input("второе число:"))
#print(a + b)
#print(a * b)
#a = int(input( "ur first no:"))
#b = float(input("ur second no:"))
#c = int(input("ur third no:"))
#print(a == b == c)
location = input("region:")
age = input("age:")
print('')
print("Ur Region:" + location)
print("Age:" + age)
print('')
ponchiki = "Ohaiyooo! I am Erlan, and i live in "
ponchiki_2 = "I am "
print(ponchiki + '' + location)
print(ponchiki_2 + '' + age)
| [
"[email protected]"
]
| |
ba382696da0039d785ebfec6866480fa880b41ae | 38ae0a339102c9fa0c24ecac7901a0103f87c1fe | /Lib/site-packages/pip/_vendor/pep517/wrappers.py | fe5b787c9e9a772e68d4f31be10a41b8036235e0 | []
| no_license | Tanzin-Ul-Islam/Django_dynamic_filterform | 11cf897391abc56929b3e6d5156660312a65ef13 | 76eb92d65a8cab5a9b294f87c144e425227feee5 | refs/heads/main | 2023-02-23T02:38:51.562393 | 2021-01-26T20:36:20 | 2021-01-26T20:36:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,291 | py | import threading
from contextlib import contextmanager
import os
from os.path import dirname, abspath, join as pjoin
import shutil
from subprocess import check_call, check_output, STDOUT
import sys
from tempfile import mkdtemp
from . import compat
__all__ = [
'BackendUnavailable',
'BackendInvalid',
'HookMissing',
'UnsupportedOperation',
'default_subprocess_runner',
'quiet_subprocess_runner',
'Pep517HookCaller',
]
try:
import importlib.resources as resources
def _in_proc_script_path():
return resources.path(__package__, '_in_process.py')
except ImportError:
@contextmanager
def _in_proc_script_path():
yield pjoin(dirname(abspath(__file__)), '_in_process.py')
@contextmanager
def tempdir():
td = mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
class BackendUnavailable(Exception):
"""Will be raised if the backend cannot be imported in the hook process."""
def __init__(self, traceback):
self.traceback = traceback
class BackendInvalid(Exception):
"""Will be raised if the backend is invalid."""
def __init__(self, backend_name, backend_path, message):
self.backend_name = backend_name
self.backend_path = backend_path
self.message = message
class HookMissing(Exception):
"""Will be raised on missing hooks."""
def __init__(self, hook_name):
super(HookMissing, self).__init__(hook_name)
self.hook_name = hook_name
class UnsupportedOperation(Exception):
"""May be raised by build_sdist if the backend indicates that it can't."""
def __init__(self, traceback):
self.traceback = traceback
def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""The default method of calling the wrapper subprocess."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_call(cmd, cwd=cwd, env=env)
def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
"""A method of calling the wrapper subprocess while suppressing output."""
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
def norm_and_check(source_tree, requested):
"""Normalise and check a backend path.
Ensure that the requested backend path is specified as a relative path,
and resolves to a location under the given source tree.
Return an absolute version of the requested path.
"""
if os.path.isabs(requested):
raise ValueError("paths must be relative")
abs_source = os.path.abspath(source_tree)
abs_requested = os.path.normpath(os.path.join(abs_source, requested))
# We have to use commonprefix for Python 2.7 compatibility. So we
# normalise case to avoid problems because commonprefix is a character
# based comparison :-(
norm_source = os.path.normcase(abs_source)
norm_requested = os.path.normcase(abs_requested)
if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
raise ValueError("paths must be inside source tree")
return abs_requested
class Pep517HookCaller(object):
"""A wrapper around a source directory to be built with a PEP 517 backend.
:param source_dir: The path to the source directory, containing
pyproject.toml.
:param build_backend: The build backend spec, as per PEP 517, from
pyproject.toml.
:param backend_path: The backend path, as per PEP 517, from pyproject.toml.
:param runner: A callable that invokes the wrapper subprocess.
:param python_executable: The Python executable used to invoke the backend
The 'runner', if provided, must expect the following:
- cmd: a list of strings representing the commands and arguments to
execute, as would be passed to e.g. 'subprocess.check_call'.
- cwd: a string representing the working directory that must be
used for the subprocess. Corresponds to the provided source_dir.
- extra_environ: a dict mapping environment variable names to values
which must be set for the subprocess execution.
"""
def __init__(
self,
source_dir,
build_backend,
backend_path=None,
runner=None,
python_executable=None,
):
if runner is None:
runner = default_subprocess_runner
self.source_dir = abspath(source_dir)
self.build_backend = build_backend
if backend_path:
backend_path = [
norm_and_check(self.source_dir, p) for p in backend_path
]
self.backend_path = backend_path
self._subprocess_runner = runner
if not python_executable:
python_executable = sys.executable
self.python_executable = python_executable
@contextmanager
def subprocess_runner(self, runner):
"""A context manager for temporarily overriding the default subprocess
runner.
"""
prev = self._subprocess_runner
self._subprocess_runner = runner
try:
yield
finally:
self._subprocess_runner = prev
def get_requires_for_build_wheel(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["wheel >= 0.25", "setuptools"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_wheel', {
'config_settings': config_settings
})
def prepare_metadata_for_build_wheel(
self, metadata_directory, config_settings=None,
_allow_fallback=True):
"""Prepare a ``*.dist-info`` folder with metadata for this project.
Returns the name of the newly created folder.
If the build backend defines a hook with this name, it will be called
in a subprocess. If not, the backend will be asked to build a wheel,
and the dist-info extracted from that (unless _allow_fallback is
False).
"""
return self._call_hook('prepare_metadata_for_build_wheel', {
'metadata_directory': abspath(metadata_directory),
'config_settings': config_settings,
'_allow_fallback': _allow_fallback,
})
def build_wheel(
self, wheel_directory, config_settings=None,
metadata_directory=None):
"""Build a wheel from this project.
Returns the name of the newly created file.
In general, this will call the 'build_wheel' hook in the backend.
However, if that was previously called by
'prepare_metadata_for_build_wheel', and the same metadata_directory is
used, the previously built wheel will be copied to wheel_directory.
"""
if metadata_directory is not None:
metadata_directory = abspath(metadata_directory)
return self._call_hook('build_wheel', {
'wheel_directory': abspath(wheel_directory),
'config_settings': config_settings,
'metadata_directory': metadata_directory,
})
def get_requires_for_build_sdist(self, config_settings=None):
"""Identify packages required for building a wheel
Returns a list of dependency specifications, e.g.::
["setuptools >= 26"]
This does not include requirements specified in pyproject.toml.
It returns the result of calling the equivalently named hook in a
subprocess.
"""
return self._call_hook('get_requires_for_build_sdist', {
'config_settings': config_settings
})
def build_sdist(self, sdist_directory, config_settings=None):
"""Build an sdist from this project.
Returns the name of the newly created file.
This calls the 'build_sdist' backend hook in a subprocess.
"""
return self._call_hook('build_sdist', {
'sdist_directory': abspath(sdist_directory),
'config_settings': config_settings,
})
def _call_hook(self, hook_name, kwargs):
# On Python 2, pytoml returns Unicode values (which is correct) but the
# environment passed to check_call needs to contain string values. We
# convert here by encoding using ASCII (the backend can only contain
# letters, digits and _, . and : characters, and will be used as a
# Python identifier, so non-ASCII content is wrong on Python 2 in
# any case).
# For backend_path, we use sys.getfilesystemencoding.
if sys.version_info[0] == 2:
build_backend = self.build_backend.encode('ASCII')
else:
build_backend = self.build_backend
extra_environ = {'PEP517_BUILD_BACKEND': build_backend}
if self.backend_path:
backend_path = os.pathsep.join(self.backend_path)
if sys.version_info[0] == 2:
backend_path = backend_path.encode(sys.getfilesystemencoding())
extra_environ['PEP517_BACKEND_PATH'] = backend_path
with tempdir() as td:
hook_input = {'kwargs': kwargs}
compat.write_json(hook_input, pjoin(td, 'input.json'),
indent=2)
# Run the hook in a subprocess
with _in_proc_script_path() as script:
python = self.python_executable
self._subprocess_runner(
[python, abspath(str(script)), hook_name, td],
cwd=self.source_dir,
extra_environ=extra_environ
)
data = compat.read_json(pjoin(td, 'output.json'))
if data.get('unsupported'):
raise UnsupportedOperation(data.get('traceback', ''))
if data.get('no_backend'):
raise BackendUnavailable(data.get('traceback', ''))
if data.get('backend_invalid'):
raise BackendInvalid(
backend_name=self.build_backend,
backend_path=self.backend_path,
message=data.get('backend_error', '')
)
if data.get('hook_missing'):
raise HookMissing(hook_name)
return data['return_val']
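
# A minimal end-to-end sketch (illustrative; it assumes the class above is
# pep517's Pep517HookCaller and that the source tree's pyproject.toml names
# 'setuptools.build_meta' as its build backend):
#
#     hooks = Pep517HookCaller('/path/to/src', 'setuptools.build_meta')
#     print(hooks.get_requires_for_build_wheel())
#     wheel_name = hooks.build_wheel('/tmp/wheels')
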
class LoggerWrapper(threading.Thread):
"""
Read messages from a pipe and redirect them
to a logger (see python's logging module).
"""
def __init__(self, logger, level):
threading.Thread.__init__(self)
self.daemon = True
self.logger = logger
self.level = level
# create the pipe and reader
self.fd_read, self.fd_write = os.pipe()
self.reader = os.fdopen(self.fd_read)
self.start()
def fileno(self):
return self.fd_write
@staticmethod
def remove_newline(msg):
return msg[:-1] if msg.endswith(os.linesep) else msg
def run(self):
for line in self.reader:
self._write(self.remove_newline(line))
def _write(self, message):
self.logger.log(self.level, message)
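
# A usage sketch (illustrative): redirect a child process's stdout into the
# logging system by handing subprocess the wrapper's write end of the pipe.
#
#     import logging
#     import subprocess
#     log = logging.getLogger('build')
#     wrapper = LoggerWrapper(log, logging.INFO)
#     subprocess.check_call(['echo', 'hello'], stdout=wrapper)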
| [
"[email protected]"
]
| |
0b283bf769e85611ae53808e6e2aa56546029522 | 50a21aa11a8d55fdaf8de6eb3804fdc27c16ca24 | /failed examples/try1/server1.py | e0504e42eda80e2b911041649709f6c4559ea973 | []
| no_license | gideon59a/websock | 50624ed35107202c359ba4fc1f24ef5b20e3b5d8 | eac5875033f80b06fec1254edbd9b35a0978b779 | refs/heads/main | 2023-07-18T21:15:13.073419 | 2021-09-28T08:56:51 | 2021-09-28T08:56:51 | 411,205,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # Ref (works ok): https://pythonprogramminglanguage.com/python-flask-websocket/
# NOTE: The server failed until I downgraded flask-socketio to 4.3.2 per https://github.com/miguelgrinberg/Flask-SocketIO/issues/1432
# C:\Python\Python39\Scripts\pip.exe install -Iv flask-socketio==4.3.2
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
app = Flask(__name__)
socketio = SocketIO(app)
@app.route('/')
def index():
return render_template('index.html')
@socketio.on('connect')
def test_connect():
print("*** connect event ***")
emit('after connect', {'data': 'Lets dance'})
emit('after connect', {'data': 'sent again'})
emit('after connect', {'data': '3rd time'})
if __name__ == '__main__':
socketio.run(app, host='0.0.0.0', port=50001, debug=True)
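
# A client-side sketch (an assumption, not part of this file): with the
# python-socketio client package installed, the handshake above can be
# exercised without a browser.
#
#     import socketio
#     sio = socketio.Client()
#
#     @sio.on('after connect')
#     def on_after_connect(data):
#         print(data)
#
#     sio.connect('http://localhost:50001')
#     sio.wait()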
| [
"[email protected]"
]
| |
1ea6b52092f1832a41e71c7d18ba2b6654e3d8c8 | 7c5d7347294afced072950c2b22cf8a12d54a3cc | /src/calibration.py | 8840f7488bbe1a530a7b11195ea4e2fe56fdea25 | []
| no_license | DinoHub-SubT/basestation_gui_python | 129b7d2cf90175b17462f7c36f0ebf684e5f8562 | e4cf13f4f0ab9811880981d8623c085b6509eda5 | refs/heads/master | 2023-08-29T11:16:28.108675 | 2019-12-21T23:23:03 | 2019-12-21T23:23:03 | 423,113,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,348 | py | from __future__ import print_function
import os
import math
import random
import datetime
import json
import re
import subprocess
import rospkg
import rospy
import robots
from qt_gui.plugin import Plugin
from python_qt_binding import loadUi, QtCore, QtGui, QtWidgets
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Quaternion, Point
class Robot(object):
"""
Robot provides a structure for the UI to communicate with its controller and has the
following fields:
- name
Copied from a robots.Robot.
- is_aerial
True if the robot is an aerial (drone) robot. Copied from a robots.Robot.
- points
      The list of point pairs saved so far.
- last_save
      Records the datetime of when the last successful point save operation was
      performed on the robot in the UI.  This should be reset to the empty string
if the user resets the points.
- uuid
      A random unique identifier that conforms to the UUID4 specification.  This is
      created once and should never be changed.  Copied from a robots.Robot.
- mean_error
The computed mean error after calibrating the transform matrix.
- transform:
A 4x4 matrix that can transform CMU's Subt coordinate frame to DARPA's coordinate
frame. The default is the identity matrix.
- quaternion:
The rotation portion of 'transform' represented in quaternion form.
- darpa_tf_pub:
      ROS publisher used to send an Odometry message to the robot so that it can
      utilize the calibrated DARPA transform.
"""
def __init__(self, cfgRobot):
self.name = cfgRobot.name
self.is_aerial = cfgRobot.is_aerial
self.points = []
self.last_save = ""
self.uuid = cfgRobot.uuid
self.mean_error = 0
self.transform = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
self.quaternion = [0, 0, 0, 1]
self.darpa_tf_pub = None
def encode_to_json(self):
"""Encode this Robot object into a dictionary that is suitable for JSON encoding."""
d = dict()
d["name"] = self.name
d["uuid"] = self.uuid
d["is_aerial"] = self.is_aerial
d["points"] = self.points
d["last_save"] = self.last_save
d["mean_error"] = self.mean_error
d["transform"] = self.transform
d["quaternion"] = self.quaternion
return d
def decode_from_json(self, obj):
"""
        Decode a dictionary object that was loaded from JSON into this Robot object.
        Note that while all Robot properties are serialized to the JSON file, not all
        of them are deserialized back into the Robot, as some properties are already
        derived from the configuration robots.Robot.
"""
self.points = obj["points"]
self.last_save = obj["last_save"]
self.mean_error = obj["mean_error"]
self.transform = obj["transform"]
self.quaternion = obj["quaternion"]
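
# For reference, a persisted robot file (<uuid>.json) produced by
# Robot.encode_to_json looks roughly like this (all values illustrative):
#
#     {"name": "R1", "uuid": "a1b2...", "is_aerial": false,
#      "points": [[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]],
#      "last_save": "2019-05-01 12:00:00.000000", "mean_error": 0.01,
#      "transform": [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
#      "quaternion": [0, 0, 0, 1]}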
class Calibration(Plugin):
"""
Main entry point for the robot calibration plugin and acts as the controller
for the CalibrationView.
    This plugin takes a different overall strategy than the old calibration workflow.
    Previously the GUI could only support two robots and the user had to run the
    calibration process in a separate terminal window.  This is no longer necessary,
    as this plugin performs that process on the user's behalf while allowing an
    arbitrary number of robots.

    The support for any number of robots required a different strategy to interface
    with the calibration process.  Now the two have been decoupled, as this plugin
    maintains its own set of data files under the basestation_gui_python/data/calibration
    path.  Inside this folder there will be a JSON file for each robot that is added
    through the user interface.  The name of the file is the UUID that was assigned
    to the Robot object when it was created.  The contents of this file are the Robot
    object serialized into JSON form.  Ideally, a database would be the right answer
    here, but adding such a dependency is overkill for what is needed from the GUI.

    With robots continuously persisted into JSON files, this plugin is also decoupled
    from any other plugin that needs the resulting data.  For example, when a robot
    has detected an artifact, the other plugin that is managing artifact images can
    load the robot data to obtain its DARPA transformation matrix to apply to the
    artifact's coordinate frame.  In other words, any type of message passing between
    plugins is unnecessary.
"""
def __init__(self, context):
super(Calibration, self).__init__(context)
self.setObjectName("Calibration")
self.last_pose_total = None
self.last_pose = dict()
def odometry_to_pose(odometry):
return [
odometry.pose.pose.position.x,
odometry.pose.pose.position.y,
odometry.pose.pose.position.z,
]
# Subscribe callbacks
def make_pose_callback(robot):
def on_pose(msg):
self.last_pose[robot.uuid] = odometry_to_pose(msg)
return on_pose
def on_pose_total(msg):
self.last_pose_total = odometry_to_pose(msg)
self.subs = [rospy.Subscriber("/position", Odometry, on_pose_total)]
cal_robots = []
root = self.robot_dir()
config = robots.Config()
for cfg in config.robots:
robot = Robot(cfg)
name = robot.uuid + ".json"
path = os.path.join(root, name)
if os.path.exists(path):
with open(path) as f:
robot.decode_from_json(json.load(f))
else:
with open(path, "w") as f:
json.dump(robot.encode_to_json(), f)
topic = "/{0}/{1}".format(cfg.topic_prefix, cfg.topics["darpa_tf"])
robot.darpa_tf_pub = rospy.Publisher(topic, Odometry, queue_size=10)
topic = "/{0}/{1}".format(cfg.topic_prefix, cfg.topics["calibration"])
sub = rospy.Subscriber(topic, Odometry, make_pose_callback(robot))
self.subs.append(sub)
cal_robots.append(robot)
self.view = CalibrationView(self, cal_robots)
context.add_widget(self.view)
def shutdown_plugin(self):
for s in self.subs:
s.unregister()
#################### private methods ####################
def add_pose(self, robot, pose, total):
robot.points.append([pose, total])
robot.last_save = str(datetime.datetime.now())
self.persist(robot)
return robot
def robot_dir(self):
"""Robot_dir returns the directory where Robot objects are archived."""
p = rospkg.RosPack().get_path("basestation_gui_python")
return os.path.join(p, "data", "calibration")
def robot_filename(self, robot):
"""Robot_filename returns the full file path of where a Robot object should be archived."""
return os.path.join(self.robot_dir(), robot.uuid + ".json")
def persist(self, robot):
"""Persist archives _robot_ as json to the path specificed by robot_filename."""
try:
fn = self.robot_filename(robot)
with open(fn, "w") as f:
json.dump(robot.encode_to_json(), f)
        except Exception as e:
            # QtWidgets is the binding imported at the top of this file; the
            # original reference to an undefined 'qt' module was a bug.
            MB = QtWidgets.QMessageBox
            MB.critical(
                None,
                "Basestation",
                "Calibration persist error: {0}".format(e),
                buttons=MB.Ok,
                defaultButton=MB.Ok,
            )
#################### CalibrationView interface methods ####################
def name_changed(self, robot):
self.persist(robot)
def on_save(self, robot):
        have_pose = robot.uuid in self.last_pose
        have_total = self.last_pose_total is not None
if have_pose and have_total:
p = self.last_pose.get(robot.uuid)
return self.add_pose(robot, p, self.last_pose_total)
elif have_pose:
return "Have not received total pose. No point saved."
else:
return "Have not received robot pose. No point saved."
def on_reset(self, robot):
robot.points = []
robot.last_save = ""
robot.transform = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
robot.quaternion = [0, 0, 0, 1]
qt = Quaternion(0, 0, 0, 1)
tf = Odometry()
tf.pose.pose.position.x = 0
tf.pose.pose.position.y = 0
tf.pose.pose.position.z = 0
tf.pose.pose.orientation = qt
self.persist(robot)
robot.darpa_tf_pub.publish(tf)
return robot
def on_calibrate(self, robot):
RPKG = rospkg.RosPack()
ECAL = "entrance_calib"
if ECAL not in RPKG.list():
m = "The ROS '{0}' package was not found in the package list. Ensure that it is installed and sourced."
return (True, m.format(ECAL))
if len(robot.points) < 2:
# This error was from reading the calibration code and we perform it here in
# order to get a fast and clear error message to the user. The drawback is
# that if someone modifies the calibration process and changes the number of
# points required for a calibration then this has to change as well. C'est la vie.
return (
True,
"Not enough points. Need 2 or more points for a calibration.",
)
# In a previous implementation the GUI program would record the data into the
# calibration's data folder on the go while keeping separate files for the ground and
# aerial vehicle. Here we choose a different strategy where we already have the
# necessary points cached in memory and persisted in our JSON files so we just
# dump the output to the same location where the calibration process expects to
# find the data. Note that we always dump to the exact output location regardless
# if the robot is a UGV or UAV as the calibration makes no differentiation in the
# matter. This keeps us from having to modify ROS parameters to tell the calibration
# which files to load and just its defaults listed in its launch file.
cal_data = os.path.join(RPKG.get_path("entrance_calib"), "data")
est_path = os.path.join(cal_data, "data_points.txt")
tot_path = os.path.join(cal_data, "reference_points.txt")
with open(est_path, "w") as est:
with open(tot_path, "w") as tot:
this = "{0}\n".format(len(robot.points))
est.write(this)
tot.write(this)
# Example of what robot.points looks like:
# [
# [ [1,2,3], [4,5,6] ],
# ...
# [ [1,2,3], [4,5,6] ],
# ]
for e, t in robot.points:
e_pt = "{0} {1} {2}\n".format(e[0], e[1], e[2])
t_pt = "{0} {1} {2}\n".format(t[0], t[1], t[2])
est.write(e_pt)
tot.write(t_pt)
# Call the actual calibration process. Previously this step had to be done manually
        # as the user had to use both the terminal and GUI in conjunction with each other
# to perform the calibration and upload the transformed frame to DARPA.
out = ""
try:
out = subprocess.check_output(
["roslaunch", ECAL, ECAL + ".launch"], stderr=subprocess.STDOUT
)
        except subprocess.CalledProcessError as e:
            # 'out' is still the empty string here because check_output raised
            # before assigning; the captured output lives on the exception.
            bad = str(e) + ":\n\n" + (e.output or "")
            rospy.logerr(bad)
            return (True, bad)
# This check is for the case when subprocess.CalledProcessError is not thrown.
# A test was made to have 'entrance_calib' exit with a code of one by having one
# of the files it was looking for not exist. When doing so the exception was
        # thrown and it appeared that ROS was 'swallowing' the exit code itself and
# returning a new code of zero.
if "exit code 1" in out:
rospy.logerr(out)
return (True, "'entrance_calib' has shutdown unexpectedly:\n\n" + out)
# At this point we have successfully run the calibration and are now able
# to proceed on transforming its results and save the DARPA transform.
# Extract the mean error from the process standard output.
match = re.search(
"Mean error:\s*([.\w]+)", out, flags=re.MULTILINE | re.IGNORECASE
)
if not match:
return (True, "No mean error detected in calibration output.")
mean_error = float(match.group(1)) # save for later in case of other errors
cal_path = os.path.join(cal_data, "calib.txt")
# Example of what's in calib.txt:
# 1 2 3
# 4 5 6
# 7 8 9
#
# 10
# 11
# 12
#
# The first three rows (items 1 thru 9) represent a 3x3 rotation matrix
# while elements 10, 11, and 12 represents the translation vector.
lines = []
with open(cal_path) as f:
lines = f.readlines()
if len(lines) < 7:
return (
True,
"Calibration output is missing necessary matrix data. Transform unchanged.",
)
lines = [line.strip() for line in lines] # strip newlines
# Note that lines[3] is the empty string.
        r0 = list(map(float, lines[0].split(" ")))
        r1 = list(map(float, lines[1].split(" ")))
        r2 = list(map(float, lines[2].split(" ")))
        r0.append(float(lines[4]))
        r1.append(float(lines[5]))
        r2.append(float(lines[6]))
        if len(r0) < 4 or len(r1) < 4 or len(r2) < 4:
return (
True,
"Calibration output is missing necessary data. Transform unchanged.",
)
robot.transform[0] = r0
robot.transform[1] = r1
robot.transform[2] = r2
robot.mean_error = mean_error
# The robot accepts transform as a combination of the translation
# vector and a quaternion for the rotation.
pt = Point(r0[3], r1[3], r2[3])
qw = math.sqrt(1.0 + r0[0] + r1[1] + r2[2]) / 2.0
qx = (r2[1] - r1[2]) / (4.0 * qw)
qy = (r0[2] - r2[0]) / (4.0 * qw)
qz = (r1[0] - r0[1]) / (4.0 * qw)
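        # The four lines above are the standard rotation-matrix-to-quaternion
        # conversion (the w-first branch).  It assumes the calibration produced
        # a proper rotation matrix with trace > -1 so the sqrt argument stays
        # positive; a degenerate matrix would require the other branches of the
        # full algorithm.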
qt = Quaternion(qx, qy, qz, qw)
tf = Odometry()
robot.quaternion = [qx, qy, qz, qw]
tf.pose.pose.position = pt
tf.pose.pose.orientation = qt
self.persist(robot)
robot.darpa_tf_pub.publish(tf)
return (False, robot)
# The following constants represent the columns in the table of the calibration UI.
NAME_COL = 0
AERIAL_COL = 1
POINT_COL = 2
SAVE_COL = 3
RESET_COL = 4
TRANS_COL = 5
TIME_COL = 6
def confirm(title, message):
"""
    Presents a simple message box asking the user to confirm the operation.
    Returns QtWidgets.QMessageBox.Yes if the user confirms; otherwise, returns
    QtWidgets.QMessageBox.Cancel.
"""
MB = QtWidgets.QMessageBox
return MB.warning(
None, title, message, buttons=MB.Yes | MB.Cancel, defaultButton=MB.Yes
)
class CalibrationView(QtWidgets.QWidget):
"""
    Everything UI-related to calibrate a robot and transform its coordinate frame
    to be uploaded to DARPA.  The controller, the second argument to the
    constructor, is expected to have the following interface:
- def name_changed(robot):
The user at any time may change the name of the robot in the table. When this
occurs an object of type Robot is passed to this function and the name property
of the passed in robot will be the new name.
- def on_save(robot):
Called whenever the user presses the save button for a particular robot. The
      passed robot will be of type Robot and the controller is expected to add another
point to the robot's points list, update the last_save field, and return the updated
robot object. If an error occurs then a string indicating the error message should
be returned which will cause an informational dialog to be presented to the user.
- def on_reset(robot):
      Whenever the user presses the save button, the number of points in the Robot
      object is incremented by one if the save was successful.  The resetting operation
      eliminates all previously accumulated points when the user accepts the
      confirmation.  The object of type Robot will be passed to this function, which
      should reset the points field to the
empty list and return the updated robot object. Note that this method is also called
when the user checks or unchecks the 'Aerial' checkbox as it is expected that currently
accumulated points are not valid for a different type of robot.
- def on_calibrate(robot):
After collecting a number of points the user will want to finish the calibration and
save the transformed DARPA coordinate frame. They achieve this by clicking on the
'Calibrate' button on the UI which then calls this method. The return value should be
a tuple where the first element is a boolean that is True when an error has occurred
and the second element is a string that describes the error. If the first value of
the returned tuple is false then the second element should be the robot with its
updated transform frame.
The only 'public' method exposed is the constructor which expects the
controller and existing list of robots that may have been previously
persisted. That list is expected to have robots of type Robot and will be
used to initially populate the robot table.
"""
def __init__(self, controller, robots):
super(CalibrationView, self).__init__()
rp = rospkg.RosPack()
ui = os.path.join(
rp.get_path("basestation_gui_python"), "resources", "calibration.ui"
)
loadUi(ui, self, {})
self.controller = controller
self.setObjectName("CalibrationView")
self.robot_table.itemChanged[QtWidgets.QTableWidgetItem].connect(
self.name_changed
)
self.robot_table.setColumnWidth(AERIAL_COL, 60)
self.robot_table.setColumnWidth(POINT_COL, 60)
self.robot_table.setColumnWidth(SAVE_COL, 60)
self.robot_table.setColumnWidth(RESET_COL, 60)
self.robot_table.setColumnWidth(TRANS_COL, 85)
for r in robots:
self.add_robot(r)
def add_robot(self, robot):
tab = self.robot_table
row = tab.rowCount()
item = self.non_editable(robot.name)
item.robot = robot
points = self.non_editable(str(len(robot.points)))
time = self.non_editable(robot.last_save)
checkbox = QtWidgets.QCheckBox()
# The following are callbacks for the newly added UI controls. Since
# adding a row creates new widgets/buttons, we need new callbacks that
# refer to these widgets and the robot object in question as certain
# operations want to mutate the robot.
def update():
points.setText(str(len(item.robot.points)))
time.setText(item.robot.last_save)
def reset():
item.robot = self.controller.on_reset(item.robot)
update()
def on_save():
result = self.controller.on_save(item.robot)
if type(result) is str:
QtWidgets.QMessageBox.critical(None, "Saved Point Failed", result)
else:
item.robot = result
update()
def on_reset():
answer = confirm(
"Reset Robot", "Really reset " + item.robot.name + "'s points?"
)
if answer == QtWidgets.QMessageBox.Yes:
reset()
def on_calibrate():
(has_err, result) = self.controller.on_calibrate(item.robot)
if has_err:
QtWidgets.QMessageBox.critical(None, "Calibration Failed", result)
else:
MEAN_ERROR_THRESHOLD = 0.02
item.robot = result
mbox = QtWidgets.QMessageBox.information
msg = "Mean error: {0}\n\n".format(result.mean_error)
if result.mean_error > MEAN_ERROR_THRESHOLD:
mbox = QtWidgets.QMessageBox.warning
msg += "Error greater than {0}. Consider re-calibrating.\n\n".format(
MEAN_ERROR_THRESHOLD
)
M = result.transform
row = " {: 10.6f} {: 10.6f} {: 10.6f} {: 10.6f} \n"
msg += "Transform matrix:\n\n"
msg += row.format(M[0][0], M[0][1], M[0][2], M[0][3])
msg += row.format(M[1][0], M[1][1], M[1][2], M[1][3])
msg += row.format(M[2][0], M[2][1], M[2][2], M[2][3])
msg += row.format(M[3][0], M[3][1], M[3][2], M[3][3])
mbox(None, "Calibration Complete", msg)
checkbox.setChecked(robot.is_aerial)
checkbox.setEnabled(False)
tab.insertRow(row)
tab.setItem(row, NAME_COL, item)
tab.setItem(row, POINT_COL, points)
tab.setItem(row, TIME_COL, time)
tab.setCellWidget(row, AERIAL_COL, checkbox)
tab.setCellWidget(
row,
SAVE_COL,
self.make_btn("stock_save", "Save pose from station", on_save),
)
tab.setCellWidget(
row,
RESET_COL,
self.make_btn("stock_refresh", "Clear saved pose points", on_reset),
)
tab.setCellWidget(
row,
TRANS_COL,
self.make_btn("system", "Calibrate and save DARPA transform", on_calibrate),
)
def name_changed(self, item):
"""
Called whenever the user edits the name cell for a robot.
        This callback handler can not be part of the 'add_robot' method due to the
        fact that this handler has to be attached to the entire table itself, and
        attaching the same handler in that method would cause it to be called
        multiple times, once for each attached handler (each robot).  To
        differentiate which robot we are referring to, 'add_robot' adds the robot
        as a property of the table item, which is accessed here.  This handler is
        therefore only set once, in the constructor for this view.
"""
if item.column() == NAME_COL:
robot = item.robot
name = item.text()
if robot.name != name:
robot.name = name
self.controller.name_changed(robot)
def make_btn(self, theme, tip, callback):
b = QtWidgets.QToolButton()
b.setIcon(QtGui.QIcon.fromTheme(theme))
b.clicked[bool].connect(callback)
        if tip:
b.setToolTip(tip)
return b
def non_editable(self, what):
item = QtWidgets.QTableWidgetItem(what)
flags = item.flags()
item.setFlags(flags ^ QtCore.Qt.ItemIsEditable)
return item
| [
"[email protected]"
]
| |
85f7d9eaace33e3aa4d27ed43863e1f564756294 | e19704c2098066c58a45112f0978c84cb5b0abfb | /웹 크롤링 해본 것/2. StarBucks Address/starbucks.py | 1d1602055e77c5cb5dbe55da50b976ec88b86456 | []
| no_license | Aqudi/HTML-and-python-crawling | ab911a15350562ac28b7a633dfbcf6554a54af21 | 0249dad99f690737c37bc89c0a67a6f0bfb9a938 | refs/heads/master | 2021-09-27T19:52:47.429625 | 2018-11-11T04:18:55 | 2018-11-11T04:18:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,565 | py | import sys
import io
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
from bs4 import BeautifulSoup
import requests
import os
sys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument('--log-level=3')
#driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=r'C:\Users\gjdigj145\PycharmProjects\programming project\Section3\webdriver\chromedriver.exe')
# The headless options above are only applied by the commented-out line; the
# active driver below opens a visible browser window.
driver = webdriver.Chrome(r'C:\Users\gjdigj145\PycharmProjects\programming project\Section3\webdriver\chromedriver.exe')
s = requests.Session()
for i in range(1, 18):
driver.get('http://www.istarbucks.co.kr/store/store_map.do')
time.sleep(3)
driver.find_element_by_xpath('//*[@id="container"]/div/form/fieldset/div/section/article[1]/article/header[2]/h3/a').click()
time.sleep(2)
xpath = """//*[@id="container"]/div/form/fieldset/div/section/article[1]/article/article[2]/div[1]/div[2]/ul/li["""+ str(i) +''']/a'''
driver.find_element_by_xpath(xpath).click()
time.sleep(2)
driver.find_element_by_xpath('//*[@id="mCSB_2_container"]/ul/li[1]/a').click()
time.sleep(3)
    # Save the HTML
html = driver.page_source
    fulfilename = os.path.join("C:/", str(i) + '.html')
with open(fulfilename, 'w', encoding='UTF8') as f:
f.write(html)
time.sleep(10)
driver.quit()
| [
"[email protected]"
]
| |
2b40ae5ae6fe610502c28aa2772491f48e6e5c52 | 6ad9d8ff71dd0261d91d04859bbf99e003177a79 | /learn/test.py | 4caec70226a305c26049a711a388d06eb592096d | []
| no_license | youzeliang/Crwal | 4901f6fb13017ec4b95813eb21b7da55765588b9 | ae1e4243466e4ebc066f81e131d712d26215f994 | refs/heads/master | 2021-09-17T21:09:45.636029 | 2018-07-05T13:42:01 | 2018-07-05T13:42:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | classmate = ['fdf','fff','aa']
classmate.append("admin")
classmate.insert(1,"d")
print (classmate[-2])
classmate.pop(1)
print (len(classmate))
age = 1
if age < 3:
print ("ff")
sum = 0
for x in [1,2,3,4,5,66,7]:
print(x)
print(list(range(7))) | [
"[email protected]"
]
| |
e2bd7f6056a49e659124e5499e35dcc66db14362 | 0ba5622abc2125ac8a9907757680da4d7cb7b47e | /Knowrob/indigo/indigo-knowrob-dev/catkin_ws/devel/lib/python2.7/dist-packages/scanning_table_msgs/msg/_scanning_tableResult.py | b244c5b9a487f7423cb2420f13c20b29c231dd57 | []
| no_license | IoT-Lab-Minden/docker-ros | c2a327117bdf1d0b861d4580156e595a079ec5d5 | 5c57c15717cbcae515d82c3dc75470587bb2508e | refs/heads/master | 2021-01-18T03:33:22.128596 | 2017-05-17T13:48:05 | 2017-05-17T13:48:05 | 85,809,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | /root/catkin_ws/devel/.private/scanning_table_msgs/lib/python2.7/dist-packages/scanning_table_msgs/msg/_scanning_tableResult.py | [
"[email protected]"
]
| |
52044bae579c7563300a029eaebfea9252bd78bd | 2ca8409e3e575c65754e54948ada9a00a9c02a7e | /Arrays/maxDiff.py | 3e951ef1852d667e8cc1578f9af7b0d3a2686603 | []
| no_license | wmaxlloyd/CodingQuestions | 0fc230e1861d5d33768d453c11c377221ae65a05 | 805e522f0e2077ffef36edb44b6b605fd727c3d8 | refs/heads/master | 2020-08-26T19:47:54.904406 | 2020-06-29T02:45:54 | 2020-06-29T02:45:54 | 217,126,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | # Given an array,find the maximum j – i such that arr[j] > arr[i]
inputArray = [100,200,8,4,6,3,6,9,8,3,5,3,6,3,5,7,8,5,0]
pointerDifference = len(inputArray) - 1
answer = None
while pointerDifference > 0:
p1 = 0
p2 = p1 + pointerDifference
while p2 < len(inputArray):
if inputArray[p2] > inputArray[p1]:
answer = (p1,p2)
break
p1 += 1
p2 += 1
if answer:
break
pointerDifference -= 1
print(answer)
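
# A sketch of an O(n) alternative (not part of the original solution): precompute
# the running minimum from the left and the running maximum from the right, then
# sweep both arrays with two pointers.  Returns the maximum j - i, or -1 if no
# pair with arr[j] > arr[i] exists.
def max_index_diff(arr):
    n = len(arr)
    if n < 2:
        return -1
    # min_left[i] = min(arr[0..i]); max_right[j] = max(arr[j..n-1])
    min_left = [arr[0]] * n
    for i in range(1, n):
        min_left[i] = min(min_left[i - 1], arr[i])
    max_right = [arr[-1]] * n
    for j in range(n - 2, -1, -1):
        max_right[j] = max(max_right[j + 1], arr[j])
    i = j = 0
    best = -1
    while i < n and j < n:
        if min_left[i] < max_right[j]:
            best = max(best, j - i)
            j += 1
        else:
            i += 1
    return best

print(max_index_diff(inputArray))  # prints 14 for the inputArray above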
| [
"[email protected]"
]
| |
e77f286f9171f97ede448b137bb6a79cd31e0429 | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp-with-texts/RUCKUS-ZD-EVENT-MIB.py | f63918726cf10f7df19477dc47046783d777c829 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 47,858 | py | #
# PySNMP MIB module RUCKUS-ZD-EVENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RUCKUS-ZD-EVENT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:59:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
ruckusEvents, = mibBuilder.importSymbols("RUCKUS-ROOT-MIB", "ruckusEvents")
ModuleCompliance, ObjectGroup, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "ObjectGroup", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, Counter64, IpAddress, NotificationType, iso, ObjectIdentity, ModuleIdentity, Unsigned32, Counter32, Gauge32, MibIdentifier, Bits, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "Counter64", "IpAddress", "NotificationType", "iso", "ObjectIdentity", "ModuleIdentity", "Unsigned32", "Counter32", "Gauge32", "MibIdentifier", "Bits", "Integer32")
MacAddress, TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "TextualConvention", "DisplayString")
ruckusZDEventMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 25053, 2, 2))
if mibBuilder.loadTexts: ruckusZDEventMIB.setLastUpdated('201010150800Z')
if mibBuilder.loadTexts: ruckusZDEventMIB.setOrganization('Ruckus Wireless, Inc.')
if mibBuilder.loadTexts: ruckusZDEventMIB.setContactInfo('Ruckus Wireless Inc. Postal: 880 W Maude Ave Sunnyvale, CA 94085 USA EMail: [email protected] Phone: +1-650-265-4200')
if mibBuilder.loadTexts: ruckusZDEventMIB.setDescription('Ruckus ZD event objects, including trap OID and trap payload.')
ruckusZDEventTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1))
ruckusZDEventObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2))
ruckusZDEventAPJoinTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 1)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPJoinTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPJoinTrap.setDescription("Trigger when there is a AP join event. The AP's MAC address is enclosed.")
ruckusZDEventSSIDSpoofTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 2)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventSSIDSpoofTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSSIDSpoofTrap.setDescription("Trigger when a SSID-spoofing rogue AP is detected. The rogue AP's MAC address and SSID are enclosed.")
ruckusZDEventMACSpoofTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 3)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventMACSpoofTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventMACSpoofTrap.setDescription("Trigger when a MAC-spoofing rogue AP is detected. The rogue AP's MAC address and SSID are enclosed.")
ruckusZDEventRogueAPTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 4)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventRogueAPTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventRogueAPTrap.setDescription("Trigger when a rogue AP is detected. The rogue AP's MAC address and SSID are enclosed.")
ruckusZDEventAPLostTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 5)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPLostTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPLostTrap.setDescription("Trigger when AP lost contact. The AP's MAC address is enclosed.")
ruckusZDEventAPLostHeartbeatTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 6)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPLostHeartbeatTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPLostHeartbeatTrap.setDescription("Trigger when AP lost heartbeats. The AP's MAC address is enclosed.")
ruckusZDEventClientAuthFailBlockTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 7)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientAuthFailBlockTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientAuthFailBlockTrap.setDescription("Triggered when a client fails authentication too many times in a row. The client's MAC address, AP's MAC address and SSID are enclosed.")
ruckusZDEventAPResetTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 8)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPResetTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPResetTrap.setDescription('Trigger when AP reboots.')
ruckusZDEventIPChangeTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 9)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventIPChangeTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventIPChangeTrap.setDescription('Trigger when IP changes.')
ruckusZDEventSystemColdStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 10)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemColdStartTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemColdStartTrap.setDescription('Trigger when system performs cold start.')
ruckusZDEventAPChannelChangeTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 11)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPChannelChangeTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPChannelChangeTrap.setDescription('Trigger when AP channel changes.')
ruckusZDEventRadiusAuthUnavailableTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 12)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventRadiusAuthUnavailableTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventRadiusAuthUnavailableTrap.setDescription('Trigger when RADIUS authentication server unavailable.')
ruckusZDEventRadiusAcctUnavailableTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 13)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventRadiusAcctUnavailableTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventRadiusAcctUnavailableTrap.setDescription('Trigger when RADIUS accounting server unavailable.')
ruckusZDEventClientJoinFailAPBusyTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 14)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailAPBusyTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailAPBusyTrap.setDescription('Trigger when client joins fail because AP is busy.')
ruckusZDEventInterferenceADHoc = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 15)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventInterferenceADHoc.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventInterferenceADHoc.setDescription('Trigger when an interference AD hoc is detected.')
ruckusZDEventImageUpgradeFailTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 16)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventImageUpgradeFailTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventImageUpgradeFailTrap.setDescription('Trigger when AP image upgrade fails.')
ruckusZDEventHeartbeatTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 22)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventHeartbeatTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventHeartbeatTrap.setDescription('Trigger with trap heartbeat sent.')
ruckusZDEventAttackedTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 24)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAttackedTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAttackedTrap.setDescription('Trigger with a malicious attack is found.')
ruckusZDEventSystemWarmStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 25)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemWarmStartTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemWarmStartTrap.setDescription('Trigger when system performs warm start.')
ruckusZDEventInterfereAPTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 26)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventInterfereAPTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventInterfereAPTrap.setDescription('Trigger when a rogue AP used same channel with current AP is detected.')
ruckusZDEventAPSystemColdStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 31)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPSystemColdStartTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPSystemColdStartTrap.setDescription('Trigger when an AP performs cold start.')
ruckusZDEventAPSystemWarmStartTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 32)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPSystemWarmStartTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPSystemWarmStartTrap.setDescription('Trigger when an AP performs warm start.')
ruckusZDEventAPSSIDChangedTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 33)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPSSIDChangedTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPSSIDChangedTrap.setDescription('Trigger when an AP SSID changed.')
ruckusZDEventAPClientExceedValve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 34)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPClientExceedValve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPClientExceedValve.setDescription('Triggered when AP online client exceed valve.')
ruckusZDEventAPAvailableStatusTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 35)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPAvailableStatusTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPAvailableStatusTrap.setDescription('Trigger when AP is available.')
ruckusZDEventAPWirelessInterfaceFaultTrap = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 36)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPWirelessInterfaceFaultTrap.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPWirelessInterfaceFaultTrap.setDescription('Trigger when AP wireless interface is fault.')
ruckusZDEventSystemCpuUtilExceedValve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 37)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemCpuUtilExceedValve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemCpuUtilExceedValve.setDescription('Trigger when System CPU utilization is exceed valve.')
ruckusZDEventSystemMemUtilExceedValve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 38)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemMemUtilExceedValve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemMemUtilExceedValve.setDescription('Trigger when System memory utilization is exceed valve.')
ruckusZDEventSystemBandwidthUtilExceedValve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 39)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemBandwidthUtilExceedValve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemBandwidthUtilExceedValve.setDescription('Trigger when System bandwidth utilization is exceed valve.')
ruckusZDEventSystemDropPacketRateExceedValve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 40)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemDropPacketRateExceedValve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemDropPacketRateExceedValve.setDescription('Trigger when System drop packet rate is exceed valve.')
ruckusZDEventAPSyncTimeFail = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 41)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventAPSyncTimeFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPSyncTimeFail.setDescription('Trigger when AP sync clock failure with AC.')
ruckusZDEventSystemCpuUtilClrWarn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 42)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemCpuUtilClrWarn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemCpuUtilClrWarn.setDescription('Trigger when System CPU utilization is under the valve.')
ruckusZDEventSystemMemUtilClrwarn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 43)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventSystemMemUtilClrwarn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSystemMemUtilClrwarn.setDescription('Trigger when System memory utilization is under the valve.')
ruckusZDEventClientJoin = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 60)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientJoin.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientJoin.setDescription("Triggered when a client join a AP success. The client's MAC address, AP's MAC address and SSID are enclosed.")
ruckusZDEventClientJoinFailed = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 61)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventReason"))
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailed.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailed.setDescription("Triggered when a client join a AP failed. The client's MAC address, AP's MAC address and SSID are enclosed.")
ruckusZDEventClientJoinFailedAPBusy = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 62)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailedAPBusy.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientJoinFailedAPBusy.setDescription("Triggered when a client join a AP failed because of AP too busy. The client's MAC address, AP's MAC address and SSID are enclosed.")
ruckusZDEventClientDisconnect = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 63)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientDisconnect.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientDisconnect.setDescription("Triggered when a client disconnects from an AP. The client's MAC address, the AP's MAC address, and the SSID are enclosed.")
ruckusZDEventClientRoamOut = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 64)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientRoamOut.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientRoamOut.setDescription("Triggered when a client roams out from an AP. The client's MAC address, the AP's MAC address, and the SSID are enclosed.")
ruckusZDEventClientRoamIn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 65)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientRoamIn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientRoamIn.setDescription("Triggered when a client roams in to an AP. The client's MAC address, the AP's MAC address, and the SSID are enclosed.")
ruckusZDEventClientAuthFailed = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 66)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventReason"))
if mibBuilder.loadTexts: ruckusZDEventClientAuthFailed.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientAuthFailed.setDescription("Triggered when client authentication fails. The client's MAC address, the AP's MAC address, the SSID, and the failure reason are enclosed.")
ruckusZDEventClientAuthorizationFailed = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 67)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventClientMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSSID"))
if mibBuilder.loadTexts: ruckusZDEventClientAuthorizationFailed.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientAuthorizationFailed.setDescription("Triggered when a client is not authorized to join an AP. The client's MAC address, the AP's MAC address, and the SSID are enclosed.")
ruckusZDEventAPCPUvalve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 83)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPCPUvalve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPCPUvalve.setDescription('Triggered when AP CPU utilization exceeds the threshold.')
ruckusZDEventAPMEMvalve = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 84)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPMEMvalve.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPMEMvalve.setDescription('Triggered when AP memory utilization exceeds the threshold.')
ruckusZDEventAPCPUvalveClrwarn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 85)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPCPUvalveClrwarn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPCPUvalveClrwarn.setDescription('Triggered when AP CPU utilization drops back below the threshold.')
ruckusZDEventAPMEMvalveClrwarn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 86)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPMEMvalveClrwarn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPMEMvalveClrwarn.setDescription('Triggered when AP memory utilization drops back below the threshold.')
ruckusZDEventAPNumStaExceedValveClrwarn = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 87)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDEventAPNumStaExceedValveClrwarn.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPNumStaExceedValveClrwarn.setDescription('Triggered when the online-client count drops back below the threshold and the warning clears.')
ruckusZDEventDhcpPoolFull = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 88)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventDhcpPoolFull.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventDhcpPoolFull.setDescription('Triggered when the DHCP pool is full.')
ruckusZDEventDhcpPoolAbun = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 89)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSerial"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventNEID"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventSeverity"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventType"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTime"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventStatus"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventTitle"), ("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventContent"))
if mibBuilder.loadTexts: ruckusZDEventDhcpPoolAbun.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventDhcpPoolAbun.setDescription('Triggered when the DHCP pool has free addresses again.')
ruckusZDEventSmartRedundancyChangetoActive = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 100)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventIpAddr"))
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyChangetoActive.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyChangetoActive.setDescription('[Smart Redundancy] Peer ZoneDirector IP not found; the system changed to the active state.')
ruckusZDEventSmartRedundancyActiveConnected = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 101)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventIpAddr"))
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyActiveConnected.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyActiveConnected.setDescription('[Smart Redundancy] Connected; the system is in the active state.')
ruckusZDEventSmartRedundancyActiveDisconnected = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 102)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventIpAddr"))
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyActiveDisconnected.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyActiveDisconnected.setDescription('[Smart Redundancy] Disconnected; the system is in the active state.')
ruckusZDEventSmartRedundancyStandbyConnected = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 103)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventIpAddr"))
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyStandbyConnected.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyStandbyConnected.setDescription('[Smart Redundancy] Connected; the system is in the standby state.')
ruckusZDEventSmartRedundancyStandbyDisconnected = NotificationType((1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 104)).setObjects(("RUCKUS-ZD-EVENT-MIB", "ruckusZDEventIpAddr"))
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyStandbyDisconnected.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSmartRedundancyStandbyDisconnected.setDescription('[Smart Redundancy] Disconnected; the system is in the standby state.')
ruckusZDEventSerial = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 1), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventSerial.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSerial.setDescription('Trap serial number.')
ruckusZDEventNEID = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventNEID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventNEID.setDescription('Network element ID.')
ruckusZDEventSeverity = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 3), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventSeverity.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSeverity.setDescription('Severity level of the trap.')
ruckusZDEventType = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 4), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventType.setDescription('Trap type.')
ruckusZDEventTime = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 5), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventTime.setDescription('Time when the trap occurred.')
ruckusZDEventStatus = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("raise", 1), ("clear", 2)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventStatus.setDescription('Trap status.')
ruckusZDEventTitle = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 7), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventTitle.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventTitle.setDescription('Trap title.')
ruckusZDEventContent = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 8), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventContent.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventContent.setDescription('Trap content.')
ruckusZDEventClientMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 15), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventClientMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventClientMacAddr.setDescription("The client's MAC address.")
ruckusZDEventAPMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 18), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventAPMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventAPMacAddr.setDescription("The AP's MAC address.")
ruckusZDEventRogueMacAddr = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 20), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventRogueMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventRogueMacAddr.setDescription("The rogue AP's MAC address.")
ruckusZDEventSSID = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 23), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventSSID.setDescription('SSID.')
ruckusZDEventChannel = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 25), Unsigned32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventChannel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventChannel.setDescription('Channel.')
ruckusZDEventReason = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 28), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventReason.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventReason.setDescription('Failure reason.')
ruckusZDEventIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 25053, 2, 2, 2, 30), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: ruckusZDEventIpAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDEventIpAddr.setDescription('IP address (IPv4 or IPv6).')
mibBuilder.exportSymbols("RUCKUS-ZD-EVENT-MIB", ruckusZDEventClientRoamIn=ruckusZDEventClientRoamIn, ruckusZDEventSystemWarmStartTrap=ruckusZDEventSystemWarmStartTrap, PYSNMP_MODULE_ID=ruckusZDEventMIB, ruckusZDEventAPLostTrap=ruckusZDEventAPLostTrap, ruckusZDEventImageUpgradeFailTrap=ruckusZDEventImageUpgradeFailTrap, ruckusZDEventSmartRedundancyStandbyConnected=ruckusZDEventSmartRedundancyStandbyConnected, ruckusZDEventClientJoinFailed=ruckusZDEventClientJoinFailed, ruckusZDEventIPChangeTrap=ruckusZDEventIPChangeTrap, ruckusZDEventClientAuthorizationFailed=ruckusZDEventClientAuthorizationFailed, ruckusZDEventAPMEMvalveClrwarn=ruckusZDEventAPMEMvalveClrwarn, ruckusZDEventAPClientExceedValve=ruckusZDEventAPClientExceedValve, ruckusZDEventRadiusAuthUnavailableTrap=ruckusZDEventRadiusAuthUnavailableTrap, ruckusZDEventSystemMemUtilExceedValve=ruckusZDEventSystemMemUtilExceedValve, ruckusZDEventAPLostHeartbeatTrap=ruckusZDEventAPLostHeartbeatTrap, ruckusZDEventAPSSIDChangedTrap=ruckusZDEventAPSSIDChangedTrap, ruckusZDEventType=ruckusZDEventType, ruckusZDEventClientMacAddr=ruckusZDEventClientMacAddr, ruckusZDEventRogueAPTrap=ruckusZDEventRogueAPTrap, ruckusZDEventSeverity=ruckusZDEventSeverity, ruckusZDEventSSID=ruckusZDEventSSID, ruckusZDEventAPMEMvalve=ruckusZDEventAPMEMvalve, ruckusZDEventDhcpPoolAbun=ruckusZDEventDhcpPoolAbun, ruckusZDEventMIB=ruckusZDEventMIB, ruckusZDEventAPChannelChangeTrap=ruckusZDEventAPChannelChangeTrap, ruckusZDEventAPSyncTimeFail=ruckusZDEventAPSyncTimeFail, ruckusZDEventStatus=ruckusZDEventStatus, ruckusZDEventSystemCpuUtilExceedValve=ruckusZDEventSystemCpuUtilExceedValve, ruckusZDEventAPMacAddr=ruckusZDEventAPMacAddr, ruckusZDEventDhcpPoolFull=ruckusZDEventDhcpPoolFull, ruckusZDEventSerial=ruckusZDEventSerial, ruckusZDEventClientJoinFailAPBusyTrap=ruckusZDEventClientJoinFailAPBusyTrap, ruckusZDEventIpAddr=ruckusZDEventIpAddr, ruckusZDEventAPJoinTrap=ruckusZDEventAPJoinTrap, ruckusZDEventTraps=ruckusZDEventTraps, ruckusZDEventTitle=ruckusZDEventTitle, ruckusZDEventClientDisconnect=ruckusZDEventClientDisconnect, ruckusZDEventSystemCpuUtilClrWarn=ruckusZDEventSystemCpuUtilClrWarn, ruckusZDEventAPResetTrap=ruckusZDEventAPResetTrap, ruckusZDEventContent=ruckusZDEventContent, ruckusZDEventRogueMacAddr=ruckusZDEventRogueMacAddr, ruckusZDEventObjects=ruckusZDEventObjects, ruckusZDEventRadiusAcctUnavailableTrap=ruckusZDEventRadiusAcctUnavailableTrap, ruckusZDEventClientJoinFailedAPBusy=ruckusZDEventClientJoinFailedAPBusy, ruckusZDEventClientAuthFailBlockTrap=ruckusZDEventClientAuthFailBlockTrap, ruckusZDEventAPSystemColdStartTrap=ruckusZDEventAPSystemColdStartTrap, ruckusZDEventAPCPUvalveClrwarn=ruckusZDEventAPCPUvalveClrwarn, ruckusZDEventSystemColdStartTrap=ruckusZDEventSystemColdStartTrap, ruckusZDEventSystemMemUtilClrwarn=ruckusZDEventSystemMemUtilClrwarn, ruckusZDEventClientAuthFailed=ruckusZDEventClientAuthFailed, ruckusZDEventAPCPUvalve=ruckusZDEventAPCPUvalve, ruckusZDEventClientRoamOut=ruckusZDEventClientRoamOut, ruckusZDEventSmartRedundancyStandbyDisconnected=ruckusZDEventSmartRedundancyStandbyDisconnected, ruckusZDEventAPWirelessInterfaceFaultTrap=ruckusZDEventAPWirelessInterfaceFaultTrap, ruckusZDEventNEID=ruckusZDEventNEID, ruckusZDEventTime=ruckusZDEventTime, ruckusZDEventInterfereAPTrap=ruckusZDEventInterfereAPTrap, ruckusZDEventSmartRedundancyChangetoActive=ruckusZDEventSmartRedundancyChangetoActive, ruckusZDEventMACSpoofTrap=ruckusZDEventMACSpoofTrap, ruckusZDEventSSIDSpoofTrap=ruckusZDEventSSIDSpoofTrap, ruckusZDEventClientJoin=ruckusZDEventClientJoin, 
ruckusZDEventSmartRedundancyActiveConnected=ruckusZDEventSmartRedundancyActiveConnected, ruckusZDEventSystemBandwidthUtilExceedValve=ruckusZDEventSystemBandwidthUtilExceedValve, ruckusZDEventAttackedTrap=ruckusZDEventAttackedTrap, ruckusZDEventSmartRedundancyActiveDisconnected=ruckusZDEventSmartRedundancyActiveDisconnected, ruckusZDEventHeartbeatTrap=ruckusZDEventHeartbeatTrap, ruckusZDEventAPAvailableStatusTrap=ruckusZDEventAPAvailableStatusTrap, ruckusZDEventChannel=ruckusZDEventChannel, ruckusZDEventAPSystemWarmStartTrap=ruckusZDEventAPSystemWarmStartTrap, ruckusZDEventReason=ruckusZDEventReason, ruckusZDEventSystemDropPacketRateExceedValve=ruckusZDEventSystemDropPacketRateExceedValve, ruckusZDEventInterferenceADHoc=ruckusZDEventInterferenceADHoc, ruckusZDEventAPNumStaExceedValveClrwarn=ruckusZDEventAPNumStaExceedValveClrwarn)
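# Illustrative usage (editor's addition, not part of the generated MIB code):
# a minimal sketch of loading this compiled module with pysnmp and resolving
# one of the notification types defined above; it assumes the module is on
# the pysnmp MIB search path under the name RUCKUS-ZD-EVENT-MIB.
# from pysnmp.smi import builder
# mb = builder.MibBuilder()
# mb.loadModules('RUCKUS-ZD-EVENT-MIB')
# (clientJoin,) = mb.importSymbols('RUCKUS-ZD-EVENT-MIB', 'ruckusZDEventClientJoin')
# print(clientJoin.getName())  # (1, 3, 6, 1, 4, 1, 25053, 2, 2, 1, 60)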
# ==== File: /taobao/taobao/pipelines.py | repo: zhengdongge/Spider | license: none ====
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import pymongo
class TaobaoPipeline(object):
def __init__(self,mongo_url,mongo_db):
self.mongo_url=mongo_url
self.mongo_db=mongo_db
@classmethod
def from_crawler(cls,crawler):
return cls(
mongo_url=crawler.settings.get("MONGO_URL"),
mongo_db=crawler.settings.get("MONGODB_DATABASE")
)
def open_spider(self, spider):
self.client = pymongo.MongoClient(self.mongo_url)
self.db = self.client[self.mongo_db]
def process_item(self, item, spider):
        sheet = self.db[item['goods_class']]
        if sheet.find_one({'goods_url': item['goods_url']}):
            print('Data already exists')
        else:
            sheet.insert_one(dict(item))  # insert_one() replaces the deprecated insert()
        return item
def close_spider(self, spider):
self.client.close()
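# Editor's sketch of the settings.py wiring this pipeline expects (the
# priority value 300 and the connection URI are illustrative assumptions):
# ITEM_PIPELINES = {'taobao.pipelines.TaobaoPipeline': 300}
# MONGO_URL = 'mongodb://localhost:27017'
# MONGODB_DATABASE = 'taobao'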
# ==== File: /camera_handler.py | repo: KoheiEnju/streaming-cam | license: none ====
import cv2
from base_camera import BaseCamera
class Camera(BaseCamera):
def __init__(self):
super().__init__()
@staticmethod
def frames():
camera = cv2.VideoCapture(0)
if not camera.isOpened():
raise RuntimeError("Could not start camera.")
        while True:
            ok, frame = camera.read()
            if not ok:
                raise RuntimeError("Failed to read a frame from the camera.")
            yield cv2.imencode(".png", frame)[1].tobytes()
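# Usage sketch (editor's addition): BaseCamera normally drives frames() from a
# background thread; the hypothetical Flask endpoint below only illustrates how
# the generator could be streamed and is not part of this repository.
# from flask import Flask, Response
# app = Flask(__name__)
# @app.route('/video_feed')
# def video_feed():
#     gen = (b'--frame\r\nContent-Type: image/png\r\n\r\n' + f + b'\r\n'
#            for f in Camera.frames())
#     return Response(gen, mimetype='multipart/x-mixed-replace; boundary=frame')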
# ==== File: /data.py | repo: SoundsSerious/BeemClient | license: none ====
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 13:26:26 2017
@author: Cabin
"""
from kivy.lang import Builder
from plyer import gps
from kivy.uix.widget import Widget
from kivy.uix.floatlayout import FloatLayout
from kivy.properties import *
from kivy.event import *
from kivy.clock import Clock, mainthread
from kivy.clock import Clock
from kivy.graphics import Color, Point, Mesh
from kivy.uix.widget import Widget
from kivy.uix.boxlayout import BoxLayout
from kivy.utils import *
from log import RingBuffer
from graph import Graph, MeshLinePlot, Plot, LinePlot
from kivy.graphics.context_instructions import *
import traceback
class RealTimeGraph(Widget):
_max = 100
_amin,_amax = -4, 4
_ytick = 1
def __init__(self,ymin=-1,ymax=1,pmax=1000,ytick = 1,**kwargs):
super(RealTimeGraph,self).__init__(**kwargs)
self._amin , self._amax = ymin, ymax
self._max = pmax
self._ytick= ytick
self._inx = 0
#self.bind(size = self.update_rect)
self._buffer = RingBuffer(self._max)
self.graph = Graph(xlabel='X', ylabel='Y', x_ticks_minor=self._max / 8,
x_ticks_major=self._max / 4, y_ticks_major=self._ytick, y_grid_label=True,
x_grid_label=True, padding=5, x_grid=True, y_grid=True,
xmin=-0, xmax=self._max, ymin=self._amin, ymax=self._amax,
label_options = {'color': [0,0,0,1]})
# with self.graph.canvas.before:
# PushMatrix()
# Rotate(angle=-90, origin=self.graph.pos)
#
# with self.graph.canvas.after:
# PopMatrix()
self.bind(size = self.update_rect, pos = self.update_rect)
self.graph.background_color = [1,1,1,1]
self.graph.border_color = [0,0,0,1]
self.graph.tick_color = [0.75,0.75,0.75,1]
self.plot_x = MeshLinePlot(color=[0.3, 1, 0.3, 1])
self.plot_x.mode ='points'
self.plot_y = MeshLinePlot(color=[1, 0, 0.3, 1])
self.plot_y.mode ='points'
self.plot_z = MeshLinePlot(color=[0.3, 0, 1, 1])
self.plot_z.mode = 'points'
self.graph.add_plot(self.plot_x)
self.graph.add_plot(self.plot_y)
self.graph.add_plot(self.plot_z)
self.add_widget(self.graph)
for i in range(100):
self.addData(i/100.0,-i/100.0+1,0)
def update_rect(self,*args):
self.graph.size = self.size
# print self.size, self.graph.size
# print self.center, self.graph.center
# size = self.graph.size
# self.graph.size = self.size[1],self.size[0]
# self.graph.pos = self.pos[0]-self.graph.size[0],self.pos[1]#+self.graph.size[1]
# print self.size, self.graph.size
# print self.center, self.graph.center
def addData(self,x,y,z):
self._inx += 1
if self._inx > self._max:
self._inx = 0
self._buffer.append( (self._inx, x, y, z) )
i_,x_,y_,z_ = zip(*self._buffer.get())
self.plot_x.points = zip(i_,x_)
self.plot_y.points = zip(i_,y_)
self.plot_z.points = zip(i_,z_)
Builder.load_string('''
<MotionData@Widget>:
BoxLayout:
id:lay
orientation: 'vertical'
Label:
text: root.gps_location
Label:
height: 10
text: root.gps_status
Label:
height: 10
text: root.ble_string
Label:
height: 10
text: root.cal_rssi
Label:
height: 10
text: root.distance_str
BoxLayout:
size_hint_y: None
height: '48dp'
padding: '4dp'
ToggleButton:
text: 'Start' if self.state == 'normal' else 'Stop'
on_state:
root.start(1000, 0) if self.state == 'down' else \
root.stop()
BoxLayout:
size_hint_y: None
height: '48dp'
padding: '4dp'
ToggleButton:
text: 'Calibrate'
on_press:
root.calibrate_ble()
''')
class MotionData(Widget):
#Utilities Logic
gps_status = StringProperty()
gps_active = False
ble = ObjectProperty()
bluetooth_active = False
ble_poll = None
#BLE Signal Strenght & Distance
ble_rssi = NumericProperty()
ble_string = StringProperty('BLE:')
rssi_cal_m1 = NumericProperty(-45.0)
#Motion Data:
lat = NumericProperty()
lon = NumericProperty()
speed = NumericProperty()
altitude = NumericProperty()
bearing = NumericProperty()
accuracy = NumericProperty()
est_distance = NumericProperty()
gps_location = StringProperty('GPS LOC:')
distance_str = StringProperty('DIST:')
cal_rssi = StringProperty('CAL:')
GPS_RATE = 1000 #ms
BLE_RATE = 10 #ms
ENF = 2.0
rssi_alpha = 0.975
app = ObjectProperty()
def __init__(self,app,**kwargs):
super(MotionData,self).__init__(**kwargs)
self.app = app
try:
#pyobjus.dylib_manager.load_framework('LibKivyBLE.framework')
if platform == 'ios':
import pyobjus
pyobjus.dylib_manager.load_dylib('LibKivyBLE.dylib')
kvble = pyobjus.autoclass('KivyBLE')
self.ble = kvble.alloc().init()
self.bluetooth_active = True
except Exception as e:
            print(e)
self.bluetooth_active = False
try:
gps.configure(on_location=self.on_location,
on_status=self.on_status)
self.gps_active = True
except NotImplementedError:
            print('GPS is not implemented for your platform')
self.gps_active = False
self.lay = self.ids['lay']
self.bind(size = self.update_rect, pos = self.update_rect)
def poll_BLE_RSSI(self):
self.ble_poll = Clock.schedule_interval(self.get_rssi, self.BLE_RATE/1000.0)
def stop_BLE_Poll(self):
Clock.unschedule( self.ble_poll )
self.ble_poll = None
def get_rssi(self,dt):
rssi = self.ble.getFrisbeemRSSI()
if rssi:
new_rssi = float(rssi.doubleValue())
#Use Low Pass Filter
self.ble_rssi = self.rssi_alpha*self.ble_rssi + (1.0-self.rssi_alpha)*new_rssi
#Calc Distance
self.est_distance = 10.0**((self.rssi_cal_m1 - self.ble_rssi)/(10.0 * self.ENF))
self.app.distance = self.est_distance
def calibrate_ble(self):
self.rssi_cal_m1 = self.ble_rssi
self.cal_rssi = 'Calibrate Value: {}'.format(self.rssi_cal_m1)
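    # Worked example of the log-distance path-loss model used in get_rssi()
    # (editor's note): with the default 1 m calibration rssi_cal_m1 = -45 dBm,
    # a filtered RSSI of -57 dBm and path-loss exponent ENF = 2.0:
    #     d = 10 ** ((-45 - (-57)) / (10 * 2.0)) = 10 ** 0.6, about 3.98 m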
def on_ble_rssi(self,instance ,value):
self.ble_string = "Frisbeem RSSI: {:3.4f}".format( value )
def on_est_distance(self,instance,value):
self.distance_str = "Frisbeem Dist: {:3.4f}".format( value )
def on_rssi_cal_m1(self,instance,value):
self.cal_rssi = "Cal RSSI: {:3.4f}".format( value )
def start(self, minTime, minDistance):
if self.gps_active:
gps.start(minTime, minDistance)
if self.bluetooth_active:
self.ble.startTheScan()
self.poll_BLE_RSSI()
def stop(self):
if self.gps_active:
gps.stop()
if self.bluetooth_active:
self.ble.stopTheScan()
self.stop_BLE_Poll()
@mainthread
def on_location(self, **kwargs):
self.gps_location = '\n'.join([
'{}={}'.format(k, v) for k, v in kwargs.items()])
if 'lat' in kwargs:
self.lat = kwargs['lat']
if 'lon' in kwargs:
self.lon = kwargs['lon']
if 'altitude' in kwargs:
self.altitude = kwargs['altitude']
if 'speed' in kwargs:
self.speed = kwargs['speed']
if 'bearing' in kwargs:
self.bearing = kwargs['bearing']
if 'accuracy' in kwargs:
self.gps_accuracy = kwargs['accuracy']
#Update App
self.app.lat = self.lat
self.app.lon = self.lon
@mainthread
def on_status(self, stype, status):
self.gps_status = 'type={}\n{}'.format(stype, status)
def on_pause(self):
gps.stop()
return True
def on_resume(self):
gps.start(1000, 0)
pass
def update_rect(self,instance,value):
self.lay.size = self.size
if __name__ == '__main__':
from math import sin
from kivy.app import App
class DataApp(App):
time = 0
x = 0
def build(self):
self.graph = RealTimeGraph()
            Clock.schedule_interval(self.updatePlot, 0.1)  # schedule once here; rescheduling inside the callback stacks timers
return self.graph
def updatePlot(self,dt):
self.time += dt
self.graph.addData( sin( (self.time/5.0) / 10.) , \
sin( (self.time/3.0) / 15.), \
sin( (self.time/7.0) / 15.))
DataApp().run()
#if __name__ == '__main__':
# app = GpsTest()
#    app.run()
# ==== File: /netbox_ddns/tables.py | repo: k4mil666/netbox-ddns | license: Apache-2.0 ====
import django_tables2 as tables
from netbox_ddns.models import ExtraDNSName
from utilities.tables import BaseTable, ToggleColumn
FORWARD_DNS = """
{% if record.forward_action is not None %}
{{ record.get_forward_action_display }}:
{{ record.get_forward_rcode_html_display }}
{% else %}
<span class="text-muted">Not created</span>
{% endif %}
"""
ACTIONS = """
{% if perms.dcim.change_extradnsname %}
<a href="{% url 'plugins:netbox_ddns:extradnsname_edit' ipaddress_pk=record.ip_address.pk pk=record.pk %}"
class="btn btn-xs btn-warning">
<i class="glyphicon glyphicon-pencil" aria-hidden="true"></i>
</a>
{% endif %}
{% if perms.dcim.delete_extradnsname %}
<a href="{% url 'plugins:netbox_ddns:extradnsname_delete' ipaddress_pk=record.ip_address.pk pk=record.pk %}"
class="btn btn-xs btn-danger">
<i class="glyphicon glyphicon-trash" aria-hidden="true"></i>
</a>
{% endif %}
"""
class PrefixTable(BaseTable):
pk = ToggleColumn()
name = tables.Column()
last_update = tables.Column()
forward_dns = tables.TemplateColumn(template_code=FORWARD_DNS)
actions = tables.TemplateColumn(
template_code=ACTIONS,
attrs={'td': {'class': 'text-right text-nowrap noprint'}},
verbose_name=''
)
class Meta(BaseTable.Meta):
model = ExtraDNSName
fields = ('pk', 'name', 'last_update', 'forward_dns', 'actions')
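# Usage sketch (editor's addition; the view wiring below is hypothetical, not
# part of this plugin): django-tables2 renders the table from a queryset.
# from django_tables2 import RequestConfig
# table = PrefixTable(ExtraDNSName.objects.all())
# RequestConfig(request, paginate={'per_page': 25}).configure(table)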
# ==== File: /Newbie_experiments/Ex_Files_Programming_Realworld/Exercise Files/Ch01/01_01/start_01_01_breakfast_functions.py | repo: cristianbostan/Developer | license: none ====
""" A Functional Breakfast """
def make_omelette():
print('Mixing the ingredients')
print('Pouring the mixture into a frying pan')
print('Cooking the first side')
print('Flipping it!')
print('Cooking the other side')
omelette = 'a tasty omelette'
return omelette
omelette1 = make_omelette()
omelette2 = make_omelette()
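# Editor's addition: each call above runs the whole recipe again and returns a
# fresh value, so the two variables hold independent strings.
print('Breakfast is served: ' + omelette1 + ' and ' + omelette2)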
# ==== File: /BridgeDataAnalysis/PyVision/dummydata.py | repo: ishjain/Traffic-Inference | license: none ====
'''
Ish Kumar Jain: I ran logistic regression on this dummy data set and got
train accuracy = 40%
test accuracy = 40%
validate accuracy = 60%
default learning rate (.13)
'''
import numpy as np
x_tr = np.array([[1,2,3,4,5],[2,1,4,5,4]])
y_tr = np.array([1,-1,1,1,-1])
x_te = np.array([[4,5,6,7,8],[3,4,5,8,9]])
y_te = np.array([-1,-1,-1,1,1])
x_va = np.array([[4,5,6,7,8],[3,4,5,8,9]])
y_va = np.array([-1,-1,-1,1,1])
x_tr=np.transpose(x_tr)
x_te=np.transpose(x_te)
x_va=np.transpose(x_va)
test = [x_te,y_te]
train = [x_tr,y_tr]
validate = [x_va,y_va]
print(x_tr.shape, y_tr.shape, train[0].shape)
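# Hedged reproduction sketch (editor's addition): the accuracies quoted in the
# docstring came from a different logistic-regression implementation, so the
# scikit-learn stand-in below is illustrative only and need not match them.
# from sklearn.linear_model import LogisticRegression
# clf = LogisticRegression().fit(x_tr, y_tr)
# print('train accuracy:', clf.score(x_tr, y_tr))
# print('test accuracy:', clf.score(x_te, y_te))
# print('validate accuracy:', clf.score(x_va, y_va))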
# ==== File: /generate_content_df.py | repo: I85YL64/dashboardom | license: none ====
import pandas as pd
dashboard_df = pd.DataFrame({
'dashboard': [
'migration-population',
'gold-reserves',
'google-serp',
'twitterdash',
'trending-twitter',
'us-baby-names',
'advertools',
'boxofficemojo',
'health-spending',
'life-exp',
'massacres',
'median-age-world',
'migration-by-country',
'mothersday-map',
'pop-growth',
'terrorism',
'',
],
'title': [
'Migration and Population Density - WorldBank Data',
'Gold Reserves per Country & Quarter - IMF Data',
'Search Engine Results Pages - Google SERP Dashboard',
'Twitter Dashboard - Text Analysis & Mining - Twitter API',
'Trending on Twitter Now, Anywhere - Twitter Dashboard',
'US Baby Names Data & Trends',
'Generate SEM Keywords for AdWords Campaigns',
'BoxofficeMojo Dashboard Alltime Data',
'Healthcare Spending 2014 - CIA Factbook',
'Life Expectancy 2017 - CIA Factbook',
'World Massacres',
'Median Age 2017 - CIA Factbook',
'Migration Stats 2017 - CIA Factbook',
'Mothers Day Celebrations',
'Population Growth 2017 - CIA Factbook',
'Global Terrorism Database',
'Dashboardom',
],
'h1': [
'Migration Stats by Country and Year',
'Gold Reserves per Country & Quarter',
'Search Engine Results Pages - Google',
'Search and Analyze Twitter, Create a Dataset',
'Trending on Twitter Now, Anywhere',
'US Baby Names',
'SEM Keyword Generator',
'BoxofficeMojo Dashboard Alltime Data',
'Healthcare Spending 2014 - CIA Factbook',
'Life Expectancy 2017 - CIA Factbook',
'World Massacres',
'Median Age 2017 - CIA Factbook',
'Migration Stats 2017 - CIA Factbook',
'Mothers Day Celebrations',
'Population Growth 2017 - CIA Factbook',
'Global Terrorism Database',
'Dashboardom',
],
'h2': [
'Visualize changes in population density and migration',
'Analyze changes in official gold reserves - IMF Data',
'Get SERPs for multiple keywords and parameters, in one DataFrame',
'Search Twitter & generate a filterable downloadable dataset of tweets',
'Trending hashtags and topics on Twitter - all locations',
'Annual births by name in the US 1910 - 2016',
'Generate keywords for your campaigns on a massive scale',
'BoxofficeMojo domestic box-office data all-time',
'Healthcare spending by country in 2014',
'Life expectancy per country in 2017',
'Wikipedia\'s list of events named massacres',
'Median age and age distribution by country in 2017',
'Net migration by country in 2017',
'Mothers day celebrations in the world',
'Population growth per country in 2017',
'World terrorist attacks during 1970 - 2016',
'This website!',
],
'data': [
'WorldBank',
'International Monetary Fund',
'Google Custom Search Engine',
'Twitter API',
'Twitter API',
'Social Security Agency',
'https://github.com/eliasdabbas/advertools',
'BoxofficeMojo',
'CIA World Factbook',
'CIA World Factbook',
'Wikipedia',
'CIA World Factbook',
'CIA World Factbook',
'Wikipedia',
'CIA World Factbook',
'START Consortium',
' ',
],
'data_link': [
'https://data.worldbank.org/',
'https://data.imf.org/',
'https://developers.google.com/custom-search/v1/cse/list',
'https://developer.twitter.com',
'https://developer.twitter.com',
'https://www.ssa.gov/oact/babynames/',
'NA',
'http://www.boxofficemojo.com/alltime/domestic.htm',
'https://www.cia.gov/library/publications/the-world-factbook/fields/2225.html',
'https://www.cia.gov/library/publications/the-world-factbook/fields/2102.html',
'https://en.wikipedia.org/wiki/List_of_events_named_massacres',
'https://www.cia.gov/library/publications/the-world-factbook/fields/2010.html',
'https://www.cia.gov/library/publications/the-world-factbook/fields/2112.html',
'https://en.wikipedia.org/wiki/Mother%27s_Day',
'https://www.cia.gov/library/publications/the-world-factbook/fields/2002.html',
'https://www.kaggle.com/START-UMD/gtd',
' ',
],
'tags': [
'population, migration, world',
'gold, economics, IMF, central banks',
'google, SEO, search engine optimization, keywords',
'twitter, social media, text mining',
'twitter, social media',
'population, statistics, data-viz, names, USA',
'advertising, PPC, marketing, adwords, bingads, SEM, keywords',
'movies, hollywood, box-office',
'healthcare, world, cia-factbook',
'population, age, world, cia-factbook',
'terrorism, massacres, wikipedia, world',
'population, age, world, cia-factbook',
'migration, world, cia-factbook',
'world, mothers, wikipedia',
'population, world, cia-factbook',
'terrorism, world',
'tools, website',
],
'git_repo': [
'https://github.com/eliasdabbas/migration-population',
'https://github.com/eliasdabbas/gold-reserves',
'https://github.com/eliasdabbas/google-serp',
'https://github.com/eliasdabbas/twitterdash',
'https://github.com/eliasdabbas/trending-twitter',
'https://github.com/eliasdabbas/baby_names',
'https://github.com/eliasdabbas/advertools_app',
'https://github.com/eliasdabbas/boxofficemojo',
'https://github.com/eliasdabbas/health_spending',
'https://github.com/eliasdabbas/life_expectancy',
'https://github.com/eliasdabbas/wikipedia_list_of_massacres',
'https://github.com/eliasdabbas/median_age_dashboard',
'https://github.com/eliasdabbas/migration_dashboard',
'https://github.com/eliasdabbas/mothers_day',
'https://github.com/eliasdabbas/population_growth',
'https://github.com/eliasdabbas/terrorism',
'https://github.com/eliasdabbas/dashboardom'
],
'height': [
'1336px',
'1561px',
'1338px',
'1735px',
'850px',
'750px',
'793px',
'2500px',
'800px',
'1500px',
'1600px',
'1000px',
'1300px',
'800px',
'1500px',
'2360px',
'800px',
]
})
dashboard_df.to_csv('data/dashboards_df.csv', index=False)
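# Editor's sanity-check sketch: read the CSV back and confirm the row count.
# df = pd.read_csv('data/dashboards_df.csv')
# assert len(df) == len(dashboard_df.index)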
# ==== File: /config/wsgi.py | repo: yatemmma/grock | license: none ====
"""
WSGI config for grock project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from django.conf import settings
from wsgi_basic_auth import BasicAuth
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings')
DEBUG = getattr(settings, "DEBUG", None)
if DEBUG:
application = get_wsgi_application()
else:
application = BasicAuth(get_wsgi_application())
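# Editor's note: wsgi_basic_auth takes its credentials from the environment of
# the serving process; an illustrative (made-up) value would be:
#   export WSGI_AUTH_CREDENTIALS='user:password'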
# ==== File: /02-Livros/IntroduçãoAProgramaçãoComPython/CapituloV/Exercicio5.10.py | repo: jocelinoFG017/IntroducaoAoPython | license: none ====
"""
# Previous program
pontos = 0
questao = 1
while questao <= 3:
    resposta = input("Answer to question {}: ".format(questao))
    if questao == 1 and resposta == "b":
        pontos = pontos + 1
    if questao == 2 and resposta == "a":
        pontos = pontos + 1
    if questao == 3 and resposta == "d":
        pontos = pontos + 1
    questao = questao + 1
print("The student scored {} point(s)".format(pontos))

Modify the previous program so that it accepts both uppercase and
lowercase answers in every question.
"""
pontos = 0
questao = 1
while questao <= 3:
    resposta = input("Answer to question {}: ".format(questao))
    if questao == 1 and (resposta == "b" or resposta == "B"):
        pontos = pontos + 1
    if questao == 2 and (resposta == "a" or resposta == "A"):
        pontos = pontos + 1
    if questao == 3 and (resposta == "d" or resposta == "D"):
        pontos = pontos + 1
    questao = questao + 1
print("The student scored {} point(s)".format(pontos))
"[email protected]"
]
| |
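# A more idiomatic variant (editor's sketch, same behaviour as above):
# normalise the input once with str.lower() instead of comparing both cases.
# answers = {1: "b", 2: "a", 3: "d"}
# pontos = 0
# for questao in range(1, 4):
#     resposta = input("Answer to question {}: ".format(questao)).lower()
#     if resposta == answers[questao]:
#         pontos = pontos + 1
# print("The student scored {} point(s)".format(pontos))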
# ==== File: /pytorch-工作中遇到的函数/98-transpose().py | repo: yflfly/learn_pytorch | license: none ====
# coding:utf-8
import torch
'''
Official documentation:
torch.transpose(input, dim0, dim1, out=None) -> Tensor
Returns the transpose of the input tensor, with dimensions dim0 and dim1 swapped.
Parameters:
    input (Tensor) - the input tensor (required)
    dim0 (int) - the first dimension to transpose (default 0, optional)
    dim1 (int) - the second dimension to transpose (default 1, optional)
'''
# Create a 2-D tensor x: size 2 along dim 0 and 3 along dim 1
x = torch.randn(2, 3) # 'x.shape → [2,3]'
# Create a 3-D tensor y: size 2 along dim 0, 3 along dim 1, 4 along dim 2
y = torch.randn(2, 3, 4) # 'y.shape → [2,3,4]'
print(x.size()) # ([2, 3])
print(y.size()) # ([2, 3, 4])
print('------------------')
# Using transpose
z1 = x.transpose(0, 1) # 'shape→[3,2] '
print(x.size()) # ([2, 3])
print('z1', z1.size()) # [3, 2])
x.transpose(1, 0)  # return value discarded on purpose: transpose() is not in-place, x stays [2, 3]
print(x.size()) # ([2, 3])
y1 = y.transpose(0, 1) # 'shape→[3,2,4]'
print(y.size()) # ([2, 3, 4])
print('y1', y1.size()) # ([3, 2, 4])
'''
The output is as follows:
torch.Size([2, 3])
torch.Size([2, 3, 4])
------------------
torch.Size([2, 3])
z1 torch.Size([3, 2])
torch.Size([2, 3])
torch.Size([2, 3, 4])
y1 torch.Size([3, 2, 4])
Remember: transpose() does not modify the tensor in place; assign its result to a new variable.
'''
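# Editor's addendum (not in the original notes): transpose() returns a view
# that shares storage with the input, so the result is usually non-contiguous;
# call .contiguous() before view()-style reshapes.
z2 = x.transpose(0, 1)
print(z2.is_contiguous())               # False: strides were permuted, not copied
print(z2.contiguous().is_contiguous())  # True: contiguous() materializes a copy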
| [
"[email protected]"
]
| |
447dd2afc75fa1a182e314360b7a37bca96e2bc3 | 1cae26aeed8f3f8ea3864521d441958cb8fa9e3f | /Email/models.py | 13d413504b2da002fa6d929194b60eaaacdcb0c2 | []
| no_license | gledong12/Email-subscribe-system | 5d13dcb08ea21a7f96f4900d042f2ab25985d4ac | ac317f16778f764ad538d1d230a7e4a7bbaea780 | refs/heads/main | 2023-05-28T21:50:58.798610 | 2021-06-09T01:21:34 | 2021-06-09T01:21:34 | 373,383,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,177 | py | from django.db import models
from user.models import models
class Category(models.Model):
name = models.CharField(max_length=50)
class Meta:
db_table = 'categories'
class Email(models.Model):
subject = models.CharField(max_length=100)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted_at = models.DateTimeField(null=True)
sender = models.ForeignKey('user.User', on_delete=models.CASCADE, related_name='sender')
receiver = models.ManyToManyField('user.User', through='UserEmail', related_name='receiver')
class Meta:
db_table = 'emails'
class UserCategory(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
class Meta:
db_table = 'subscribe'
class UserEmail(models.Model):
user = models.ForeignKey('user.User', on_delete=models.CASCADE)
email = models.ForeignKey('Email', on_delete=models.CASCADE)
class Meta:
        db_table = 'receive_user'
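# Usage sketch (editor's addition; the user objects below are hypothetical):
# the explicit through model lets you attach recipients and query both sides.
# mail = Email.objects.create(subject='Hello', content='...', sender=alice)
# UserEmail.objects.create(user=bob, email=mail)
# mail.receiver.all()  # users who received this email
# bob.receiver.all()   # emails bob has received (reverse related_name)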
# ==== File: /aleph/index/entities.py | repo: batman59/aleph | license: MIT ====
import logging
import fingerprints
import warnings
from pprint import pprint, pformat # noqa
from banal import ensure_list, first
from followthemoney import model
from followthemoney.types import registry
from elasticsearch.helpers import scan
from aleph.core import es, cache
from aleph.model import Entity
from aleph.index.indexes import entities_write_index, entities_read_index
from aleph.index.util import unpack_result, delete_safe
from aleph.index.util import authz_query, bulk_actions
from aleph.index.util import MAX_PAGE, NUMERIC_TYPES
from aleph.index.util import MAX_REQUEST_TIMEOUT, MAX_TIMEOUT
log = logging.getLogger(__name__)
PROXY_INCLUDES = [
"schema",
"properties",
"collection_id",
"profile_id",
"role_id",
"mutable",
"created_at",
"updated_at",
]
ENTITY_SOURCE = {"includes": PROXY_INCLUDES}
def _source_spec(includes, excludes):
includes = ensure_list(includes)
excludes = ensure_list(excludes)
return {"includes": includes, "excludes": excludes}
def _entities_query(filters, authz, collection_id, schemata):
filters = filters or []
if authz is not None:
filters.append(authz_query(authz))
if collection_id is not None:
filters.append({"term": {"collection_id": collection_id}})
# if ensure_list(schemata):
# filters.append({"terms": {"schemata": ensure_list(schemata)}})
return {"bool": {"filter": filters}}
def get_field_type(field):
field = field.split(".")[-1]
if field in registry.groups:
return registry.groups[field]
for prop in model.properties:
if prop.name == field:
return prop.type
return registry.string
def iter_entities(
authz=None,
collection_id=None,
schemata=None,
includes=PROXY_INCLUDES,
excludes=None,
filters=None,
sort=None,
randomize=False,
random_seed=None,
):
"""Scan all entities matching the given criteria."""
query = {
"_source": _source_spec(includes, excludes),
}
q = _entities_query(filters, authz, collection_id, schemata)
preserve_order = False
if randomize:
if sort is not None:
warnings.warn(
"iter_entities: randomize and sort are mutually exclusive. ignoring sort order.",
RuntimeWarning,
)
seed_q = {"field": "_seq_no"}
if random_seed:
seed_q["seed"] = random_seed
query["query"] = {"function_score": {"query": q, "random_score": seed_q}}
else:
query["query"] = q
if sort is not None:
query["sort"] = ensure_list(sort)
preserve_order = True
index = entities_read_index(schema=schemata)
for res in scan(
es,
index=index,
query=query,
timeout=MAX_TIMEOUT,
request_timeout=MAX_REQUEST_TIMEOUT,
preserve_order=preserve_order,
):
entity = unpack_result(res)
if entity is not None:
yield entity
def iter_proxies(**kw):
for data in iter_entities(**kw):
schema = model.get(data.get("schema"))
if schema is None:
continue
yield model.get_proxy(data)
def iter_adjacent(collection_id, entity_id):
"""Used for recursively deleting entities and their linked associations."""
yield from iter_entities(
includes=["collection_id"],
collection_id=collection_id,
filters=[{"term": {"entities": entity_id}}],
)
def entities_by_ids(
ids, schemata=None, cached=False, includes=PROXY_INCLUDES, excludes=None
):
"""Iterate over unpacked entities based on a search for the given
entity IDs."""
ids = ensure_list(ids)
if not len(ids):
return
cached = cached and excludes is None and includes == PROXY_INCLUDES
entities = {}
if cached:
keys = [cache.object_key(Entity, i) for i in ids]
for _, entity in cache.get_many_complex(keys):
if entity is not None:
entities[entity.get("id")] = entity
    missing = [i for i in ids if entities.get(i) is None]
index = entities_read_index(schema=schemata)
query = {
"query": {"ids": {"values": missing}},
"_source": _source_spec(includes, excludes),
"size": MAX_PAGE,
}
result = es.search(index=index, body=query)
for doc in result.get("hits", {}).get("hits", []):
entity = unpack_result(doc)
if entity is not None:
entity_id = entity.get("id")
entities[entity_id] = entity
if cached:
key = cache.object_key(Entity, entity_id)
cache.set_complex(key, entity, expires=60 * 60 * 2)
for i in ids:
entity = entities.get(i)
if entity is not None:
yield entity
def get_entity(entity_id, **kwargs):
"""Fetch an entity from the index."""
for entity in entities_by_ids(entity_id, cached=True, **kwargs):
return entity
def index_entity(entity, sync=False):
"""Index an entity."""
return index_proxy(entity.collection, entity.to_proxy(), sync=sync)
def index_proxy(collection, proxy, sync=False):
delete_entity(proxy.id, exclude=proxy.schema, sync=False)
return index_bulk(collection, [proxy], sync=sync)
def index_bulk(collection, entities, sync=False):
"""Index a set of entities."""
entities = (format_proxy(p, collection) for p in entities)
entities = (e for e in entities if e is not None)
bulk_actions(entities, sync=sync)
def _numeric_values(type_, values):
values = [type_.to_number(v) for v in ensure_list(values)]
return [v for v in values if v is not None]
def format_proxy(proxy, collection):
"""Apply final denormalisations to the index."""
# Abstract entities can appear when profile fragments for a missing entity
# are present.
if proxy.schema.abstract:
return None
data = proxy.to_full_dict()
data["schemata"] = list(proxy.schema.names)
data["caption"] = proxy.caption
names = data.get("names", [])
fps = set([fingerprints.generate(name) for name in names])
fps.update(names)
data["fingerprints"] = [fp for fp in fps if fp is not None]
# Slight hack: a magic property in followthemoney that gets taken out
# of the properties and added straight to the index text.
properties = data.get("properties")
data["text"] = properties.pop("indexText", [])
# integer casting
numeric = {}
for prop in proxy.iterprops():
if prop.type in NUMERIC_TYPES:
values = proxy.get(prop)
numeric[prop.name] = _numeric_values(prop.type, values)
# also cast group field for dates
numeric["dates"] = _numeric_values(registry.date, data.get("dates"))
data["numeric"] = numeric
# Context data - from aleph system, not followthemoney.
data["collection_id"] = collection.id
data["role_id"] = first(data.get("role_id"))
data["profile_id"] = first(data.get("profile_id"))
data["mutable"] = max(ensure_list(data.get("mutable")), default=False)
data["origin"] = ensure_list(data.get("origin"))
# Logical simplifications of dates:
created_at = ensure_list(data.get("created_at"))
if len(created_at) > 0:
data["created_at"] = min(created_at)
updated_at = ensure_list(data.get("updated_at")) or created_at
if len(updated_at) > 0:
data["updated_at"] = max(updated_at)
# log.info("%s", pformat(data))
entity_id = data.pop("id")
return {
"_id": entity_id,
"_index": entities_write_index(proxy.schema),
"_source": data,
}
def delete_entity(entity_id, exclude=None, sync=False):
"""Delete an entity from the index."""
if exclude is not None:
exclude = entities_write_index(exclude)
for entity in entities_by_ids(entity_id, excludes="*"):
index = entity.get("_index")
if index == exclude:
continue
delete_safe(index, entity_id)
def checksums_count(checksums):
"""Query how many documents mention a checksum."""
schemata = model.get_type_schemata(registry.checksum)
index = entities_read_index(schemata)
body = []
for checksum in checksums:
body.append({"index": index})
query = {"term": {registry.checksum.group: checksum}}
body.append({"size": 0, "query": query})
results = es.msearch(body=body)
for checksum, result in zip(checksums, results.get("responses", [])):
total = result.get("hits", {}).get("total", {}).get("value", 0)
yield checksum, total
| [
"[email protected]"
]
| |
836bb34a019039ab8c3df90c6fe0873bb8d46d15 | c94ab5242d3ca5c95a7227219ab8e42145b37cce | /db/__init__.py | 6f868c9c9aa4ef7ef381888ff376ae57fa3aede7 | [
"MIT"
]
| permissive | kekecoco/py_text_to_audio | 7d7e84662caedd14248110c37ac18528c701a273 | 399a685318e19992cd932efe804d7508af9e4a25 | refs/heads/develop_dev | 2021-04-06T00:22:27.337565 | 2018-03-20T08:02:20 | 2018-03-20T08:02:20 | 124,878,403 | 1 | 0 | MIT | 2018-03-19T09:33:31 | 2018-03-12T11:24:28 | Python | UTF-8 | Python | false | false | 90 | py | # -*- coding: utf-8 -*-
# @author Wu Lihua
# @email [email protected]
from db import * | [
"[email protected]"
]
| |
0f4027eae87108f86b40e63e93b1c8d5a28ceee0 | eac7ae395c4832ac394087054ab014d1d6a9f6a6 | /python_experiments/run_experiments/ppscan/run_ppSCAN_gen_gt.py | 8e301fa2e3767c47b2d15889cde0cfe23caaa991 | [
"MIT"
]
| permissive | mexuaz/AccTrussDecomposition | 21be22007e1c50ca4b7df6fbbad1dfbf4c2fffae | 15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4 | refs/heads/master | 2022-12-14T03:41:05.133564 | 2020-09-03T00:35:33 | 2020-09-03T00:35:33 | 291,565,779 | 0 | 0 | MIT | 2020-08-30T22:57:55 | 2020-08-30T22:57:55 | null | UTF-8 | Python | false | false | 2,983 | py | import subprocess
import socket
import sys
import time
from exec_utilities import time_out_util
from config import *
my_splitter = '-'.join(['*' for _ in range(20)])
def kill_all():
exec_name_lst = []
for exec_name in exec_name_lst:
err_code, output = subprocess.getstatusoutput("ps -ef | grep " + exec_name + " | awk '{print $2}'")
for pid in output.strip().split('\n'):
os.system('kill -9 ' + pid)
time.sleep(5)
def write_split(statistics_file_path):
with open(statistics_file_path, 'a+') as ifs:
ifs.write(my_splitter + my_splitter + '\n')
ifs.write(my_splitter + my_splitter + '\n')
def signal_handler(signal, frame):
# print 'You pressed Ctrl+C!'
kill_all()
sys.exit(0)
def run_exp(env_tag=knl_tag):
with open('config.json') as ifs:
my_config_dict = json.load(ifs)[env_tag]
# print my_config_dict
data_set_path = my_config_dict[data_set_path_tag]
data_set_lst = filter(lambda name: 'rmat' in name, my_config_dict[data_set_lst_tag])
# print data_set_path, data_set_lst
# eps_lst = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# mu_lst = [2, 5, 10, 15]
eps_lst = [0.2]
mu_lst = [5]
root_path = my_config_dict[exp_res_root_path_tag] + '/log/'
def one_round(reorder_method='.'):
statistics_file_path = root_path + 'han-' + reorder_method + '.log'
for data_set_name in data_set_lst:
for eps in eps_lst:
for mu in mu_lst:
algorithm_path = my_config_dict[ppSCAN_exec_path_tag]
params_lst = map(str,
[algorithm_path, os.sep.join([data_set_path, data_set_name, reorder_method]),
eps, mu, 'output', '> /dev/null 2>&1'])
cmd = ' '.join(params_lst)
# print cmd
time_out = 7000
tle_flag, info, correct_info = time_out_util.run_with_timeout(cmd, timeout_sec=time_out)
with open(statistics_file_path, 'a+') as ifs:
ifs.write(info)
ifs.write(correct_info)
ifs.write('\nis_time_out:' + str(tle_flag))
ifs.write(my_splitter + time.ctime() + my_splitter)
ifs.write('\n\n\n\n')
# for reorder_method in ['cache', 'gro']:
# for reorder_method in ['hybrid', 'slashburn', 'bfsr', 'dfs']:
# for reorder_method in ['cache', 'rcm-cache']:
# for reorder_method in ['slashburn']:
for reorder_method in ['.']:
one_round(reorder_method)
if __name__ == '__main__':
hostname = socket.gethostname()
if hostname.startswith('lccpu12'):
run_exp(env_tag=lccpu12_tag)
elif hostname.startswith('gpu23'):
run_exp(env_tag=gpu23_tag)
elif hostname.startswith('gpu'):
run_exp(env_tag=gpu_other_tag)
else:
run_exp(knl_tag)
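# Editor's note: run_exp() reads config.json keyed by the *_tag constants from
# config.py; the shape below is inferred from the lookups above, and every key
# name and value in it is a made-up illustration.
# {
#     "knl": {
#         "data_set_path": "/ssd/datasets",
#         "data_set_lst": ["rmat_v50m_e0.5g"],
#         "exp_res_root_path": "/home/user/exp_results",
#         "ppSCAN_exec_path": "/home/user/ppSCAN/build/pSCANParallelAVX512"
#     }
# }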
| [
"[email protected]"
]
| |
d29f550b849772338f52473d7c5d0a6fe2bc584b | b9134f1aa6e98afc52d9b08eaa1dc48921fff262 | /django_backend/project/urls.py | 9aea40e1dd5b79c63933bc3856dfc5fc7363fcc3 | []
| no_license | parvez301/document-management-system | e208e6786105001a2c12f6c294bb6205b789daac | e9aae1188c864eaf624fb1b7266f8805175257ff | refs/heads/master | 2022-12-10T14:42:34.531767 | 2018-04-17T06:00:51 | 2018-04-17T06:00:51 | 128,873,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from rest_framework import routers
from user_profile.views import UserViewSet
router = routers.DefaultRouter()
router.register(r'user', UserViewSet,)
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
# This is used for user reset password
url(r'^', include('django.contrib.auth.urls')),
url(r'^rest-auth/', include('rest_auth.urls')),
url(r'^rest-auth/registration/', include('rest_auth.registration.urls')),
url(r'^account/', include('allauth.urls')),
url(r'^api/', include(router.urls)),
url(r'', include('doqman.urls', namespace='doqman')),
]
| [
"[email protected]"
]
| |
9b47a510e8d1feddca50084cb85b99f69648aaaf | a5a88a49c5537fb979fce54699402e76f7e6b25f | /tm_manager_backend/contrib/sites/migrations/0003_set_site_domain_and_name.py | 72b33de57583dcd1501a052e012c2b27dd81dafb | [
"MIT"
]
| permissive | nachos5/tm_manager_backend | 053bbad2fb9bedd8b4dbb844e78e1185a9b8fc68 | 27b0479b239a6ce3073b4de2f76b402ec986975c | refs/heads/master | 2020-08-28T13:41:34.184528 | 2020-04-16T07:27:28 | 2020-04-16T07:27:28 | 217,715,259 | 0 | 0 | MIT | 2019-11-28T00:51:59 | 2019-10-26T13:47:53 | Python | UTF-8 | Python | false | false | 1,016 | py | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "example.com",
"name": "tm-manager-backend",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
| [
"[email protected]"
]
| |
1862703f8c01562c66bd7a1c6efecc2f6807570c | b2c8ccbf7466f7a920a2b19c64dcfb4f2b72f836 | /api/app/models.py | 9b98473ee096164d1e3ba48ded2991b1f17b7a67 | [
"MIT"
]
| permissive | duranlopes/k8s-iac | 534f056c9a076e0c7f7a2291a99c56ffbd327d39 | 5bdc872b19a9117d48c0f1777a4c5d4246ea7b3b | refs/heads/main | 2023-05-12T00:14:40.956062 | 2021-04-14T02:52:26 | 2021-04-14T02:52:26 | 338,554,678 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from .database import Base
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True)
email = Column(String(100), unique=True, index=True)
hashed_password = Column(String(100))
is_active = Column(Boolean, default=True)
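    # One-to-many link to Item; Item.owner below is the reverse side of the relationship.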
items = relationship("Item", back_populates="owner")
class Item(Base):
__tablename__ = "items"
id = Column(Integer, primary_key=True, index=True)
title = Column(String(100), index=True)
description = Column(String(100), index=True)
owner_id = Column(Integer, ForeignKey("users.id"))
owner = relationship("User", back_populates="items") | [
"[email protected]"
]
| |
fdb72ad1d70d9151373c79a0df8aafb4db9eb3eb | 75af5903e789ba0c50336e802d4828d8eb69c269 | /src/variational_strategies.py | d4d9ae73fe9fab669314225c225e60849fe53a46 | [
"MIT"
]
| permissive | jackgoffinet/poe-vae | a7a990c893bb29b532c565bf1af3f654754b6f85 | 18ca2cd4cffe3259e19525c2dc65c84d7219e9d6 | refs/heads/master | 2023-09-06T09:45:30.037720 | 2021-11-15T16:39:27 | 2021-11-15T16:39:27 | 329,687,476 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 8,940 | py | """
Define strategies for combining evidence into variational distributions.
These strategies all subclass `torch.nn.Module`. Their job is to convert
parameter values straight out of the encoder into a variational posterior,
combining evidence across the different modalities in some way.
TO DO
-----
* The GaussianPoeStrategy assumes a unit normal prior. Generalize this.
"""
__date__ = "January - May 2021"
import torch
import torch.nn.functional as F
class AbstractVariationalStrategy(torch.nn.Module):
"""Abstract variational strategy class"""
def __init__(self):
super(AbstractVariationalStrategy, self).__init__()
def forward(self, *modality_params, nan_mask=None):
"""
Combine the information from each modality into prior parameters.
Parameters
----------
modality_params : ...
nan_mask : torch.Tensor
Indicates where data is missing.
Shape: [b,m]
Returns
-------
prior_parameters : ...
"""
raise NotImplementedError
class GaussianPoeStrategy(AbstractVariationalStrategy):
EPS = 1e-5
def __init__(self, **kwargs):
"""
Gaussian product of experts strategy
Note
----
* Assumes a standard normal prior!
"""
super(GaussianPoeStrategy, self).__init__()
def forward(self, means, log_precisions, nan_mask=None, collapse=True):
"""
Given means and log precisions, output the product mean and precision.
Parameters
----------
means : torch.Tensor or list of torch.Tensor
Shape:
[batch,modality,z_dim] if vectorized
[modality][batch,z_dim] otherwise
log_precisions : torch.Tensor or list of torch.Tensor
Shape:
[batch,modality,z_dim] if vectorized
[modality][batch,z_dim] otherwise
nan_mask : torch.Tensor
Indicates where data is missing.
Shape: [batch,modality]
collapse : bool, optional
Whether to collapse across modalities.
Returns
-------
if `collapse`:
prec_mean : torch.Tensor
Shape: [batch, z_dim]
precision : torch.Tensor
Shape: [batch, z_dim]
else:
prec_means : torch.Tensor
Shape: [b,m,z]
precisions : torch.Tensor
Does not include the prior expert!
Shape: [b,m,z]
"""
if isinstance(means, (tuple,list)): # not vectorized
means = torch.stack(means, dim=1) # [b,m,z]
log_precisions = torch.stack(log_precisions, dim=1) # [b,m,z]
precisions = torch.exp(log_precisions) # [b,m,z]
if nan_mask is not None:
temp_mask = nan_mask
assert len(precisions.shape) == 3, f"len({precisions.shape}) != 3"
temp_mask = (~temp_mask).float().unsqueeze(-1)
temp_mask = temp_mask.expand(-1,-1,precisions.shape[2])
precisions = precisions * temp_mask
prec_means = means * precisions
if collapse:
return self.collapse(prec_means, precisions)
return prec_means, precisions
def collapse(self, prec_means, precisions, include_prior=True):
"""
Collapse across modalities, combining evidence.
Parameters
----------
prec_means : torch.Tensor
Shape: [b,m,z]
precisions : torch.Tensor
Shape: [b,m,z]
include_prior : bool, optional
Whether to include the effect of the prior expert.
Returns
-------
prec_mean : torch.Tensor
Shape: [b,z]
precision : torch.Tensor
Shape: [b,z]
"""
precision = torch.sum(precisions, dim=1) # [b,m,z] -> [b,z]
if include_prior:
precision = precision + 1.0
prec_mean = torch.sum(prec_means, dim=1) # [b,m,z] -> [b,z]
return prec_mean, precision
class GaussianMoeStrategy(AbstractVariationalStrategy):
def __init__(self, **kwargs):
"""
Gaussian mixture of experts strategy
Note
----
* Assumes a standard normal prior!
"""
super(GaussianMoeStrategy, self).__init__()
def forward(self, means, log_precisions, nan_mask=None):
"""
Given means and log precisions, output mixture parameters.
Parameters
----------
        means : torch.Tensor or tuple of torch.Tensor
Shape:
[b,m,z] if vectorized
[m][b,z] otherwise
        log_precisions : torch.Tensor or tuple of torch.Tensor
Shape:
[b,m,z] if vectorized
[m][b,z] otherwise
nan_mask : torch.Tensor
Indicates where data is missing.
Shape: [batch,modality]
Returns
-------
mean : torch.Tensor
Shape: [batch, m, z_dim]
precision : torch.Tensor
Shape: [batch, m, z_dim]
"""
tuple_flag = isinstance(means, (tuple,list)) # not vectorized
if tuple_flag:
means = torch.stack(means, dim=1) # [b,m,z]
log_precisions = torch.stack(log_precisions, dim=1)
precisions = torch.exp(log_precisions) # [b,m,z]
# Where modalities are missing, sample from the prior.
if nan_mask is not None:
temp_mask = nan_mask
assert len(precisions.shape) == 3
temp_mask = (~temp_mask).float().unsqueeze(-1)
temp_mask = temp_mask.expand(-1,-1,precisions.shape[2])
precisions = precisions * temp_mask
means = means * temp_mask
precisions = precisions + 1.0 # Add the prior expert.
return means, precisions
class VmfPoeStrategy(AbstractVariationalStrategy):
EPS = 1e-5
def __init__(self, n_vmfs=5, vmf_dim=4, **kwargs):
"""
von Mises Fisher product of experts strategy
Parameters
----------
n_vmfs : int, optional
vmf_dim : int, optional
"""
super(VmfPoeStrategy, self).__init__()
self.n_vmfs = n_vmfs
self.vmf_dim = vmf_dim
def forward(self, kappa_mus, nan_mask=None):
"""
Multiply the vMF's given by the kappa_mus.
Parameters
----------
kappa_mus : torch.Tensor or list of torch.Tensor
Shape:
[b,m,n_vmfs*(vmf_dim+1)] if vectorized
[m][b,n_vmfs*(vmf_dim+1)] otherwise
nan_mask : torch.Tensor
Indicates where data is missing.
Shape: [b,m]
Returns
-------
kappa_mu : tuple of torch.Tensor
Shape: [1][b,n_vmfs,vmf_dim+1]
"""
        tuple_flag = isinstance(kappa_mus, (tuple, list))  # not vectorized
if tuple_flag:
kappa_mus = torch.stack(kappa_mus, dim=1) # [b,m,n_vmf*(vmf_dim+1)]
assert len(kappa_mus.shape) == 3, f"len({kappa_mus.shape}) != 3"
assert kappa_mus.shape[2] == self.n_vmfs * (self.vmf_dim+1), \
f"error: {kappa_mus.shape}, {self.n_vmfs}, {self.vmf_dim}"
new_shape = kappa_mus.shape[:2]+(self.n_vmfs, self.vmf_dim+1)
kappa_mus = kappa_mus.view(new_shape) # [b,m,n_vmfs,vmf_dim+1]
if nan_mask is not None:
temp_mask = nan_mask # [b,m]
temp_mask = (~temp_mask).float().unsqueeze(-1).unsqueeze(-1)
temp_mask = temp_mask.expand(
-1,
-1,
kappa_mus.shape[2],
kappa_mus.shape[3],
) # [b,m,n_vmfs,vmf_dim+1]
kappa_mus = kappa_mus * temp_mask
# Combine all the experts.
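        # Summing the kappa*mu natural parameters corresponds to multiplying the (unnormalized) vMF densities.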
kappa_mu = torch.sum(kappa_mus, dim=1) # [b,n_vmfs,vmf_dim+1]
return (kappa_mu,)
class LocScaleEbmStrategy(AbstractVariationalStrategy):
EPS = 1e-5
def __init__(self, **kwargs):
"""
Location/Scale EBM strategy: multiply the Gaussian proposals
"""
super(LocScaleEbmStrategy, self).__init__()
def forward(self, thetas, means, log_precisions, nan_mask=None, \
collapse=True):
"""
Mostly just pass the parameters and apply NaN mask.
Parameters
----------
thetas: torch.Tensor or tuple of torch.Tensor
Describes deviations from the Gaussian proposal
Shape:
[b,m,theta_dim] if vectorized
[m][b,theta_dim] otherwise
means : torch.Tensor or tuple of torch.Tensor
Means of the Gaussian proposals
Shape:
[batch,m,z_dim] if vectorized
[m][batch,z_dim] otherwise
log_precisions : torch.Tensor or tuple of torch.Tensor
log precisions of the Gaussian proposals
Shape:
[batch,m,z_dim] if vectorized
[m][batch,z_dim] otherwise
nan_mask : torch.Tensor
Indicates where data is missing.
Shape: [b,m]
collapse : bool, optional
Doesn't do anything. Here because AR-ELBO expects it.
Returns
-------
thetas : torch.Tensor
Shape: [b,m,theta_dim]
means : torch.Tensor
Shape: [b,m,z]
prec_means : torch.Tensor
Shape: [b,m,z]
precisions : torch.Tensor
Shape: [b,m,z]
nan_mask : torch.Tensor
Shape : [b,m]
"""
if isinstance(means, (tuple,list)):
thetas = torch.stack(thetas, dim=1) # [b,m,theta]
means = torch.stack(means, dim=1) # [b,m,z]
log_precisions = torch.stack(log_precisions, dim=1) # [b,m,z]
thetas = torch.sigmoid(thetas) # restrict range of thetas
precisions = log_precisions.exp() # [b,m,z]
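        # Clamp so a large log-precision cannot produce numerically unstable precisions.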
precisions = torch.clamp(precisions, max=50.0)
if nan_mask is not None:
assert len(precisions.shape) == 3, f"len({precisions.shape}) != 3"
temp_mask = (~nan_mask).float().unsqueeze(-1)
temp_mask = temp_mask.expand(-1,-1,precisions.shape[2])
precisions = precisions * temp_mask
prec_means = means * precisions
if torch.isnan(precisions).sum() > 0:
print("LocScaleEbmStrategy NaN")
print("prec_means", torch.isnan(prec_means).sum())
print("thetas", torch.isnan(thetas).sum())
print("means", torch.isnan(means).sum())
print("precisions", torch.isnan(precisions).sum())
print("log_precisions", torch.isnan(log_precisions).sum())
print()
return thetas, means, prec_means, precisions, nan_mask
if __name__ == '__main__':
pass
###
| [
"[email protected]"
]
| |
29ca1331b2e363267325dd1a0b8e07a808ab8d0b | aea28642b2f680c1597bf73dd0025e5be2837919 | /fizzbuzz.py | 93786563960931a8a454b6f2663d87cbb36a327d | []
| no_license | krushigada/fizzbuzz | cd0134be0caabfb8536f5d71287743e893263f4c | bcbc0359b0e5556db42158f0cf616d4ae38e4b1a | refs/heads/master | 2020-06-15T13:52:44.395878 | 2016-12-01T12:26:08 | 2016-12-01T12:26:08 | 75,288,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,060 | py | play="start"
while(play=="start"):
print("Multiples of 3 represented by Fizz\nMultiples of 5 represented by Buzz\n")
n=input("Enter number limit:")
i=1
while(i<=n):
if(i%2!=0):
if(i%3!=0 and i%5!=0):
print "Computer: ",i
elif(i%3==0 and i%5==0):
print("Computer: FizzBuzz")
elif(i%3==0):
print("Computer: Fizz")
elif(i%5==0):
print("Computer: Buzz")
else:
j=raw_input("Player: ")
if(i%3==0 and i%5==0 and j!="FizzBuzz"):
print("Computer Wins!")
break
elif(i%5==0 and j!="Buzz"):
print("Computer Wins!")
break
elif(i%3==0 and j!="Fizz"):
print("Computer Wins!")
break
elif(i!=int(j)):
print("Computer Wins!")
break
i=i+1
print("Enter 'start' to start again!\n")
play=raw_input()
| [
"[email protected]"
]
| |
5328383760e66d74cccabfe70245b53bafd48034 | 5ae88a0a197a52086edffe9c031bc424634b55d4 | /AizuOnlineJudge/ITP1/4/C.py | 4c5a62f99eaf85c97dcbe26bf43acbd39a709a6d | []
| no_license | muck0120/contest | 1b1226f62e0fd4cf3bd483e40ad6559a6f5401cb | de3877822def9e0fe91d21fef0fecbf025bb1583 | refs/heads/master | 2022-07-17T17:31:50.934829 | 2020-05-18T09:55:30 | 2020-05-18T09:55:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | a, op, b = 0, '', 0
# Read "a op b" triples until the operator is '?'; '/' is integer division.
while op != '?':
    a, op, b = input().split()
    if op == '+':
        print(int(a) + int(b))
    elif op == '-':
        print(int(a) - int(b))
    elif op == '*':
        print(int(a) * int(b))
    elif op == '/':
        print(int(a) // int(b)) | [
"[email protected]"
]
| |
57fbff1ecd278da221595af255f9a54d9e4a0ef2 | 452c698c426c45b306b4959f89799d9560601449 | /polynomial_regression.py | 7d7d63f6a732f4852bf1f998dfc362de2a495c0e | []
| no_license | khushipathak/ML_templates | a0f54f357b108903c3e422ca5b0c56a2aba913a1 | e9928c0c81cffffa925070a062c173e0c1868d13 | refs/heads/master | 2022-04-16T07:55:30.651556 | 2020-04-16T13:05:09 | 2020-04-16T13:05:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,279 | py | # Polynomial Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
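# The 1:2 slice keeps X two-dimensional (a matrix), as scikit-learn estimators expect.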
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
# We won't do this here because the dataset is very small.
"""from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)"""
# Fitting Linear Regression to the dataset
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
X_poly = poly_reg.fit_transform(X)
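# X_poly now holds the polynomial features [1, x, x^2, x^3, x^4] for each position level.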
#poly_reg.fit(X_poly, y)
lin_reg_poly = LinearRegression()
lin_reg_poly.fit(X_poly, y)
# Visualising the Linear Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg.predict(X), color = 'blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, lin_reg_poly.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Polynomial Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, lin_reg_poly.predict(poly_reg.fit_transform(X_grid)), color = 'blue')
plt.title('Truth or Bluff (Polynomial Regression) smoother')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Predicting a new result with Linear Regression
lin_reg.predict([[6.5]])
# Predicting a new result with Polynomial Regression
lin_reg_poly.predict(poly_reg.fit_transform([[6.5]]))
| [
"[email protected]"
]
| |
d2fec5ef863a58ae882b3eaa5bf73ef6f3839f05 | bc76caf747d4de910de768a9809c85e57ee45ebc | /nba_automation/utilities/CustomListener.py | 283a50dc6fc411591c4e4fa8aedb5487e68bbad8 | [
"MIT"
]
| permissive | sohailchd/RobotAndLocust | 48b7491f227ee97bd618b0a6df292a626c58dc67 | 9ae953e293e12129db761b553691da935f6db89f | refs/heads/master | 2020-04-17T01:30:44.986583 | 2019-01-17T17:25:01 | 2019-01-17T17:25:01 | 166,095,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | from utilities.BrowserManager import BrowserManager
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.Screenshot import Screenshot
import conf
from utilities.CustomUtils import CustomUtils
from robot.api import logger
class CustomListener(object):
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
ROBOT_LISTENER_API_VERSION = 2
def __init__(self):
self.ROBOT_LIBRARY_LISTENER = self
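        # Registering the instance as its own listener lets Robot Framework call the hooks below.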
print("init_listener called...")
BrowserManager.initialize_browser()
def start_suite(self,data, suite):
print("start_suite listener called...")
if not BrowserManager.get_browser():
BrowserManager.initialize_browser()
def end_suite(self,data, suite):
print("end_suite listener called...")
# def _end_suite(self, name, attrs):
# print('Suite %s (%s) ending.' % (name, attrs['id']))
    def log_message(self, message):
        '''
        On a FAIL log message, link the test's screenshot into the HTML log.
        '''
if message['level'] == 'FAIL':
fname = "./failed_screenshots/" + self.test_name + ".png"
logger.info(f'<a href="{fname}"> <i> SCREENSHOT </i></a>', html=True)
    def start_test(self, name, attributes):
        '''
        Save the test name so other hooks (e.g. log_message) can use it.
        '''
        self.test_name = name
def end_test(self, name, attributes):
""" The `end test` hook """
print(f"test ended with result : {attributes['status']} ")
if attributes['status'] == "FAIL":
CustomUtils.take_screenshot(f"{name}.png")
    def close(self):
        '''
        Tear down the shared browser once the whole run is finished.
        '''
print("close called.........")
BrowserManager.teardown_suite() | [
"[email protected]"
]
| |
d9cf2daff3cef138e42f78dcdd0a82fc58ac3ee8 | 4cf9acba6ef4ec25eee76ad66cb595cacb09c181 | /server.py | 7cd81008afe6486f5138f1688fc885a555765e0d | []
| no_license | jscott1989/BlueBallsInc | f8c6a0b3cbe9b3701dc16f91daa05b06650331e3 | 2e8b2dfae6053dc7d286cddc26faefcf60e0d1a3 | refs/heads/master | 2016-09-05T18:13:41.568795 | 2012-08-09T06:47:20 | 2012-08-09T06:47:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | """
Server side of Blue Balls Inc.
"""
from sys import argv
import os
import bottle
from bottle import get, post, view, static_file, route, redirect, abort, request
from couchdbkit import Server
import json
bottle.debug(True)
# Auto-reload is enabled via reloader=True in bottle.run() below.
bottle.TEMPLATE_PATH = ["./templates"]
root_directory = os.path.dirname(os.path.realpath(__file__))
db_host = os.environ.get('CLOUDANT_URL', "http://localhost:5984")
db = Server(db_host).get_or_create_db("blueballs")
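# Falls back to a local CouchDB instance when CLOUDANT_URL is not set.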
@get('/')
@view("wrap")
def wrap_index():
return {"inner": "/inner/"}
@get('/inner/')
@view("index")
def inner_index():
# The main page
return {"level": 1, "auto_load_game": "false", "replay_mode": False}
@post('/replay/new')
def post_replay():
# Save the replay to the database
try:
# We decode then encode to ensure there's nothing bad in it
replay = {"replay_flag": True, "name": request.POST['name'], "state": json.loads(request.POST['state'])}
    except (KeyError, ValueError):  # missing POST field or malformed JSON state
abort(400, "Invalid state data")
db.save_doc(replay)
return redirect('/replay/%s' % replay['_id'])
@get('/replay/:replay_id')
@view("wrap")
def wrap_replay(replay_id):
return {"inner": "/inner/replay/%s" % replay_id}
@get('/inner/replay/:replay_id')
@view("index")
def replay(replay_id):
if not db.doc_exist(replay_id):
abort(404, "Replay not found")
replay = db.get(replay_id)
if not replay.get('replay_flag'):
abort(404, "Replay not found")
return {"level": 1, "auto_load_game": "false", "replay_mode": True, "replay": json.dumps(replay)}
@get('/level/:level_name')
@view("wrap")
def wrap_level(level_name):
return {"inner": "/inner/level/%s" % level_name}
@get('/inner/level/:level_name')
@view("index")
def play_level(level_name):
# Jump to a particular level
return {"level": level_name, "auto_load_game": "true", "replay_mode": False}
@route('/css/<filepath:path>')
def static_css(filepath):
return static_file(filepath, root=root_directory + '/static/css/')
@route('/img/<filepath:path>')
def static_img(filepath):
return static_file(filepath, root=root_directory + '/static/img/')
@route('/js/<filepath:path>')
def static_js(filepath):
return static_file(filepath, root=root_directory + '/static/js/')
@route('/sound/<filepath:path>')
def static_sound(filepath):
return static_file(filepath, root=root_directory + '/static/sound/')
@route('/levels/<level>')
def level(level):
return static_file(level + '.js', root=root_directory + '/levels/')
bottle.run(host='0.0.0.0', port=int(argv[1]), reloader=True) | [
"[email protected]"
]
| |
2b880ed1f82c06098d72ea38fd63ef4dfa9eb407 | bc09d43caaa31697c8099bfafefb56e48504bfea | /occurrence_of_ME_and_MY.py | bfff98cdb76a53df5988df6df9f276e1105f805f | []
| no_license | FatemaBohra/python-program | 103f2584eb2a31de0080059d013051e415ce23c9 | ce647be15245098ae562099475cac52582e1b30f | refs/heads/main | 2023-09-04T06:22:41.012712 | 2021-11-04T15:00:25 | 2021-11-04T15:00:25 | 418,728,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | # Quetion-8(FILE HANDLING)
def occurrence_of_ME_MY(file_name):
    # Count whitespace-separated tokens that are exactly 'MY' or 'ME'.
    with open(file_name, 'r') as file:
        file_list = file.read().split()
    count = 0
    for word in file_list:
        if word == 'MY' or word == 'ME':
            count = count + 1
    return count
print(occurrence_of_ME_MY('DATA.DAT.txt'))
| [
"[email protected]"
]
| |
6643171f45a1abb55c1f08e0b88e1f793f5b599c | 5efc306e30024bc0490bb3e22810669b590e19a4 | /baidupic/spiders/baidupic.py | 1beec79e7951c617777c0053ece0ba847e79f253 | []
| no_license | ZGC-demo/Baidupic | 2aa127ed771214dc3c07db9c198d216cb2cc8943 | 312239fda0284220304370d1397f8846afe5cb87 | refs/heads/master | 2020-03-19T20:40:00.995880 | 2018-06-11T07:58:42 | 2018-06-11T07:58:42 | 136,910,843 | 1 | 0 | null | 2018-06-11T10:27:33 | 2018-06-11T10:27:33 | null | UTF-8 | Python | false | false | 948 | py | import scrapy
import json
from ..items import BaidupicItem
class BaidupicSpider(scrapy.Spider):
name = 'baidupicspider'
allowed_domains = ['image.baidu.com']
pn = 0
def __init__(self, keywords=None, page=None, *args, **kwargs):
super(BaidupicSpider, self).__init__(*args, **kwargs)
        # Baidu pages results 30 at a time via the pn offset; end the URL at "pn=" so offsets append cleanly.
        self.init_url = 'https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%s&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=&z=&ic=&word=%s&s=&se=&tab=&width=&height=&face=&istype=&qc=&nc=1&fr=&rn=30&gsm=&1525406929428=&pn=' % (keywords, keywords)
        self.start_urls = [self.init_url + str(30 * x) for x in range(int(page))]
def parse(self, response):
data = json.loads(response.text)["data"]
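        # Entries without a 'thumbURL' carry no usable thumbnail, so they are skipped below.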
for each in data:
            if 'thumbURL' not in each:
continue
item = BaidupicItem(url=each['thumbURL'])
yield item
| [
"[email protected]"
]
| |
b08740255346a53fa8ec7c2417b89339512cad71 | c16ea32a4cddb6b63ad3bacce3c6db0259d2bacd | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/services/types/carrier_constant_service.py | f2b6d5375d401f3038ed62a7fe4870b6286bb9b8 | [
"Apache-2.0"
]
| permissive | dizcology/googleapis-gen | 74a72b655fba2565233e5a289cfaea6dc7b91e1a | 478f36572d7bcf1dc66038d0e76b9b3fa2abae63 | refs/heads/master | 2023-06-04T15:51:18.380826 | 2021-06-16T20:42:38 | 2021-06-16T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,276 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.services',
marshal='google.ads.googleads.v5',
manifest={
'GetCarrierConstantRequest',
},
)
class GetCarrierConstantRequest(proto.Message):
r"""Request message for
[CarrierConstantService.GetCarrierConstant][google.ads.googleads.v5.services.CarrierConstantService.GetCarrierConstant].
Attributes:
resource_name (str):
Required. Resource name of the carrier
constant to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"bazel-bot-development[bot]@users.noreply.github.com"
]
| bazel-bot-development[bot]@users.noreply.github.com |
6a1b449d48374d0236e01a797249dc7ed36bda8f | fcd965c9333ee328ec51bc41f5bc0300cc06dc33 | /Trees/BinaryTrees/invertBinaryTree.py | ff36aca852fde299f4c691638ebff0e31c239ed8 | []
| no_license | henrylin2008/Coding_Problems | 699bb345481c14dc3faa8bab439776c7070a1cb0 | 281067e872f73a27f76ae10ab0f1564916bddd28 | refs/heads/master | 2023-01-11T11:55:47.936163 | 2022-12-24T07:50:17 | 2022-12-24T07:50:17 | 170,151,972 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | # Invert Binary Tree
# Level: Medium
# https://www.algoexpert.io/questions/Invert%20Binary%20Tree
# Write a function that takes in a Binary Tree and inverts it. In other words, the function should swap every left node
# in the tree for its corresponding (mirrored) right node. Each Binary Tree node has a value stored in a property called
# "value" and two children nodes stored in properties called "left" and "right," respectively. Children nodes can either
# be Binary Tree nodes themselves or the None (null) value.
#
# Sample Input:
# 1
# / \
# 2 3
# / \ / \
# 4 5 6 7
# / \
# 8 9
#
# Sample Output:
# 1
# / \
# 3 2
# / \ / \
# 7 6 5 4
# / \
# 9 8
# Method 1: iterative | Breadth first search, go through nodes level by level, swap left and right nodes, then append it
# to the queue
# Time: O(n)
# Space: O(n)
def invertBinaryTree(tree):
queue = [tree] # using queue to store nodes
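    # A collections.deque would make the pop-from-front O(1); a plain list keeps the example simple.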
    while len(queue):  # while there are still nodes in the queue
current = queue.pop(0) # current is first node in the queue
if current is None: # skip if the node is null node
continue
swapLeftAndRight(current) # call helper function to swap left and right nodes
queue.append(current.left) # add left node to the queue
queue.append(current.right) # add right node to the queue
def swapLeftAndRight(tree): # helper function that swap left and right nodes
tree.left, tree.right = tree.right, tree.left
# Method 2: Recursive | Efficient in Space;
# Logic: start at the root node, recursive calls on invertBinaryTree for its left and right nodes
# Time: O(n); n is number of nodes
# Space: O(d): d is depth of the tree
def invertBinaryTree(tree):
if tree is None: # if tree is null, then skip it
return
swapLeftAndRight(tree) # call helper function to swap left and right nodes
invertBinaryTree(tree.left) # recursive call on left side of the tree
invertBinaryTree(tree.right) # recursive call on right side of the tree
def swapLeftAndRight(tree): # helper function that swap left and right nodes
tree.left, tree.right = tree.right, tree.left
| [
"[email protected]"
]
| |
2d1867c309751c37b717ebd8c792178e29f3ec0b | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_REPO/MICROSOFT/c9-python-getting-started/python-for-beginners/03_-_Comments/comments_for_debugging.py | 0e556337e3699fa7ccc4a48a6c38132b1035d9e2 | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 59 | py | print('Hello world')
print("It's a small world after all")
| [
"[email protected]"
]
| |
40c900c79c7327d8f8160ca53984e624ccd1db22 | 06ee5a5d83466896bbfd1653206da0151d6aa81a | /apps/business/serializers/reason_serializer.py | 163498465bee80f27fd29bf7a66b1577347785ed | []
| no_license | fengjy96/rest_task | 201421a40ce42031223f61135d1d5e85809188e6 | db1d7c4eb2d5d229ab54c6d5775f96fc1843716e | refs/heads/master | 2020-07-22T19:48:19.940094 | 2019-09-02T13:40:11 | 2019-09-02T13:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 741 | py | from rest_framework import serializers
from business.models.reason import Reason
class ReasonsListSerializer(serializers.ModelSerializer):
"""
    Reason: CRUD (create / read / update / delete)
"""
sender = serializers.SerializerMethodField()
receiver = serializers.SerializerMethodField()
def get_sender(self, obj):
if obj.sender:
return {
'id': obj.sender.id,
'name': obj.sender.name,
}
def get_receiver(self, obj):
if obj.receiver:
return {
'id': obj.receiver.id,
'name': obj.receiver.name,
}
class Meta:
model = Reason
fields = '__all__'
depth = 1
| [
"[email protected]"
]
| |
6ff1780b3c83117a2da2abf8d6d4dbed1e39d2c7 | 4727dc81850451f9bb0578bccb24731f10d0b225 | /file_cache/app.py | 4d0dc4fcc58c08d187aea3502dc97c0507962e9d | [
"MIT"
]
| permissive | Chise1/file_cache | 27bac3b1e8116829f6f265d1466878cc060f01ca | 3fa3da64ffb414102c1db454fcb5abc59ac74162 | refs/heads/main | 2023-04-15T07:27:50.530564 | 2021-04-24T01:56:41 | 2021-04-24T01:56:41 | 359,651,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | from fastapi import FastAPI, File, UploadFile
from pydantic import BaseModel
from file_cache import settings
try:
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    # uvloop is optional; fall back to asyncio's default event loop.
    pass
if settings.CACHE == "MEMORY":
from file_cache.memory_cache import default_file_path, get_file_path
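    # Other CACHE backends would need their own import branch; the routes below assume these two helpers exist.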
app = FastAPI(title="File Cache System")
class FileID(BaseModel):
file_id: str
class FilePath(BaseModel):
file_path: str
@app.post("/file/{project_id}", response_model=FileID)
async def write_file(project_id: str, file: UploadFile = File(...)):
"""
    Write an uploaded file into the cache and return its file id.
"""
file_id = await default_file_path(project_id, file)
return FileID(file_id=file_id)
@app.get("/file/{project_id}/{file_id}", response_model=FilePath)
async def file_path(project_id: str, file_id: str):
    """Look up and return the cached file's path."""
    path = await get_file_path(project_id, file_id)  # avoid shadowing the handler's name
    return FilePath(file_path=path)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app)
| [
"[email protected]"
]
|