blob_id (string, 40) | directory_id (string, 40) | path (string, 2-616) | content_id (string, 40) | detected_licenses (list, 0-69) | license_type (2 classes) | repo_name (string, 5-118) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (string, 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (213 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (246 classes) | content (string, 2-10.3M) | authors (list, 1) | author_id (string, 0-212)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9aa43822098b1d19715a01e7607e3f68485f1762 | e422af828a758368fa7a16745b7ae7d0e6c800e8 | /804_UniqueMorseCodeWords.py | 3811f21858311a91bdadb404e90903673715033a | [
"MIT"
]
| permissive | xyp8023/LeetCode_Python | 8b2eb8f4d036c6510f83a3d50e34ce9383cee0f7 | 09281f9a1eafb262b06336a6969c97d2418899b4 | refs/heads/master | 2020-03-30T20:26:21.966605 | 2019-09-08T09:32:52 | 2019-09-08T09:32:52 | 151,588,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,287 | py | # class Solution:
# def uniqueMorseRepresentations(self, words):
# """
# :type words: List[str]
# :rtype: int
# """
# MorseCode = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--", "-.",
# "---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."]
# res = []
# ascii_a = ord('a') # 97
# for i in words:
# Str = ''
# for j in i:
# Str += MorseCode[ord(j) - ascii_a]
# res.append(Str)
# return len(set(res))
# another solution
class Solution:
def uniqueMorseRepresentations(self, words):
"""
:type words: List[str]
:rtype: int
"""
MorseCode = [".-", "-...", "-.-.", "-..", ".", "..-.", "--.", "....", "..", ".---", "-.-", ".-..", "--",
"-.",
"---", ".--.", "--.-", ".-.", "...", "-", "..-", "...-", ".--", "-..-", "-.--", "--.."]
ascii_a = ord('a') # 97
seen = {''.join(MorseCode[ord(c)-ascii_a] for c in word) for word in words}
return len(seen)
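# Illustrative note (added): "gin" -> "--." + ".." + "-." == "--...-." and
# "zen" -> "--.." + "." + "-." == "--...-.", so both words collapse to a single
# set entry and the demo below prints 2.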
words = ["gin", "zen", "gig", "msg"]
sol = Solution()
res = sol.uniqueMorseRepresentations(words)
print(res) | [
"[email protected]"
]
| |
a5a47498059620de02d04269f2c99b0f5501de51 | bb986a65799c427efb36032c0e8020d0e4647eea | /ScoreCardModel/feature_selection/distribution.py | 1e0e98efae96d1c8d59adb1c8d98d51f8105a17a | []
| no_license | likaituo/ScoreCardModel | a743d4573a1627f8e248a3193ab00e3176d805ef | 1c19405e9bb2a76bb65208751ee3a6e228a85c81 | refs/heads/master | 2020-12-02T11:06:24.674264 | 2017-06-05T10:27:02 | 2017-06-05T10:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | __all__ = ["Distribution"]
import numpy as np
from typing import Tuple
import matplotlib.pyplot as plt
class Distribution:
"""
Property:
    x (np.ndarray): - the input array
    X (np.ndarray): - the bin edges of the array
    Y (np.ndarray): - the count of elements falling into each bin
    xticks (np.ndarray): - labels describing the equal-width bins
    segment (int): - the number of equal-width bins
"""
@staticmethod
def calculate(x: np.ndarray, segment: int = 5)->Tuple[np.ndarray, np.ndarray]:
"""计算分布
Parameters:
x (np.ndarray): - 复杂的多层序列
segment (int): - 均分的分段数,默认100
Returns:
Tuple[np.ndarray, np.ndarray]: - 返回用于画图的X,Y
"""
min_val = min(x)
max_val = max(x)
Y, X = np.histogram(x, bins=segment, range=(min_val, max_val))
return Y, X
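    # Illustrative note (added): np.histogram returns `segment` counts and
    # `segment + 1` bin edges, e.g.
    #   Y, X = Distribution.calculate(np.array([1, 2, 3, 4]), segment=2)
    #   # Y == array([2, 2]), X == array([1. , 2.5, 4. ])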
@property
def x(self):
return self.__x
@property
def X(self):
return self.__X
@property
def Y(self):
return self.__Y
@property
def xticks(self):
return self.__xticks
@property
def segment(self)->int:
return self.__segment
@segment.setter
def segment(self, n: int):
if isinstance(n, int) and n > 0:
self.__segment = n
else:
raise AttributeError("segment must be a positive integer")
def __call__(self):
"""输出不同分段中数组中的元素每个分段中的出现的个数
"""
return dict(list(zip(self.xticks, self.Y)))
def __init__(self, x: np.ndarray, segment: int = 100)->None:
self.__x = x
self.segment = segment
self.calcul_distribution()
def calcul_distribution(self):
self.__Y, self.__X = Distribution.calculate(self.x, self.segment)
self.__xticks = [str(self.X[i]) + '~' + str(self.X[i + 1]) for i in range(len(self.Y))]
def draw(self):
"""画出分布情况
"""
        xticks = self.xticks
        plt.bar(range(len(self.Y)), self.Y)
        for i, j in zip(range(len(self.Y)), self.Y):
            plt.text(i, j + 0.5, str(float(self.Y[i]) / sum(self.Y) * 100) + "%")
        plt.xlim(-1, self.segment * 1.1)
        plt.xticks(range(len(xticks)), xticks)  # label each bar with its bin range
plt.show()
| [
"[email protected]"
]
| |
7c6d1222f99c5f665ac6b7c7951cb4aef9e82116 | bfe81fda7d7b444e57d03571e619002d74af50c1 | /homework/hw5/layers.py | a15d5cfb4b3c3ca6240f18ea328e40971e0dc3dc | []
| no_license | irishsoul0/Comp-540-----Statistical-Machine-Learning | 562284e9eb0f3eda0418ff75e09422d9d5621ab2 | e987e0eaf53964c865374ce647acf052fdc83734 | refs/heads/master | 2020-05-18T20:55:32.027388 | 2019-05-02T20:17:43 | 2019-05-02T20:17:43 | 184,645,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,198 | py | import numpy as np
def affine_forward(x, theta, theta0):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (m, d_1, ..., d_k) and contains a minibatch of m
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension d = d_1 * ... * d_k, and
then transform it to an output vector of dimension h.
Inputs:
- x: A numpy array containing input data, of shape (m, d_1, ..., d_k)
- theta: A numpy array of weights, of shape (d, h)
- theta0: A numpy array of biases, of shape (h,)
Returns a tuple of:
- out: output, of shape (m, h)
- cache: (x, theta, theta0)
"""
out = None
#############################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
#############################################################################
# 2 lines of code expected
m = x.shape[0]
x_temp = x.reshape((m, -1))
out= np.dot(x_temp, theta) + theta0
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, theta, theta0)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (m, h)
- cache: Tuple of:
- x: Input data, of shape (m, d_1, ... d_k)
- theta: Weights, of shape (d,h)
- theta0: biases, of shape (h,)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (m, d1, ..., d_k)
- dtheta: Gradient with respect to theta, of shape (d, h)
- dtheta0: Gradient with respect to theta0, of shape (1,h)
"""
x, theta, theta0 = cache
dx, dtheta, dtheta0 = None, None, None
#############################################################################
# TODO: Implement the affine backward pass. #
#############################################################################
# Hint: do not forget to reshape x into (m,d) form
# 4-5 lines of code expected
dx = np.dot(dout,theta.T).reshape(x.shape)
m = x.shape[0]
x = x.reshape((m, -1))
dtheta = np.dot(x.T, dout)
dtheta0 = np.sum(dout, axis=0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dtheta, dtheta0
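# A minimal numeric gradient check for the affine layer (added sketch; the
# course's usual eval_numerical_gradient_array helper is assumed unavailable,
# so a single central difference stands in for it):
def _check_affine_backward():
    np.random.seed(0)
    x = np.random.randn(3, 4)
    theta = np.random.randn(4, 5)
    theta0 = np.random.randn(5)
    out, cache = affine_forward(x, theta, theta0)
    dout = np.random.randn(*out.shape)
    dx, _, _ = affine_backward(dout, cache)
    # central-difference estimate of d(loss)/dx[0, 0] where loss = sum(out * dout)
    h = 1e-5
    xp, xm = x.copy(), x.copy()
    xp[0, 0] += h
    xm[0, 0] -= h
    num = (np.sum(affine_forward(xp, theta, theta0)[0] * dout)
           - np.sum(affine_forward(xm, theta, theta0)[0] * dout)) / (2 * h)
    assert abs(num - dx[0, 0]) < 1e-4
# _check_affine_backward()  # uncomment to run the check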
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
#############################################################################
# TODO: Implement the ReLU forward pass. #
#############################################################################
# 1 line of code expected
out = np.maximum(x, 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
#############################################################################
# TODO: Implement the ReLU backward pass. #
#############################################################################
# 1 line of code expected. Hint: use np.where
dx = np.where(x>0, dout.copy(), 0)
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not in
real networks.
Outputs:
- out: Array of the same shape as x.
- cache: A tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
###########################################################################
# 2 lines of code expected
mask = (np.random.rand(*x.shape) < (1-p))/(1-p)
out = x*mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
###########################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
###########################################################################
# 1 line of code expected
out = x
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
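# Note (added): scaling the mask by 1/(1-p) at train time ("inverted dropout")
# keeps E[out] == x, which is why the test-time branch above can return x
# unchanged instead of rescaling at inference.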
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
###########################################################################
# TODO: Implement the training phase backward pass for inverted dropout. #
###########################################################################
# 1 line of code expected
dx = dout * mask
###########################################################################
# END OF YOUR CODE #
###########################################################################
elif mode == 'test':
dx = dout
return dx
def conv_forward_naive(x, theta, theta0, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of m data points, each with C channels, height H and width
W. We convolve each input with F different filters, where each filter spans
all C channels and has height HH and width HH.
Input:
- x: Input data of shape (m, C, H, W)
- theta: Filter weights of shape (F, C, HH, WW)
- theta0: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (m, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, theta, theta0, conv_param)
"""
out = None
#############################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
#############################################################################
m,C, H, W = x.shape
F, C, HH, WW = theta.shape
stride = conv_param['stride']
pad = conv_param['pad']
    H_out = 1 + (H + 2*pad - HH) // stride  # floor division keeps shapes integral in Python 3
    W_out = 1 + (W + 2*pad - WW) // stride
out = np.zeros((m, F, H_out, W_out))
x_pad = np.pad(x, [(0,0),(0,0),(pad, pad),(pad, pad)], mode='constant')
for i in range(m):
for j in range(F):
for k in range(H_out):
for p in range(W_out):
conv = x_pad[ i, :, k*stride:k*stride + HH, p*stride:p*stride + WW]
out[i, j, k, p] = np.sum(conv*theta[j]) + theta0[j]
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, theta, theta0, conv_param)
return out, cache
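# Shape sanity check (added, hedged example): a 4x4 input with a 3x3 filter,
# pad=1 and stride=2 gives H' = 1 + (4 + 2*1 - 3) // 2 = 2, i.e. a 2x2 output map.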
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, theta, theta0, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dtheta: Gradient with respect to theta
- dtheta0: Gradient with respect to theta0
"""
dx, dtheta, dtheta0 = None, None, None
#############################################################################
# TODO: Implement the convolutional backward pass. #
#############################################################################
x, theta, theta0, conv_param = cache
m, C, H, W = x.shape
F, C, HH, WW = theta.shape
stride = conv_param['stride']
pad = conv_param['pad']
dx = np.zeros_like(x)
dtheta = np.zeros_like(theta)
dtheta0 = np.zeros_like(theta0)
    H_out = 1 + (H + 2*pad - HH) // stride  # floor division keeps shapes integral in Python 3
    W_out = 1 + (W + 2*pad - WW) // stride
x_pad = np.pad(x,[(0,0),(0,0),(pad, pad),(pad, pad)], mode='constant')
dx_pad = np.pad(dx,[(0,0),(0,0),(pad, pad),(pad, pad)], mode='constant')
for i in range(m):
for j in range(F):
for k in range(H_out):
for p in range(W_out):
dx_pad[i, :, k*stride:k*stride+HH, p*stride:p*stride+WW] += theta[j,:,:,:]*dout[i,j,k,p]
dtheta[j,:,:,:] += x_pad[i,:,k*stride:k*stride+HH,p*stride:p*stride+WW]*dout[i,j,k,p]
dx = dx_pad[:, :, pad:pad+H, pad:pad+W]
dtheta0 = np.sum(dout ,axis=(0,2,3))
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx, dtheta, dtheta0
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (m, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
#############################################################################
# TODO: Implement the max pooling forward pass #
#############################################################################
m, C, H, W = x.shape
stride = pool_param['stride']
pool_height = pool_param['pool_height']
pool_width = pool_param['pool_width']
    H_out = 1 + (H - pool_height) // stride  # floor division keeps shapes integral in Python 3
    W_out = 1 + (W - pool_width) // stride
out = np.zeros((m,C,H_out,W_out))
for i in range(m):
for j in range(C):
for k in range(H_out):
for p in range(W_out):
                    p_orig = p * stride + pool_width - 1
                    k_orig = k * stride + pool_height - 1
out[i, j, k, p] = np.max(x[i, j,(k_orig-pool_height+1):(k_orig+1), (p_orig-pool_width+1):(p_orig+1)])
#############################################################################
# END OF YOUR CODE #
#############################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
dx = None
#############################################################################
# TODO: Implement the max pooling backward pass #
#############################################################################
x, pool_param = cache
m, C, H, W = x.shape
stride = pool_param['stride']
pool_height = pool_param['pool_height']
pool_width = pool_param['pool_width']
N, C, H_out, W_out = dout.shape
dx = np.zeros_like(x)
for i in range(m):
for j in range(C):
for k in range(H_out):
for p in range(W_out):
                    p_orig = p * stride + pool_width - 1
                    k_orig = k * stride + pool_height - 1
grid_x = x[i, j,(k_orig-pool_height+1):(k_orig+1), (p_orig-pool_width+1):(p_orig+1)]
grid_dx = dx[i, j,(k_orig-pool_height+1):(k_orig+1), (p_orig-pool_width+1):(p_orig+1)]
max_value_index = np.unravel_index(grid_x.argmax(), grid_x.shape)
grid_dx[max_value_index] = dout[i, j, k, p]
#############################################################################
# END OF YOUR CODE #
#############################################################################
return dx
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (m, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (m,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
m = x.shape[0]
correct_class_scores = x[np.arange(m), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(m), y] = 0
loss = np.sum(margins) / m
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(m), y] -= num_pos
dx /= m
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (m, C) where x[i, j] is the score for the jth class
for the ith input.
- y: Vector of labels, of shape (m,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
probs = np.exp(x - np.max(x, axis=1, keepdims=True))
probs /= np.sum(probs, axis=1, keepdims=True)
m = x.shape[0]
loss = -np.sum(np.log(probs[np.arange(m), y])) / m
dx = probs.copy()
dx[np.arange(m), y] -= 1
dx /= m
return loss, dx
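# End-to-end smoke test (added sketch): wire the layers above into a tiny
# affine -> ReLU -> softmax pipeline and confirm the loss is finite and the
# gradient has the right shape.
def _smoke_test():
    np.random.seed(1)
    x = np.random.randn(5, 6)
    theta = np.random.randn(6, 3) * 0.01
    theta0 = np.zeros(3)
    scores, _ = relu_forward(affine_forward(x, theta, theta0)[0])
    loss, dscores = softmax_loss(scores, np.array([0, 1, 2, 0, 1]))
    assert np.isfinite(loss) and dscores.shape == scores.shape
# _smoke_test()  # uncomment to run the check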
| [
"[email protected]"
]
| |
9a3b95bdccdb7b503d60155acf43892274ec58d9 | 62d403765fce807597b33c284a4008c59beeb3e8 | /第二组代码/爬虫/firstscrapy/firstscrapy/spiders/first.py | fedd9a7bc4ce473b56759b685e8391f742b073b9 | []
| no_license | CS1803-SE/The-second-subsystem | aa19a26128da51268b7e8e8b492b4df9487417f3 | fa0dc3d931c658bd8f42d4e95ff72049fe7db5f7 | refs/heads/main | 2023-05-26T08:49:49.599816 | 2021-05-30T15:27:49 | 2021-05-30T15:27:49 | 363,661,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,543 | py | #import scrapy
from firstscrapy.items import FirstscrapyItem
#
# class FirstSpider(scrapy.Spider):
# name = 'first'
# #allowed_domains = ['https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd=%E5%8D%9A%E7%89%A9%E9%A6%86&medium=0']
# baseurl='https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd=%E5%8D%9A%E7%89%A9%E9%A6%86&medium=0&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&rsv_dl=news_b_pn&pn='
# # 第1页为小于10的数字 10为第2页,20为第三页,30为第四页,以此类推
#     # page 1 is any value below 10; pn=10 is page 2, 20 is page 3, 30 is page 4, and so on
# # 点击界面第二页可以看到网页变化截取关键部分 https://www.baidu.com/s?wd=python&pn=10
#         # clicking page 2 in the UI reveals the key URL fragment: https://www.baidu.com/s?wd=python&pn=10
# #start_urls = ['https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd=%E5%8D%9A%E7%89%A9%E9%A6%86&medium=0']
#
# def parse(self, response):
# div_list = response.xpath('//div[@class="result-op c-container xpath-log new-pmd"]')
# #print(div_list)
# for div in div_list:
# title = div.xpath('.//h3/a/text()').getall()
# title = "".join(title)
# print(title)
#
# #创建item对象
# item = FirstscrapyItem()
# item['title'] = title
#
# #返回item给pipelines
# yield item
#
# -*- coding: utf-8 -*-
import scrapy
from scrapy import Spider,Request
import re
import datetime
import time
from _datetime import timedelta
#URL = 'https://www.baidu.com/s?rtt=1&bsst=1&cl=2&tn=news&rsv_dl=ns_pc&word={museum}&bt={bt}&et={et}&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&pn={page}'
URL = 'https://www.baidu.com/s?tn=news&rtt=4&bsst=1&cl=2&wd={museum}&medium=2&x_bfe_rqs=03E80&x_bfe_tjscore=0.100000&tngroupname=organic_news&newVideo=12&rsv_dl=news_b_pn&pn={page}'
class FirstSpider(scrapy.Spider):
name = 'first'
#allowed_domains = ['baidu.com']
page = 0
museum = None
startTime = None
endTime = None
start_urls = []
end = False
def __init__(self, museum="博物馆", startTime="2020-01-01", endTime=datetime.datetime.now().strftime("%Y-%m-%d"), *args, **kwargs):
super(FirstSpider, self).__init__(*args, **kwargs)
self.startTime = startTime
self.endTime = endTime
self.museum = museum
self.start_urls = [URL.format(museum=museum, page=self.page * 10)]
def parse(self, response):
flag = 0
div_list = response.xpath('//div[@class="result-op c-container xpath-log new-pmd"]')
if not div_list:
self.end = True
return
for div in div_list:
s_time = div.xpath('.//span[@class ="c-color-gray2 c-font-normal"]/text()').getall()
s_time = "".join(s_time)
time = self.parse_time(s_time)
if time=="":
continue
if (time>self.startTime) and (time<self.endTime):
detail_url = div.xpath('./@mu').getall()
detail_url = "".join(detail_url)
title = div.xpath('.//h3/a//text()').getall()
title = "".join(title).replace("\n", "").replace(" ", "")
author = div.xpath('.//span[@class="c-color-gray c-font-normal c-gap-right"]//text()').getall()
author = "".join(author).replace("\n", "").replace(" ", "")
# description = div.xpath('.//span[@class ="c-font-normal c-color-text"]/text()').getall()
# description = "".join(description).replace("\n", "").replace(" ", "")
print(title)
print(author)
print(time)
# print(description)
print(detail_url)
item = FirstscrapyItem()
item['title'] = title
item['author'] = author
item['museum'] = self.museum
item['detail_url'] = detail_url
item['time'] = time
yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'item': item})
elif time<self.startTime:
flag = 1
break
if flag==1:
return
print('page = {}'.format(self.page))
if (self.page<40) :
self.page += 1
new_url = URL.format(
museum=self.museum, bt=self.startTime, et=self.endTime, page=self.page * 10)
print(new_url)
yield Request(new_url, callback=self.parse, dont_filter=True)
def parse_detail(self,response):
item=response.meta['item']
content_list=response.xpath('//div[@class="index-module_textWrap_3ygOc"]')
if not content_list:
content_list = response.xpath('//p[@class="contentFont"]')
content=""
for div in content_list:
c=div.xpath('.//text()').getall()
c= "".join(c)
content+=c+"\n"
#print(content)
item['content'] = content
yield item
def parse_time(self, s_time):
result_time = ''
regex = re.compile(r"[0-9]{4}年[0-9]{1,2}月[0-9]{1,2}日")
        # 1. e.g. "2017年06月15日 13:41" - parse only the matched "YYYY年MM月DD日"
        # prefix so a trailing clock time does not make strptime raise
        if regex.match(s_time):
            t = time.strptime(regex.match(s_time).group(), '%Y年%m月%d日')
y, m, d = t[0:3]
result_time = datetime.datetime(y, m, d).strftime("%Y-%m-%d")
        # 6天前 ("6 days ago")
elif u'天前' in s_time:
days = re.findall(u'(\d+)天前', s_time)[0]
result_time = (datetime.datetime.now() - timedelta(days=int(days))).strftime("%Y-%m-%d")
        # 昨天 18:03 ("yesterday 18:03")
elif u'昨天' in s_time:
result_time = (datetime.datetime.now() - timedelta(days=int(1))).strftime("%Y-%m-%d")
elif u'前天' in s_time:
result_time = (datetime.datetime.now() - timedelta(days=int(2))).strftime("%Y-%m-%d")
        # 28分钟前 ("28 minutes ago")
elif u'分钟前' in s_time:
result_time = datetime.datetime.now().strftime("%Y-%m-%d")
        # 1小时前 ("1 hour ago")
elif u'小时前' in s_time:
result_time = datetime.datetime.now().strftime("%Y-%m-%d")
elif re.match(r"(\d+)月(\d+)日", s_time):
g=re.search(r'(\d+)月(\d+)日',s_time)
result_time = str(datetime.datetime.now().year)+"-"+str(g.group(1))+"-"+str(g.group(2))
t = time.strptime(result_time, "%Y-%m-%d")
y, m, d = t[0:3]
result_time = datetime.datetime(y, m, d).strftime("%Y-%m-%d")
return result_time
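        # Illustrative expectations (added): parse_time(u"6天前") -> six days
        # ago, parse_time(u"昨天 18:03") -> yesterday, parse_time(u"28分钟前")
        # -> today, all formatted as "YYYY-MM-DD"; unrecognized inputs return "".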
| [
"[email protected]"
]
| |
5f6660405d8bf94af3a1d0da7c371a3746fb9c7b | 8a4bc47685427204365b1668b3d7b5a6fd7546f1 | /service/routing/registration.py | 87716e98f4e46cd52db262f430ddd7eebf9214b7 | []
| no_license | myronww/hello-service | 3b1705ad8c25a6763d5a9673086b01d388b7817a | 8b59054dd4cb09fb5f1697e14a050d8251b3ada8 | refs/heads/master | 2020-04-07T03:43:08.728638 | 2019-04-10T17:12:24 | 2019-04-10T17:12:24 | 158,027,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py |
import os
from flask import Blueprint
from routing.versions.v1 import apply_namespaces as apply_namespaces_v1
def register_blueprints(app, service_prefix):
bp = Blueprint(service_prefix, __name__, url_prefix="/%s" % service_prefix)
apply_namespaces_v1(bp)
app.register_blueprint(bp)
return
| [
"[email protected]"
]
| |
5e7a3f1c287c75701260d7609f3902f206f92a98 | 7e0619c2d4556659e4ae79bfc55e4902ea6ddc33 | /django_learn/asgi.py | 38f9ba67a6d1c3d7c0610d8951cf8096e3060506 | [
"MIT"
]
| permissive | nettaku2/django_learn | b2279310535a7ecc76f47b8281cf481fc6527da7 | ce07c97c8b2dd4828cae1d1b176674c843e66b35 | refs/heads/main | 2023-08-24T04:03:22.291999 | 2021-10-30T17:12:05 | 2021-10-30T17:12:05 | 413,083,446 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
ASGI config for django_learn project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_learn.settings')
application = get_asgi_application()
| [
"[email protected]"
]
| |
62a62155678e8901539b27446bc18682d86188ab | 5609fb00d4ec730384565d551063a31de85e0f72 | /tests/tester.py | 6b2ead953ef5713cbbda786096e2b25f82bac625 | []
| no_license | iron-claw-972/ScoutingApp2020 | 5bb9b5ab929f72a58d4a1cdd22a806631edfdb89 | 0b287b4405ec3cae493736c483dfd8e25d90a674 | refs/heads/master | 2020-05-30T08:57:33.565038 | 2020-03-01T02:43:37 | 2020-03-01T02:43:37 | 189,629,437 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | import unittest
from flask import Flask, request, Blueprint, render_template
from ..util import DatabaseUtil
bp = Blueprint('tester', __name__)
class TestUM(unittest.TestCase):
def setUp(self):
        # the bare attribute access here was a no-op; instantiating is assumed
        # to be the intent (constructor arguments, if any, are unknown)
        self.db = DatabaseUtil.DatabaseUtil()
        print("Got the database")
def test_False(self):
self.assertTrue(True)
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
]
| |
730f554b7dc1eae5c4c877bb1803e310e3b7c464 | 394ccaa8496af51a3302edbe3f4314f781bb3f70 | /src/syntaxAnalyser/helpers/subtreeGrouper.py | b4d3307100eb184670198f5cf5703fc53c59552b | []
| no_license | Banyc/SyntaxStructures | 056415d7314fdfc53553493a71117e177891cae0 | 795534694ebdd1b9aaffc13078683f4ce9c79a91 | refs/heads/master | 2023-05-30T19:39:58.816201 | 2021-06-25T15:24:22 | 2021-06-25T15:24:22 | 379,916,963 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,092 | py |
from typing import Dict, List
from syntaxAnalyser.models.node import *
from syntaxAnalyser.models.sentenceSet import *
from syntaxAnalyser.models.treeInfo import *
class SubtreeGrouper:
def __init__(self) -> None:
# tuple -> treeId
self.treeIds: Dict[tuple, int] = {}
# treeId -> treeInfos
self.trees: Dict[int, TreeInfoSet] = {}
def analyzeSentenceSet(self, sentenceSet: SentenceSet) -> None:
for sentence in sentenceSet.sentences:
_ = self.getTreeId(sentence.constituencyStructure, isUpdateCount=True, sourceSentence=sentence)
def analyzeManySentenceSet(self, sentenceSets: List[SentenceSet]) -> None:
for sentenceSet in sentenceSets:
self.analyzeSentenceSet(sentenceSet)
# return tree ID
def getTreeId(self, root: Node, isUpdateCount: bool = False, sourceSentence: Sentence = None) -> int:
if root is None:
return 0
childTreeIds: List[int] = []
for child in root.children:
childTreeId = self.getTreeId(child, isUpdateCount, sourceSentence)
childTreeIds.append(childTreeId)
treeIdsKey = (root.pos, tuple(childTreeIds))
treeId: int = -1
if treeIdsKey in self.treeIds.keys():
# this tree has already exists
treeId = self.treeIds[treeIdsKey]
else:
# this tree does not exist
treeId = len(self.treeIds) + 1
self.treeIds[treeIdsKey] = treeId
# update self.trees
if isUpdateCount:
# only collect trees that begin with root node with more than one child
if root.terminal is None and len(root.children) > 1:
if not treeId in self.trees.keys():
self.trees[treeId] = TreeInfoSet()
newTreeInfo = TreeInfo()
newTreeInfo.root = root
newTreeInfo.sourceSentence = sourceSentence
newTreeInfo.treeId = treeId
self.trees[treeId].treeInfos.append(newTreeInfo)
return treeId
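    # Illustrative example (added): two sentences that both contain a subtree
    # like (NP (DT the) (NN dog)) produce the same (pos, child-tree-ids) key,
    # so they share one treeId and their TreeInfo records land in the same
    # TreeInfoSet bucket in self.trees.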
| [
"[email protected]"
]
| |
0a315b5bbce497aaba6fa8decae9c9d6b9178f62 | babf0654a09a823693129655ba214f83ead74391 | /preprocessing_code/task3.py | 49ef0b11cf4c57d17c85f9ec34ed449624d11a4c | []
| no_license | rsriram-eth/DSPA-2019 | 6ab0e2ad48db2f60f3b903de3df80c10727e1ffd | 9459e059a4576f8bdc9c28639eb88265c189a964 | refs/heads/master | 2022-11-17T09:30:08.307810 | 2019-06-09T21:48:45 | 2019-06-09T21:48:45 | 191,052,300 | 2 | 0 | null | 2022-11-16T10:56:57 | 2019-06-09T20:29:08 | Java | UTF-8 | Python | false | false | 1,974 | py | """
Preprocessing task: DSPA-2019 : step 3 : purge likes with original timestamp earlier than their post
"""
import pandas as pd
import numpy as np
import dateutil.parser
# Common variables
IP_DIR = "ip_data/1k-users-sorted/streams/" # Input directory
MED_DIR = "scraps/" # Intermediate files
OP_DIR = "op_data/" # Final output directory
BL_DIR = "blacklist/" # Blacklisted files
NUM_THREADS = 35
post_df = pd.read_csv(IP_DIR + "post_event_stream.csv", header=0, sep="|",
usecols=['id', 'creationDate']
)
like_df = pd.read_csv(IP_DIR + "likes_event_stream.csv", header=0, sep="|",
names=['personId', 'postId', 'creationDate']
)
print(post_df.head(n=1))
print(like_df.head(n=1))
"""
Delete likes which are earlier than their post time
"""
num_likes = like_df.shape[0]
print("Original number of likes:", num_likes)
remove_ids = []
for index in range(num_likes):
like_time = like_df.at[index, 'creationDate'] # ISO 8601 format
like_time = dateutil.parser.parse(like_time)
# print("\nLike time: ", like_time)
post_id = like_df.at[index, 'postId']
# print("Post id: ", post_id)
post_row = np.where(post_df['id'] == post_id)[0] # in post_df
# print("Post row: ", post_row)
post_time = post_df.at[post_row[0], 'creationDate']
post_time = dateutil.parser.parse(post_time)
# print("Post time: ", post_time)
if like_time <= post_time: # then drop
remove_ids.append(index)
print("\nLike time: ", like_time)
print("Post time: ", post_time)
print(index)
# print(remove_ids)
print("\nRemoving " + str(len(remove_ids)) + " comments .... ")
like_df = like_df.drop(remove_ids, axis=0)
num_likes = like_df.shape[0]
print("Final number of likes:", num_likes)
# Save cleaned results
like_df.to_csv(OP_DIR + "like_stream.csv", sep='|', index=False)
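# Faster alternative (added sketch, same semantics assumed): merge once instead
# of one np.where lookup per like, then keep rows whose like is strictly later.
# merged = like_df.merge(post_df, left_on='postId', right_on='id',
#                        suffixes=('_like', '_post'))
# keep = pd.to_datetime(merged['creationDate_like']) > pd.to_datetime(merged['creationDate_post'])
# like_df = merged.loc[keep, ['personId', 'postId', 'creationDate_like']] \
#                 .rename(columns={'creationDate_like': 'creationDate'})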
| [
"[email protected]"
]
| |
d9d7d2571fc6feb5d0457b880d3af567d8dcc94e | fb50364834bed6ee1dfb0e3cfd260f13eb3c1b49 | /Section5_Recursion/exploring_maze/maze_exploring.py | 7460cf69536e06a38b3f9b39548e943499dfed65 | []
| no_license | miguel-osuna/PS-Algos-and-DS-using-Python | 521b36ceed26431f8d60c2927533fb1801bd0576 | a9e0f8a7c77ff5b6a3befca5ab93030a9ae35313 | refs/heads/master | 2020-09-27T04:55:00.313981 | 2020-04-12T02:21:55 | 2020-04-12T02:21:55 | 226,435,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,341 | py | # Standard library imports
import os
# Third party imports
import turtle
PART_OF_PATH = "o"
TRIED = "."
OBSTACLE = "+"
DEAD_END = "-"
class Maze:
""" Maze class """
def __init__(self, maze_file_name):
""" Initialices a maze"""
# Setup
maze_file = open(maze_file_name, "r")
self.maze_list = []
rows_in_maze = 0
columns_in_maze = 0
# Read file line by line
for line in maze_file:
col = 0
row_list = []
# Read character by character
for char in line[:-1]:
row_list.append(char)
if char == "S":
self.start_row = rows_in_maze
self.start_col = col
col += 1
rows_in_maze += 1
self.maze_list.append(row_list)
columns_in_maze = len(row_list)
# Sets num of rows and columns as class attributes
self.rows_in_maze = rows_in_maze
self.columns_in_maze = columns_in_maze
# Sets axis
self.x_translate = -columns_in_maze / 2
self.y_translate = rows_in_maze / 2
# Creates turtle and window object
self.t = turtle.Turtle(shape="turtle")
self.w = turtle.Screen()
# Sets coordinates for bottom left and top right corners respectively
self.w.set_world_coordinates(
-(columns_in_maze - 1) / 2 - 0.5,
-(rows_in_maze - 1) / 2 - 0.5,
(columns_in_maze - 1) / 2 + 0.5,
(rows_in_maze - 1) / 2 + 0.5,
)
def __getitem__(self, idx):
""" Operator overloading """
return self.maze_list[idx]
def draw_centered_box(self, x, y, color):
"""" Draws box """
self.t.up()
self.t.goto(x - 0.5, y - 0.5)
self.t.color(color)
self.t.fillcolor(color)
self.t.setheading(90)
self.t.down()
self.t.begin_fill()
for i in range(4):
self.t.forward(1)
self.t.right(90)
self.t.end_fill()
def draw_maze(self):
""" Displays the representation of the Maze """
self.t.speed(10)
self.w.tracer(0)
for y in range(self.rows_in_maze):
for x in range(self.columns_in_maze):
if self.maze_list[y][x] == OBSTACLE:
self.draw_centered_box(
x + self.x_translate, -y + self.y_translate, "red"
)
self.t.color("black")
self.t.fillcolor("blue")
self.w.update()
self.w.tracer(1)
def move_turtle(self, x, y):
""" Move turtle in the box """
self.t.up()
self.t.setheading(self.t.towards(x + self.x_translate, -y + self.y_translate))
self.t.goto(x + self.x_translate, -y + self.y_translate)
def drop_bread_crumb(self, color):
""" Drop a bread crumb """
self.t.dot(10, color)
def update_position(self, row, col, val=None):
""" Update position of the turtle """
if val:
self.maze_list[row][col] = val
self.move_turtle(col, row)
if val == PART_OF_PATH:
color = "green"
elif val == OBSTACLE:
color = "red"
elif val == TRIED:
color = "black"
elif val == DEAD_END:
color = "yellow"
else:
color = None
if color:
self.drop_bread_crumb(color)
def is_exit(self, row, col):
""" Check if turtle exits """
return (
row == 0
or row == self.rows_in_maze - 1
or col == 0
or col == self.columns_in_maze - 1
)
def search_from(maze, start_row, start_column):
""" Search turtle position """
maze.update_position(start_row, start_column)
# Try each of four directions from this point until we find a way out
# Base cases
# 1. If ran into obstacle, return False
if maze[start_row][start_column] == OBSTACLE:
return False
# 2. If found a square that has already been explored, return False
if (
maze[start_row][start_column] == TRIED
or maze[start_row][start_column] == DEAD_END
):
return False
# 3. If found an outside edge not occupied by an obstacle
if maze.is_exit(start_row, start_column):
maze.update_position(start_row, start_column, PART_OF_PATH)
return True
#
maze.update_position(start_row, start_column, TRIED)
# Otherwise, use logic OR to test other directions
found = (
search_from(maze, start_row - 1, start_column)
or search_from(maze, start_row + 1, start_column)
or search_from(maze, start_row, start_column - 1)
or search_from(maze, start_row, start_column + 1)
)
if found:
maze.update_position(start_row, start_column, PART_OF_PATH)
else:
maze.update_position(start_row, start_column, DEAD_END)
return found
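# Note (added): marking a square TRIED before the four recursive calls is what
# keeps the search from revisiting squares and recursing forever; PART_OF_PATH
# and DEAD_END are only written while the recursion unwinds.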
def main():
base_dir = os.path.dirname(os.path.abspath(__file__))
maze_file = os.path.join(base_dir, "maze_test.txt")
myMaze = Maze(maze_file)
myMaze.draw_maze()
myMaze.update_position(myMaze.start_row, myMaze.start_col)
search_from(myMaze, myMaze.start_row, myMaze.start_col)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
f3ac2e67d1f36ed5a9f2c18e6e7da8a7bc004fb6 | 1f1da172cbe7d9a973c2b668340c48fb0fce3aae | /src/ReverseComplement.py | a51d3cc91a3a4118a2f7e25878fdbf9f5026c3ce | []
| no_license | erb13020/coursera-bioinformatics-specialization | 8353b21fc48bbc50a1ef7e66c21e3aa6e5179ab3 | 7a940cebe2cd7b8c69fca8e85dfec28e3a630482 | refs/heads/master | 2022-12-13T10:01:06.206965 | 2020-09-13T19:25:02 | 2020-09-13T19:25:02 | 274,309,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | def reverseComplement(genome):
'''
Solve the Reverse Complement problem
Input: A DNA string Pattern
Output: Patternrc , the reverse complement of Pattern
'''
genome = genome[::-1] # reverse text
genome = genome.upper() # ensure text is uppercase
nucleotide = { # dictionary for finding complements for nucleotides
'A': 'T',
'T': 'A',
'G': 'C',
'C': 'G'
}
complement = [] #variable to hold result
for n in genome:
complement.append(nucleotide[n])
return ''.join(complement)
| [
"[email protected]"
]
| |
db3e2088bfc29185f472eb8137fe2aecc73ab962 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /Gauss_v45r8/Gen/DecFiles/options/43900069.py | 99e22a4ef36194235a12c7cff18970ba37ba0ea4 | []
| no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | # file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/43900069.py generated: Fri, 27 Mar 2015 15:48:12
#
# Event Type: 43900069
#
# ASCII decay Descriptor: pp->( H_20 -> ( H_30 -> b anti-b ) ( H_30 -> b anti-b) )
#
from Configurables import Generation
Generation().EventType = 43900069
Generation().SampleGenerationTool = "Special"
from Configurables import Special
Generation().addTool( Special )
Generation().Special.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Higgs_AA,bbbb=mH125GeV,mA43GeV,tA10ps,HidValley.dec"
Generation().Special.CutTool = ""
Generation().FullGenEventCutTool = "LoKi::FullGenEventCut/FourTracksFromHVPionInAcceptance"
from Configurables import LHCb__ParticlePropertySvc
LHCb__ParticlePropertySvc().Particles = [ "H_20 88 35 0.0 125.0 1.65e-22 Higgs'0 35 4.0e-03" , "H_30 89 36 0.0 43.0 1.0e-11 A0 36 0.0e+00" ]
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/HidValleyH.py" )
Generation().Special.PythiaProduction.Commands[:0] = [
"pyinit pdtinput $DECFILESROOT/ppfiles/HiddenValleyHiggses_bbbar.pdt"
]
Generation().Special.Pythia8Production.Commands += [
"36:onMode = off"
, "36:onIfMatch = 5 -5"
]
from Configurables import LoKi__FullGenEventCut
Generation().addTool( LoKi__FullGenEventCut, "FourTracksFromHVPionInAcceptance" )
tracksInAcc = Generation().FourTracksFromHVPionInAcceptance
tracksInAcc.Code = " count ( isGoodDVfromHVPion ) > 0 "
tracksInAcc.Preambulo += [
"from GaudiKernel.SystemOfUnits import ns, GeV, mrad"
, "isHVPion = ( 'H_30' == GID )"
, "isGoodDVDaughter = ( (~GVEV) & GCHARGED & ( GP > 2.0*GeV ) & ( GTHETA < 400.0*mrad ) )"
, "isGoodDVfromHVPion = ( isHVPion & ( GNINTREE( isGoodDVDaughter, HepMC.descendants ) > 3 ) )"
]
| [
"[email protected]"
]
| |
eaa0bc98d033d52b4c153cbc5339e5633d9227a1 | 82e9579ebe6fc76f49cb9d33351383eac6790a19 | /exs/search/index.py | 1e7da5b3e99124c7d6a08c90dcddc059b19a919b | []
| no_license | andtsa/experiments.py | f62be2b1712a756b57c5345b64b41c0c93778503 | 182de0c3b959a0a460bf6ba26e6937157e995610 | refs/heads/master | 2022-04-09T04:30:39.302430 | 2020-03-03T16:40:59 | 2020-03-03T16:40:59 | 109,993,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | ########## Search ##########
def search(query, field):
place = 'Null'
for i in range(len(field)):
if field[i] == query:
place = i
break
return place
# print( search(10, [1,2,34,10,5,67,8,9,10,24,67,3,3]) )
| [
"[email protected]"
]
| |
920c5762a8970d91b790a58679726585c544c9fc | c9d839d04befa09ef8dffd249e0691910874c42b | /exercises/chapter-10/10.2.py | 29bd51ef99b97f9a76535c5a9d55d2c8e7fa8b19 | []
| no_license | imsamuel/pcc | d514278147257c8f63e0dc2aa455996ddaea8ea2 | 962dfc55ee72fc853d5a65ca485d7c364354c9f4 | refs/heads/master | 2022-11-24T17:04:41.184796 | 2020-07-26T07:14:13 | 2020-07-26T07:14:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | """
10-2. Learning C:
You can use the replace() method to replace any word in a string with a
different word. Here’s a quick example showing how to replace 'dog' with 'cat'
in a sentence:
>>> message = "I really like dogs."
>>> message.replace('dog', 'cat')
'I really like cats.'
Read in each line from the file you just created, learning_python.txt, and
replace the word Python with the name of another language, such as C. Print
each modified line to the screen.
"""
with open("learning_python.txt") as file:
lines = file.readlines()
for line in lines:
print(line.replace("Python", "C").rstrip())
| [
"[email protected]"
]
| |
3fc3ea410505a3bbf1773b01dadc2d31cc7d4e6b | 26957ac501c8ec4d576009f40d2a8e6b561fa019 | /bandapp/migrations/0018_auto_20200516_1427.py | 187d21ed0c510f6f8e9bcc69dff97f68255de9f9 | []
| no_license | Bimsickle/HIT237_Django_Punk | b29d25f2b3e305845682790cff5feb77dba8366a | a72a5e761e3d8b3353d6a14a7e2a818a899a7181 | refs/heads/master | 2022-11-23T23:09:43.121789 | 2020-08-03T09:31:01 | 2020-08-03T09:31:01 | 284,658,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | # Generated by Django 3.0.3 on 2020-05-16 04:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bandapp', '0017_band_bio'),
]
operations = [
migrations.AlterModelOptions(
name='band',
options={'ordering': ['band_name']},
),
migrations.AlterField(
model_name='band',
name='bio',
field=models.TextField(blank=True, max_length=1000),
),
]
| [
"[email protected]"
]
| |
5855cf5c1bc23ba8483f55604d4385cc797f272d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2180/60696/250099.py | ba5d6d479f9f6bc64cfa12e11cb5e09cf0a05adc | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | def tell_sub_strings(s):
sub_strings = []
n = len(s)
for i in range(n):
for j in range(i+1, n+1):
sub_strings.append(s[i:j])
return sub_strings
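# Example (added): tell_sub_strings("ab") -> ["a", "ab", "b"]; a string of
# length n yields n*(n+1)/2 substrings, so the counting loop below performs
# O(n^2) substring lookups against the longer input.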
if __name__ == '__main__':
s1 = input()
s2 = input()
count = 0
if len(s1)<len(s2):
sub_strings = tell_sub_strings(s1)
for sub_string in sub_strings:
count += s2.count(sub_string)
else:
sub_strings = tell_sub_strings(s2)
for sub_string in sub_strings:
count += s1.count(sub_string)
print(count,end='') | [
"[email protected]"
]
| |
af2a2cc65f022849e70a268c54ab6efbadc98166 | 0aeefc4dee24d50b71b40f273dc59526a2e25bc2 | /trade.py | b85431546da798f776c8ea31364443db46e4a679 | [
"MIT"
]
| permissive | srirag-vuppala/Trader-bot | 36f0b2e1030bf1c0abdf847e20a5da60639ce2f8 | 28659420a9bc32be79db77dfbdfed39447c53a32 | refs/heads/master | 2022-11-23T15:03:47.480936 | 2020-07-30T22:17:20 | 2020-07-30T22:17:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | import requests
import json
import os
import alpaca_trade_api as tradeapi
from config import *
# Setting up environmental variables
os.environ['APCA_API_KEY_ID'] = '[use your own key id here]'
os.environ['APCA_API_SECRET_KEY'] = '[use your own key here]'
os.environ['APCA_API_BASE_URL'] = 'https://paper-api.alpaca.markets'
BASE_URL = "https://paper-api.alpaca.markets"
ACCOUNT_URL = "{}/v2/account".format(BASE_URL)
ORDERS_URL = "{}/v2/orders".format(BASE_URL)
HEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY}
api = tradeapi.REST(api_version='v2')
def main():
response = create_order("VIX", 3, "buy", "limit", "gtc", 36.00)
print(response)
# orders = get_orders()
# print(orders)
account = api.get_account()
aapl = api.alpha_vantage.historic_quotes('AAPL', adjusted=True, output_format='csv', cadence='weekly')
# print(aapl)
def create_order(symbol, qty, side, type, time_in_force, limit_price=None, stop_price=None, client_order_id=None,
order_class=None, take_profit=None, stop_loss=None):
# data = {
# "symbol": symbol,
# "qty": qty,
# "side": side,
# "type": type,
# "time_in_force": time_in_force
# }
# api.submit_order(json=data)
return api.submit_order(symbol=symbol,
qty=qty,
side=side,
time_in_force=time_in_force,
type=type,
client_order_id=client_order_id,
stop_price=stop_price,
order_class=order_class,
limit_price=limit_price,
                            # pass the caller-supplied bracket legs straight
                            # through; previously these parameters were accepted
                            # but silently rebuilt from limit/stop price
                            take_profit=take_profit,
                            stop_loss=stop_loss
                            )
# r = requests.post(ORDERS_URL, json=data, headers=HEADERS)
#
# return json.loads(r.content)
def get_orders():
r = requests.get(ORDERS_URL, headers=HEADERS)
return json.loads(r.content)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
39ecdc56ce356a15ab5aa007c86b1e29b1e6a38f | e223b7652a22a79e812d4909ea25cd7689383f9b | /tooner/__init__.py | 056d14a6b2d64e44dafd1b5ed7993c7c25e20e37 | [
"MIT"
]
| permissive | jakebrehm/tooner | 89974d79d2e76e2069a1e1ef3b6db877489fc28a | 046034eeee1c5e33b713d8a4369c4d5d43a13aab | refs/heads/master | 2023-07-17T02:27:40.259431 | 2021-09-11T01:12:32 | 2021-09-11T01:12:32 | 254,235,709 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | from .invasions import *
from .launcher import *
| [
"[email protected]"
]
| |
183322315b0144ca5291afec80efa5b97449663a | 93f98dde611c138055629ae3b3b2cb1e301adc49 | /DS/settings.py | 2b725974d99e96e94544ee4b139fca8d8c4ebdf2 | [
"BSD-3-Clause"
]
| permissive | gitter-badger/djangochannel | ebd9a69bef62376d9877a5aece3f54b211880736 | f9e33254739457c461e84b66879172007512f9b0 | refs/heads/master | 2020-08-06T01:43:26.679657 | 2019-10-04T10:16:32 | 2019-10-04T10:16:32 | 212,788,313 | 0 | 0 | BSD-3-Clause | 2019-10-04T10:18:48 | 2019-10-04T10:18:47 | null | UTF-8 | Python | false | false | 5,094 | py | import os
import datetime
from .ckeditor import *
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', 'djangochannel.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.staticfiles',
'django.contrib.sitemaps',
'debug_toolbar',
'bootstrap4',
'mptt',
'drf_yasg',
# Editor
'ckeditor',
'ckeditor_uploader',
'rest_framework',
# 'rest_framework.authtoken',
# Auth
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.vk',
# my apps
'backend.pages',
'backend.blog',
'backend.courses',
'backend.forum',
'backend.profile',
'backend.dc_tests',
'backend.reviews',
'backend.moderation',
'backend.pay',
'backend.contact',
'backend.message',
'backend.dc_task',
'backend.utils',
'backend.followers',
'backend.community',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DS.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'DS.wsgi.application'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
# 'rest_framework.permissions.IsAdminUser',
'rest_framework.permissions.AllowAny',
),
'PAGE_SIZE': 10,
'DEFAULT_AUTHENTICATION_CLASSES': (
# 'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PAGINATION_CLASS':
'rest_framework_json_api.pagination.PageNumberPagination',
}
# JWT_AUTH = {
# 'JWT_EXPIRATION_DELTA': datetime.timedelta(days=2),
# 'JWT_ALLOW_REFRESH': True,
# 'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7) # default
# }
#
# DJOSER = {
# 'SEND_ACTIVATION_EMAIL': True,
# # 'SEND_CONFIRMATION_EMAIL': True,
# 'ACTIVATION_URL': 'auth/activate/{uid}/{token}/',
# 'PASSWORD_RESET_SHOW_EMAIL_NOT_FOUND': True,
# 'PASSWORD_RESET_CONFIRM_URL': 'auth/reset/confirm/{uid}/{token}/',
# 'TOKEN_MODEL': None
# }
# Allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 1
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_USERNAME_BLACKLIST = ["admin", "administrator", "moderator", "DjangoSchool"]
ACCOUNT_USERNAME_MIN_LENGTH = 4
LOGIN_REDIRECT_URL = "/"
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = '/'
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
LANGUAGE_CODE = 'ru-ru'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
INTERNAL_IPS = '127.0.0.1'
# CORS_ORIGIN_ALLOW_ALL = True
MY_INFO = 80
MESSAGE_LEVEL = MY_INFO
MY_PRIVAT_MESS = 81
MESSAGE_LEVEL = MY_PRIVAT_MESS
TASK_MESS = 82
MESSAGE_LEVEL = TASK_MESS
try:
from .local_settings import *
except ImportError:
from .prod_settings import *
| [
"[email protected]"
]
| |
65d7a977a623f1cd517b77b86c50f00b23882fe2 | a735ff9df57fa54eb136b0ae1d4f37cfb79a22cd | /main_app/cnn.py | a99f7e88aae1a97d296623a9ffcb08ef08f1c8c9 | []
| no_license | arascry/pollock_ai | d95e7b4209f575f413a5a9064dbcd57238f1fb40 | 57372bee39dce71e6572ea5937998d422b99a4be | refs/heads/master | 2023-07-27T03:00:44.957258 | 2020-09-22T16:13:52 | 2020-09-22T16:13:52 | 294,828,393 | 0 | 0 | null | 2020-09-11T23:01:10 | 2020-09-11T23:01:09 | null | UTF-8 | Python | false | false | 2,696 | py | import tensorflow as tf
import os
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
image_size = (500, 500)
batch_size = 2
path = ''
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
path,
validation_split=0.33,
subset='training',
seed=2000,
image_size=image_size,
batch_size=batch_size
)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
path,
validation_split=0.33,
subset='validation',
seed=2000,
image_size=image_size,
batch_size=batch_size
)
data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
layers.experimental.preprocessing.RandomRotation(0.1),
]
)
def make_model(input_shape, num_classes):
inputs = keras.Input(shape=input_shape)
x = data_augmentation(inputs)
x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(x)
x = layers.Conv2D(64, 3, strides=2, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.Conv2D(1024, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
previous_block_activation = x
for size in [128, 256, 512, 728]:
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.SeparableConv2D(size, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.MaxPooling2D(3, strides=2, padding="same")(x)
residual = layers.Conv2D(size, 1, strides=2, padding="same")(
previous_block_activation
)
x = layers.add([x, residual])
previous_block_activation = x
x = layers.SeparableConv2D(2048, 3, padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.GlobalAveragePooling2D()(x)
if num_classes == 2:
activation = "sigmoid"
units = 1
else:
activation = "softmax"
units = num_classes
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(units, activation=activation)(x)
return keras.Model(inputs, outputs)
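# Note (added): the size loop above follows an Xception-style pattern -
# separable convolutions plus a strided 1x1 projection on the residual branch,
# so both tensors agree in shape before layers.add() merges them.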
model = make_model(input_shape=image_size + (3,), num_classes=2)
epochs = 75
callbacks = [
keras.callbacks.ModelCheckpoint("save_at_{epoch}.h5"),
]
model.compile(
optimizer=keras.optimizers.SGD(momentum=0.5),
loss="binary_crossentropy",
metrics=["accuracy"],
)
model.fit(
train_ds, epochs=epochs, callbacks=callbacks, validation_data=val_ds,
)
model.save(path)
| [
"[email protected]"
]
| |
eb86d2cf93f9a3d301e370ba3dc0c468bf5628b6 | 0839ca3b2d745dab0b23d2b9ccae9bd5e99f1883 | /models/purchase.py | 3f661932338674872dbda811d69fe716d0872527 | []
| no_license | quanbuinovobi/purchase_order_enhancement | f0bc267956df3a90ea5d9e8bb08e3a25c283b7b3 | 25aee5edfd4b182c75213fbeb456d855206c4fd8 | refs/heads/master | 2020-05-18T22:32:07.039674 | 2019-05-16T04:18:30 | 2019-05-16T04:18:30 | 184,693,271 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,579 | py | import datetime
from odoo import api, fields, models
from odoo.exceptions import UserError, AccessError
from logging import warning
# logging.warning(vals) => console.log
class PurchaseOrder(models.Model):
# Inherit model
_inherit = 'purchase.order'
# Define fields (variables)
active = fields.Boolean(
'Active', default=True,
help="If unchecked, it will allow you to hide the purchase order without removing it.")
# @api.multi
# def action_toggle_active(self):
# for order in self:
# logging.warning(order.state)
# if order.state not in ['cancel', 'done']:
# raise UserError("Only 'Cancel' or 'Lock' Purchase Order is allowed ")
# else:
# order.active = not order.active
@api.multi
def write(self, values):
# Check archived purchase order status
self._check_archived_state(values)
# Check user group
self._check_archive_user_role(values)
return super().write(values)
@api.model
def archive_purchase_order(self):
warning("THIS IS MY CRON")
self._check_old_purchase_order()
# Check archived purchase order status function
def _check_archived_state(self, values):
if 'active' in values:
for order in self:
warning(order.state)
if order.state not in ['cancel', 'done']:
raise UserError("Only 'Cancel' or 'Lock' Purchase Order is allowed ")
def _check_archive_user_role(self, values):
if 'active' in values :
if not self.env.user.has_group('purchase.group_purchase_manager') :
raise UserError("Only 'Manager' can archive Purchase Order ")
def _check_old_purchase_order(self):
current_date = fields.datetime.now()
for order in self.search([]):
write_date = order.write_date
# Get life span from global data
lifespan = int(self.env['ir.config_parameter'].sudo().get_param('purchase.order.lifespan'))
lifespan_unit = self.env['ir.config_parameter'].sudo().get_param('purchase.order.lifespan_unit')
            # Build the timedelta keyword dynamically (e.g. days=30) without eval
            delta = datetime.timedelta(**{lifespan_unit: lifespan})
            # If current date > write date + lifespan
            if current_date > write_date + delta:
                if order.state in ['cancel', 'done'] and order.active:
order.active = False
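# Hedged sketch (assumption, not shipped with this module): the scheduled action
# that invokes archive_purchase_order() would be declared in an XML data file
# roughly like this; the record id and interval values are placeholders.
#
# <record id="ir_cron_archive_purchase_order" model="ir.cron">
#     <field name="name">Purchase: archive old orders</field>
#     <field name="model_id" ref="purchase.model_purchase_order"/>
#     <field name="state">code</field>
#     <field name="code">model.archive_purchase_order()</field>
#     <field name="interval_number">1</field>
#     <field name="interval_type">days</field>
# </record>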
| [
"[email protected]"
]
| |
9b214c98e4b30a70194d4fab96e9827e9da20c9c | 0bf34f2fef64eb6d2c8cf7185a6a1d897b7a3ff4 | /covidprojectss.py | 96e712511c9ef4702d2b65aa3e1ae1abf2b0cf0f | []
| no_license | harshitaprabhu99/Covid19-Voice-Assistant-and-Visualiser | c17af044d5a414ab5e668b4cecef83546cdfc0c5 | 2679ec53bc12a2be8c49727dcdaa9bd873ba7f21 | refs/heads/main | 2023-05-22T10:51:04.251510 | 2021-06-15T14:11:03 | 2021-06-15T14:11:03 | 377,184,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,504 | py | import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from PIL import Image
def app():
st.title("COVID-19 DASHBOARD")
st.markdown('The dashboard will help you visualize the Covid-19 Situation across the globe')
st.sidebar.title("Visualization Selector")
st.sidebar.markdown("Select the Charts/Plots accordingly:")
df = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/covid dataset.csv")
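    # Optional sketch (assumption, not in the original app): wrap the CSV loads
    # in a cached helper so Streamlit does not re-read the files on every
    # widget interaction, e.g.
    #   @st.cache
    #   def load_csv(path):
    #       return pd.read_csv(path)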
select = st.sidebar.selectbox('Visualization type', ['Bar plot'], key='1')
    if not st.sidebar.checkbox("Hide", True, key='2'):
if select=='Bar plot':
st.title("Country wise Total Cases and Total Deaths")
fig = go.Figure(data=[
go.Bar(name='Deaths', x=df['country_name'][:10], y=df['country_total_deaths'][:10]),
go.Bar(name='Total', x=df['country_name'][:10], y=df['country_total_cases'][:10]),
go.Bar(name='Recovered', x=df['country_name'][:10], y=df['country_total_recovered'][:10])])
st.plotly_chart(fig)
df2 = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/covid dataset.csv")
select1 = st.sidebar.selectbox('Select', ['Death', 'total','Recovered'], key='3')
    if not st.sidebar.checkbox("Hide", True, key='12'):
if select1 == 'Death':
fig = px.line(df2, x="country_name", y="country_total_deaths")
st.plotly_chart(fig)
elif select1 == 'total':
fig = px.line(df2, x="country_name", y="country_total_cases")
st.plotly_chart(fig)
elif select1=='Recovered':
fig=px.line(df2,x="country_name",y="country_total_recovered")
st.plotly_chart(fig)
df = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/state_level_latest.csv")
select = st.sidebar.selectbox('Visualization type', ['Bar plot'], key='4')
if not st.sidebar.checkbox("Hide", True, key='5'):
if select=='Bar plot':
st.title("State wise Confirmed cases, Recovered cases and Death cases")
fig = go.Figure(data=[
go.Bar(name='Confirmed', x=df['State'][:10], y=df['Confirmed'][:10]),
go.Bar(name='Recovered', x=df['State'][:10], y=df['Recovered'][:10]),
go.Bar(name='Deaths', x=df['State'][:10], y=df['Deaths'][:10])])
st.plotly_chart(fig)
df2 = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/state_level_latest.csv")
select1 = st.sidebar.selectbox('Select', ['Confirmed', 'Recovered','Deaths'], key='6')
if not st.sidebar.checkbox("Hide", True, key='7'):
if select1 == 'Confirmed':
fig = px.line(df2, x="State", y="Confirmed")
st.plotly_chart(fig)
elif select1 == 'Recovered':
fig = px.line(df2, x="State", y="Recovered")
st.plotly_chart(fig)
elif select1=='Deaths':
fig=px.line(df2,x="State",y="Deaths")
st.plotly_chart(fig)
df2 = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/State wise ratio.csv")
select = st.sidebar.selectbox('Visualization type', ['state ratios'], key='8')
if not st.sidebar.checkbox("Hide", True, key='9'):
if select=='state ratios':
st.title("State wise cases in ratio")
fig = go.Figure(data=[
go.Bar(name='active', x=df2['states_name'][:10], y=df2['states_active_ratio'][:10]),
go.Bar(name='discharged', x=df2['states_name'][:10], y=df2['states_discharged_ratio'][:10]),
go.Bar(name='death', x=df2['states_name'][:10], y=df2['states_death_ratio'][:10])])
st.plotly_chart(fig)
df2 = pd.read_csv("C:/Users/Harshita/AppData/Local/Programs/Python/Python39/multi-page-app/apps/State wise ratio.csv")
select1 = st.sidebar.selectbox('Select', ['active', 'discharged','Deaths'], key='10')
if not st.sidebar.checkbox("Hide", True, key='11'):
if select1 == 'active':
fig = px.line(df2, x="states_name", y="states_active_ratio")
st.plotly_chart(fig)
elif select1 == 'discharged':
fig = px.line(df2, x="states_name", y="states_discharged_ratio")
st.plotly_chart(fig)
elif select1=='Deaths':
fig=px.line(df2,x="states_name",y="states_death_ratio")
st.plotly_chart(fig)
| [
"[email protected]"
]
| |
f708463314792a83ec8427cd3f2ab0733c841f16 | c90a08909c14f69b092731b6c122f0a555029947 | /day5/part2/main.py | 0988106a157bf37f7dc7c704b509dba27647b369 | []
| no_license | tylerXMD/advent_of_code_2018 | cb97302db355ef64b40604c9580d1df10565166d | 20cc919d231f1129b346a0fb1fe2b28db450f538 | refs/heads/master | 2020-04-09T16:42:24.199678 | 2019-05-11T03:03:16 | 2019-05-11T03:03:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | #!/usr/bin/env python3
#only 1 line
line = None
with open("../input.txt", "r") as f:
for thing in f:
line = thing.strip()
reaction_diff = abs(ord('A') - ord('a'))
already_removed = []
#from part1
def compact_polymer(s):
reacted = True
while reacted:
reacted = False
i = 0
while i < len(s)-1:
if abs(ord(s[i]) - ord(s[i+1])) == reaction_diff:
s = s[:i] + s[i+2:]
reacted = True
else:
i += 1
return len(s)
min_size = len(line) #init to length of whole line, no reacting
for char in line:
if char not in already_removed: #don't try same char twice
        # swapcase flips the letter's case directly, covering both branches
        char_complement = char.swapcase()
already_removed.append(char)
already_removed.append(char_complement)
min_size = min(min_size, compact_polymer(line.replace(char,"").replace(char_complement,"")))
print(min_size)
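# Faster alternative (sketch): the same reaction can be computed in a single
# linear pass with a stack, which makes trying all 26 removals much cheaper.
def compact_polymer_fast(s):
    stack = []
    for c in s:
        if stack and abs(ord(stack[-1]) - ord(c)) == reaction_diff:
            stack.pop()  # adjacent opposite-case pair reacts and annihilates
        else:
            stack.append(c)
    return len(stack)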
| [
"[email protected]"
]
| |
d1f76a5e460088e85766d92f36f6cece6cfc9b93 | 7e953f5361aae55a16b0361d44bf72ce025450aa | /Core/FFT/test_bx200_nand_rules_mixin.py | f8c40933d3e0e879a56c34164bc21bfa5d2ff3c4 | []
| no_license | MicronShanghaiDE/SEV | d2d5048212f60ae6b4b9f02376b5799d4d0fd92d | 31cd12b366b0ed8d8a6a8bb800485f68443e8ecc | refs/heads/master | 2021-01-21T13:14:23.403348 | 2015-08-11T03:02:03 | 2015-08-11T03:02:03 | 40,517,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,854 | py | import dm3
import sys
import time
from Util.decorator import method_tracer
class TestBX200NandRules:
def set_up(self):
self.unlock()
self.ThrowException = True
self.InterfaceMode = -1 # -1 indicates an unknown interface
def tear_down(self):
pass
def genericNandCommand(self, nand_cmd, mode = 0, byte_size = 0, buf = None):
ssd = self.ssd
# @todo Need to figure out why a delay is needed to make this work.
time.sleep(0.001)
# Phase 1 command
tfrBuf = dm3.Buffer(1)
tfrBuf.FillZeros()
buf_offset = 0
self.assertLess(len(nand_cmd), 32, "Only 32 commands can fit into a single sector")
# Initialize the transfer buffer with the command data.
for tfr_entry in nand_cmd:
self.assertLess(len(tfr_entry), 16, "Each command is padded to 16 bytes")
tfrBuf.SetBytes(buf_offset, tfr_entry)
buf_offset += 0x10
# Send Phase 1 command to the drive
cmd = dm3.sata.AtaCommand28BitWritePio(tfrBuf, 512, 0x10, 0x01, 0x00, 0x00, 0x00, 0xA0, 0xFA)
# Adjust the context so that no exception is generated on call
with dm3.DeviceContext(ssd.device, throwsExceptions = self.ThrowException):
rsp = ssd.AtaCall(cmd)
if not rsp.Success:
return rsp
# Send Phase 2 command to the drive
if (mode == 1):
# Read command
self.assertGreater(byte_size, 0)
sector_count = (int(byte_size) + 511) / 512 # Roundup sector count based on requested bytes
if buf is None:
buf = dm3.Buffer(sector_count)
buf.FillZeros()
cmd = dm3.sata.AtaCommand28BitReadPio(buf, sector_count * 512, 0x10, sector_count, byte_size & 0xFF, (byte_size >> 8) & 0xFF, (byte_size >> 16) & 0xFF, 0xA1, 0xFA)
with dm3.DeviceContext(ssd.device, throwsExceptions = self.ThrowException):
rsp = ssd.AtaCall(cmd)
return buf
elif (mode == 2):
# Write command
self.assertGreater(byte_size, 0)
sector_count = (int(byte_size) + 511) / 512 # Roundup sector count based on requested bytes
cmd = dm3.sata.AtaCommand28BitWritePio(buf, sector_count * 512, 0x10, sector_count, byte_size & 0xFF, (byte_size >> 8) & 0xFF, (byte_size >> 16) & 0xFF, 0xA2, 0xFA)
# Adjust the context so that no exception is generated on call
with dm3.DeviceContext(ssd.device, throwsExceptions = self.ThrowException):
rsp = ssd.AtaCall(cmd)
return rsp
else:
# None data command
return rsp # No need to send phase 2 command for no-data command
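    # Usage sketch (assumes ch0/ce0 is populated): build a command list with the
    # genericNandCmd* helpers, then hand it to genericNandCommand, e.g.
    #   cmd = ssd.nand.genericNandCmdModeSetup(0, 0)
    #   cmd += ssd.nand.genericNandCmdDirectCmd(0x70)  # ONFI READ STATUS
    #   cmd += ssd.nand.genericNandCmdModeEnd(True)
    #   status_buf = self.genericNandCommand(cmd, mode = 1, byte_size = 1)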
@method_tracer()
def nandResetNandInterface(self):
'''
Issues a RESET(0xFF) command to all NAND devices and sets the interface to default state (SDR)
'''
ssd = self.ssd
# When enumerating through NAND, disable exceptions in case unpopulated channel touched
exc_setting = self.ThrowException
self.ThrowException = False
        # Go through all the NAND and issue a RESET that puts each device into SDR mode
for ch in xrange(0, ssd['max_ch']):
for ce in xrange(0, ssd['max_ce']):
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(0xFF) # Issue the Reset Command
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
self.InterfaceMode = 0 # After reset, interface is in SDR(0) mode
# Send a command to set the controller interface into SDR mode and poll for reset completion
cmd = ssd.nand.genericNandCmdModeSetup(0, 0) # Ch0/CE0 should exist on all drives
cmd += ssd.nand.genericNandCmdInterfaceMode(self.InterfaceMode) # Select the reset interface mode
cmd += ssd.nand.genericNandCmdClkRate(35) # Select the default timing mode
cmd += ssd.nand.genericPollDirectStatus() # Wait for reset to complete
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
        # Restore the exception flag
self.ThrowException = exc_setting
return
@method_tracer()
def nandSetInterfaceMode(self, mode, freq = None):
'''
Sets the NAND interface's mode (sdr/ddr/ddr2) and adjusts the interface frequency
If the current interface is not known, a reset is issued to return the NAND to a known state
Note: this changes the state across all NAND devices.
'''
ssd = self.ssd
modes = {'sdr' : 0, 'ddr' : 1, 'ddr2' : 2, 'ddr3' : 3}
        self.assertTrue(modes[mode] == modes['sdr'] or modes[mode] == modes['ddr']) # Only supporting these modes in code
# Determine the default frequency and the correpsonding timing mode
if modes[mode] == modes['sdr']:
f = 35 if freq == None else freq
op_mode = ssd.nand.nandGetSDRTimingMode(f)
else:
f = 200 if freq == None else freq
op_mode = ssd.nand.nandGetDDRTimingMode(f)
# Formats op mode and target mode into the timing mode registers described in ONFI spec.
timing_mode = (modes[mode] << 4) | op_mode
# Only switch mode if not already in the selected mode
if modes[mode] != self.InterfaceMode:
# When going from nvddr back to sdr, use reset according to onfi spec
if modes[mode] == modes['sdr'] or self.InterfaceMode == -1:
self.nandResetNandInterface()
# When enumerating through NAND, disable exceptions in case unpopulated channel touched
exc_setting = self.ThrowException
self.ThrowException = False
# Go through all the NAND and update the timing mode register to reflect the target mode
for ch in xrange(0, ssd['max_ch']):
for ce in xrange(0, ssd['max_ce']):
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
# Write the Timing Mode register
cmd += ssd.nand.genericNandCmdSetFeature(0x01, timing_mode)
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
# Restore the throw excepting setting
self.ThrowException = exc_setting
# Last operation is to switch controller to new operating mode
cmd = ssd.nand.genericNandCmdModeSetup(0, 0) # Arbitrarly chose ch/ce0 which should always exist
cmd += ssd.nand.genericNandCmdInterfaceMode(mode) # Switch controller Mode to target mode
cmd += ssd.nand.genericNandCmdClkRate(f) # Switch the latching frequency
cmd += ssd.nand.genericPollDirectStatus() # Poll status for device to become ready
cmd += ssd.nand.genericNandCmdModeEnd(True)
cmd += ssd.nand.genericNandTestModeRelease()
self.genericNandCommand(cmd)
# Update the interface mode to now reflect the new state
self.InterfaceMode = modes[mode]
return
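    # e.g. self.nandSetInterfaceMode('ddr', freq = 200) moves every device to
    # NV-DDR at 200 MT/s, while self.nandSetInterfaceMode('sdr') falls back to
    # SDR via a reset, per the switching rules handled above.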
def disable_all_ch_ce(self):
ssd = self.ssd
# Disable exceptions when enumerating (expect to get errors)
exc_setting = self.ThrowException
self.ThrowException = False
# Loop through all channels
for ch in xrange(ssd['max_ch']):
cmd = ssd.nand.genericNandCmdDisableOneCh(ch)
self.genericNandCommand(cmd)
# Restore exceptions after getting through the NAND
self.ThrowException = exc_setting
return
@method_tracer()
def hmlcx7_histogram_read(self, ch, ce, lun = 0, blk = 0, page = 0, length = 74368, offset = None):
ssd = self.ssd
self.assertTrue((ch != 255) or (ce != 255) or (lun != 255) or (blk != 65535) or (page != 65535))
self.disable_all_ch_ce() # JIRA: BX200-156
tlc_start_blk = self.get_tlc_start_block()
if blk < tlc_start_blk:
print "Current block is SLC block"
else:
print "Current block is TLC block"
mode = 0x47 # ONFi 0x47 HMLCx7 Histogram Read Command
# Convert the address parameters into the corresponding row address
row_addr = (lun << ssd['LunStartBit']) | (blk << ssd['BlockStartBit']) | (page << ssd['PageStartBit'])
if offset == None:
offset = 0
req_tfr = length # Keep Track of original read request length
if length > (64 * 1024):
# Windows USB driver can only transfer 64KB,
length = 64 * 1024
# Issue the direct UID Command
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(mode) # Issue the cmd to set the read mode
# The tMODE time duration is ~25ns.
cmd += ssd.nand.genericNandCmdDirectCmd(0x00) # Issue the read page cmd
cmd += ssd.nand.genericNandCmdDirectAddr(offset, 2) # Issue col addr
cmd += ssd.nand.genericNandCmdDirectAddr(row_addr, 3) # Issue row addr
cmd += ssd.nand.genericNandCmdDirectCmd(0x30) # Complete the read page cmd
cmd += ssd.nand.genericPollDirectStatus() # Wait for read to complete
cmd += ssd.nand.genericNandCmdDirectCmd(0x00) # Put NAND back in read mode
cmd += ssd.nand.genericNandCmdModeEnd()
# Send command and read the data from the buffer
buf = self.genericNandCommand(cmd, 1, length)
# If more data in page, then issue a col change and read data
if length < req_tfr:
# Low level API has a limit of 64KB transfers, so read partial page data
# There are only 9296 column addresses on the B95A, no matter what mode you're in.
# In HMLCx7 mode, eight bytes are output from every column address (7 for data, one dummy 0xff).
col_addr = length / 8
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(0x05) # Change Read Col
cmd += ssd.nand.genericNandCmdDirectAddr(col_addr, 2) # Issue col addr
cmd += ssd.nand.genericNandCmdDirectCmd(0xE0) # Complete the col change
cmd += ssd.nand.genericNandCmdModeEnd()
# Send command and read the data from the buffer
tmp = self.genericNandCommand(cmd, 1, (req_tfr - length))
buf.Resize((buf.TotalBytes + tmp.TotalBytes) / 512)
tmp.CopyTo(buf, 0, ((buf.TotalBytes - tmp.TotalBytes) / 512))
# Release CE after reading the data
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdModeEnd(True)
cmd += ssd.nand.genericNandTestModeRelease()
self.genericNandCommand(cmd)
# Go through each chunk of histogram data
# Copy the chunk of histogram data into a buffer
data_chunk = dm3.Buffer((buf.TotalBytes + 511) / 512)
data_chunk.FillZeros()
for offset in xrange(0, req_tfr, 8):
# Go through the 7 bytes (byte 7 is dummy) of histogram data and separate the bits into 8 dac voltages
for ii in xrange(0, 7):
                val = buf.GetByte(offset + ii)
for jj in xrange(0, 8):
tmp = data_chunk.GetByte(offset + jj)
tmp = (tmp << 1) | (val & 1)
val >>= 1
data_chunk.SetByte(offset + jj, tmp)
return data_chunk
# @method_tracer()
def tlc_program(self, ch, ce, lun, blk, page, dbuffer, byte_size = 27888, mode = 0x43):
ssd = self.ssd
self.assertTrue((ch != 255) or (ce != 255) or (lun != 255) or (blk != 65535) or (page != 65535))
if mode == 0x41:
msg = "TLC Coarse program"
elif mode == 0x42:
msg = "TLC Fine program"
elif mode == 0x43:
msg = "TLC Super Fine program"
elif mode == 0x40:
msg = "SLC program"
else:
print "Invalid program mode"
return
print "Program data on ch: %d, ce: %d, lun: %d, blk: %d, page: %d using %s mode" % (ch, ce, lun, blk, page, msg)
self.disable_all_ch_ce() # JIRA: BX200-156
# Convert the address parameters into the corresponding row address
row_addr = (lun << ssd['LunStartBit']) | (blk << ssd['BlockStartBit']) | (page << ssd['PageStartBit'])
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(mode) # Issue the cmd to set the program mode
cmd += ssd.nand.genericNandCmdDirectCmd(0x80) # Issue the program page cmd
cmd += ssd.nand.genericNandCmdDirectAddr(0, 2) # Issue col addr (offset within page)
cmd += ssd.nand.genericNandCmdDirectAddr(row_addr, 3) # Issue row addr
cmd += ssd.nand.genericNandCmdModeEnd()
self.genericNandCommand(cmd, 2, byte_size, dbuffer)
# Send a command to poll status for completion
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(0x10) # Issue the second part of the program page cmd
cmd += ssd.nand.genericPollDirectStatus()
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
return
def tlc_one_block_program(self, ch, ce, lun, blk, dBuffer):
ssd = self.ssd
for page_group, prog_type in ssd['page_table']:
for page in page_group:
if prog_type == "tlc_coarse":
mode = 0x41
byte_size = ssd['PageByteTLC']
self.tlc_program(ch, ce, lun, blk, page, dBuffer, byte_size, mode)
elif prog_type == "tlc_fine":
mode = 0x42
byte_size = ssd['PageByteTLC']
self.tlc_program(ch, ce, lun, blk, page, dBuffer, byte_size, mode)
elif prog_type == "tlc_superfine":
mode = 0x43
byte_size = ssd['PageByteTLC']
self.tlc_program(ch, ce, lun, blk, page, dBuffer, byte_size, mode)
else: # SLC program
mode = 0x40
byte_size = ssd['PageByteSLC']
slc_dBuffer = dm3.Buffer((ssd['PageByteSLC'] + 511) / 512)
slc_dBuffer.FillRandom()
self.tlc_program(ch, ce, lun, blk, page, slc_dBuffer, byte_size, mode)
@method_tracer()
def static_trim_read(self, ch, ce, lun, addr):
ssd = self.ssd
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericPollDirectStatusEnhanced(lun << ssd['LunStartBit'])
cmd += ssd.nand.genericNandCmdEnterLvcm()
cmd += ssd.nand.genericPollDirectStatus()
cmd += ssd.nand.genericNandCmdStaticTrimSpaceAccess()
cmd += ssd.nand.genericNandCmdDirectAddr(addr)
cmd += ssd.nand.genericNandCmdModeEnd()
buf = self.genericNandCommand(cmd, mode = 1, byte_size = 1)
val = buf.GetByte(0)
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdStaticTrimSpaceExit()
cmd += ssd.nand.genericNandCmdExitLvcm()
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
return val
@method_tracer()
def static_trim_write(self, ch, ce, lun, addr, data):
ssd = self.ssd
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericPollDirectStatusEnhanced(lun << ssd['LunStartBit'])
cmd += ssd.nand.genericNandCmdEnterLvcm()
cmd += ssd.nand.genericPollDirectStatus()
cmd += ssd.nand.genericNandCmdStaticTrimSpaceAccess()
cmd += ssd.nand.genericNandCmdDirectAddr(addr)
cmd += ssd.nand.genericNandCmdDirectData(data) # Value for input
cmd += ssd.nand.genericNandCmdModeEnd()
self.genericNandCommand(cmd)
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdStaticTrimSpaceExit()
cmd += ssd.nand.genericNandCmdExitLvcm()
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
return
@method_tracer()
def read_trim_register(self, ch, ce, lun, reg_param, reg_addr, test_mode = False):
ssd = self.ssd
if not test_mode:
# Uses the MLBi GetTrim command to access the trim registers
# Mask out the Write ALL_LUNs flag from the trim param table
reg_param &= 0x7F
# Select test mode, ce and ch.
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
# Poll LUN status to make sure its ready
cmd += ssd.nand.genericPollDirectStatusEnhanced(lun << ssd['LunStartBit'])
cmd += ssd.nand.genericNandCmdDirectCmd(0xEA)
cmd += ssd.nand.genericNandCmdDirectAddr([reg_addr, reg_param, lun << 4])
cmd += ssd.nand.genericNandCmdModeEnd()
buf = self.genericNandCommand(cmd, mode = 1, byte_size = 1)
val = buf.GetByte(0)
# Send command to release CE otherwise it will stay asserted when data read
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
else:
# Select test mode, ce and ch.
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericPollDirectStatusEnhanced(lun << ssd['LunStartBit'])
cmd += ssd.nand.genericNandCmdEnterLvcm()
cmd += ssd.nand.genericNandCmdTrimValues(reg_param, reg_addr)
cmd += ssd.nand.genericNandCmdModeEnd()
# Now actually send the built command to the drive, trim value is returned
buf = self.genericNandCommand(cmd, mode = 1, byte_size = 1)
val = buf.GetByte(0)
# Create the command that disables the trim page and disables the LVCM mode, and releases CE
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandEndTrimValues()
cmd += ssd.nand.genericNandCmdExitLvcm()
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
return val
@method_tracer()
def write_trim_register(self, ch, ce, lun, reg_param, reg_addr, data, test_mode = False):
ssd = self.ssd
if not test_mode:
# Uses the MLBi GetTrim command to access the trim registers
# Mask out the Write ALL_LUNs flag from the trim param table
reg_param &= 0x7F
# Select test mode, ce and ch.
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
# Poll LUN status to make sure its ready
cmd += ssd.nand.genericPollDirectStatusEnhanced(lun << ssd['LunStartBit'])
cmd += ssd.nand.genericNandCmdDirectCmd(0xEB)
cmd += ssd.nand.genericNandCmdDirectAddr([reg_addr, reg_param, lun << 4])
cmd += ssd.nand.genericNandCmdDirectData(data)
cmd += ssd.nand.genericNandCmdModeEnd(True)
self.genericNandCommand(cmd)
else:
# Select test mode, ce and ch.
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
self.genericNandCommand(cmd)
return
@method_tracer()
def unlock(self):
ssd = self.ssd
ssd.AtaCall28BitNoData(0x55, 0x41, 0x75, 0x67, 0x30, 0xA0, 0xFA)
return
def bist_execution(self):
ssd = self.ssd
ssd.AtaCall28BitNoData(0x90, 0, 0, 0, 0, 0xA0, 0xFA)
return
@method_tracer()
def get_tlc_start_block(self):
ssd = self.ssd
fwBuf = dm3.Buffer(2)
fwBuf.FillZeros()
ssd.AtaCall28BitReadPio(fwBuf, 1024, 0x21, 0x02, 0x00, 0x00, 0x00, 0xA0, 0xFA)
return ((fwBuf.GetByte(16) << 8) + fwBuf.GetByte(17))
@method_tracer()
def translate_lba(self, lba):
ssd = self.ssd
print "Translate LBA %d to physical address" % lba
lba0 = lba & 0xff
lba1 = (lba >> 8) & 0xff
lba2 = (lba >> 16) & 0xff
lba3 = (lba >> 24) & 0xff
ssd.AtaCall28BitNoData(0x34, 0x00, lba3, 0xEC, 0x48, 0xA0, 0xFA)
rBuffer = dm3.Buffer(1)
rBuffer.FillZeros()
ssd.AtaCall28BitReadPio(rBuffer, 512, 0x34, 0x01, lba0, lba1, lba2, 0xA0, 0xFA)
ch = rBuffer.GetByte(0)
ce = rBuffer.GetByte(1)
blk = rBuffer.GetByte(2) + (rBuffer.GetByte(3) << 8)
page = rBuffer.GetByte(4) + (rBuffer.GetByte(5) << 8)
lun = rBuffer.GetByte(6)
print "Physical address for LBA %d is ch: %d, ce: %d, lun: %d, blk: %d, page: %d" % (lba, ch, ce, lun, blk, page)
return (ch, ce, blk, page, lun)
@method_tracer()
def read_direct(self, ch, ce, lun, blk, page, byte_size, mode = 3):
ssd = self.ssd
ceChan = ((ce << 4) | ch)
row_addr = (lun << ssd['LunStartBit']) | (blk << ssd['BlockStartBit']) | (page << ssd['PageStartBit'])
acyc3 = row_addr & 0xFF
acyc4 = (row_addr >> 8) & 0xFF
acyc5 = (row_addr >> 16) & 0xFF
sector = (byte_size + 511) / 512
print "Read Direct Phase 1"
ssd.AtaCall28BitNoData(0x70, (byte_size & 0xFF), ((byte_size >> 8) & 0xFF), ceChan, 0xEC, 0xA0, 0xFA)
print "Read Direct Phase 2"
mode |= 0xA0
rBuffer = dm3.Buffer(sector)
rBuffer.FillZeros()
ssd.AtaCall28BitReadPio(rBuffer, sector * 512, 0x70, sector, acyc3, acyc4, acyc5, mode, 0xFA)
return rBuffer
@method_tracer()
def erase_direct(self, ch, ce, lun, blk, page = 0, mode = 3):
ssd = self.ssd
ceChan = ((ce << 4) | ch)
row_addr = (lun << ssd['LunStartBit']) | (blk << ssd['BlockStartBit']) | (page << ssd['PageStartBit'])
acyc3 = row_addr & 0xFF
acyc4 = (row_addr >> 8) & 0xFF
acyc5 = (row_addr >> 16) & 0xFF
mode |= 0xA0
print "Erase Direct on ch: %d, ce: %d, lun: %d, blk: %d" % (ch, ce, lun, blk)
ssd.AtaCall28BitNoData(0x67, ceChan, acyc3, acyc4, acyc5, mode, 0xFA)
return
@method_tracer()
def get_error_recovery_statistics(self, die_count):
ssd = self.ssd
# BX200 max_ch * max_ce * max_lun * info_per_die
sector_count = (8 * 4 * 2 * 160 + 511) / 512
rBuffer = dm3.Buffer(sector_count)
rBuffer.FillZeros()
ssd.AtaCall28BitReadPio(rBuffer, sector_count * 512, 0x94, sector_count, 0, 0, 0, 0xA0, 0xFA)
dic = self.error_recovery_statistics_parser(die_count, rBuffer)
"""
RR16 means Soft Decode
RR17 means Super Calibration-RR0
RR18 means Target Calibration-RR0-Soft Decode
RR19 means RAID
"""
for rr in xrange(20):
name = "RR%d" % rr
if name == "RR16":
name = "soft"
elif name == "RR17":
name = "sCal"
elif name == "RR18":
name = "t-pCal"
elif name == "RR19":
name = "RAID"
print "%4s" % name,
print "\n",
for die in xrange(die_count):
for rr in xrange(20):
name = "Die %d Success Hard Decode-RR%x" % (die, rr)
print "%4d" % dic[name],
print "\n",
@method_tracer()
def trigger_recalibration(self):
ssd = self.ssd
rBuffer = dm3.Buffer(1)
with dm3.DeviceContext(ssd.device, commandTimeOut = 300, throwsExceptions = self.ThrowException):
ssd.AtaCall28BitReadPio(rBuffer, 512, 0x97, 1, 0, 0, 0, 0xA0, 0xFA)
return (rBuffer.GetWord(0), rBuffer.GetWord(1))
@method_tracer()
def get_target_page_calibration_info(self):
ssd = self.ssd
rBuffer = dm3.Buffer(16)
rBuffer.FillZeros()
ssd.AtaCall28BitReadPio(rBuffer, 16 * 512, 0x99, 16, 0, 0, 0, 0, 0xFA)
print rBuffer
print "%3s %2s %2s %3s %5s %5s %4s %7s %7s %10s" % ('id', 'ch', 'ce', 'lun', 'plane', 'block', 'page', 't_after', 't_before', 'offset')
for offset in xrange(0, 16 * 512, 32):
blk = rBuffer.GetWord(offset)
page = rBuffer.GetWord(offset + 2)
ch = rBuffer.GetByte(offset + 4)
ce = rBuffer.GetByte(offset + 5)
plane = rBuffer.GetByte(offset + 6)
lun = rBuffer.GetByte(offset + 7)
cal_offset = rBuffer.GetBytes(offset + 8, 7)
for i in xrange(len(cal_offset)):
if cal_offset[i] > 128:
cal_offset[i] -= 256
t_after = rBuffer.GetByte(offset + 15)
t_before = rBuffer.GetByte(offset + 16)
execute_count = rBuffer.GetWord(offset + 30)
            if any((execute_count, ch, ce, lun, plane, blk, page)):
print "%3d %2d %2d %3d %5d %5d %4d %7d %7d %10s" % (execute_count, ch, ce, lun, plane, blk, page, t_after, t_before, cal_offset)
def error_recovery_statistics_parser(self, die_count, buf):
statistics_success_dic = {}
for die in xrange(die_count):
for rr in xrange(20):
name = "Die %d Success Hard Decode-RR%x" % (die, rr)
statistics_success_dic[name] = 0
die = 0
for die_offset in xrange(0, 64 * 160, 160):
# No error recovery statistics for 240/480 on LUN 1
if (((die_count == 16) or (die_count == 32)) and ((die_offset / 160) % 2 != 0)):
continue
# No ce 4~7 for 240
if ((die_count == 16) and (((die_offset / 160) / 8) % 2) == 1):
continue
for offset in xrange(0, 0x50, 0x04):
name = "Die %d Success Hard Decode-RR%x" % (die, offset / 0x04)
statistics_success_dic[name] = buf.GetByte(die_offset + offset) + (buf.GetByte(die_offset + offset + 1) << 8) \
+ (buf.GetByte(die_offset + offset + 2) << 16) + (buf.GetByte(die_offset + offset + 3) << 24)
die += 1
return statistics_success_dic
@method_tracer()
def get_one_page_histogram_valley(self, ch, ce, lun, blk, page):
ssd = self.ssd
self.disable_all_ch_ce()
mode = 0x47 # ONFi 0x47 HMLCx7 Histogram Read Command
# Convert the address parameters into the corresponding row address
row_addr = (lun << ssd['LunStartBit']) | (blk << ssd['BlockStartBit']) | (page << ssd['PageStartBit'])
req_tfr = 74368 # Keep Track of original read request length
length = 64 * 1024
# Issue the direct UID Command
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(mode) # Issue the cmd to set the read mode
# The tMODE time duration is ~25ns.
cmd += ssd.nand.genericNandCmdDirectCmd(0x00) # Issue the read page cmd
cmd += ssd.nand.genericNandCmdDirectAddr(0, 2) # Issue col addr
cmd += ssd.nand.genericNandCmdDirectAddr(row_addr, 3) # Issue row addr
cmd += ssd.nand.genericNandCmdDirectCmd(0x30) # Complete the read page cmd
cmd += ssd.nand.genericPollDirectStatus() # Wait for read to complete
cmd += ssd.nand.genericNandCmdDirectCmd(0x00) # Put NAND back in read mode
cmd += ssd.nand.genericNandCmdModeEnd()
# Send command and read the data from the buffer
buf = self.genericNandCommand(cmd, 1, length)
col_addr = length / 8
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdDirectCmd(0x05) # Change Read Col
cmd += ssd.nand.genericNandCmdDirectAddr(col_addr, 2) # Issue col addr
cmd += ssd.nand.genericNandCmdDirectCmd(0xE0) # Complete the col change
cmd += ssd.nand.genericNandCmdModeEnd()
# Send command and read the data from the buffer
tmp = self.genericNandCommand(cmd, 1, (req_tfr - length))
buf.Resize((buf.TotalBytes + tmp.TotalBytes) / 512)
tmp.CopyTo(buf, 0, ((buf.TotalBytes - tmp.TotalBytes) / 512))
# Release CE after reading the data
cmd = ssd.nand.genericNandCmdModeSetup(ce, ch)
cmd += ssd.nand.genericNandCmdModeEnd(True)
cmd += ssd.nand.genericNandTestModeRelease()
self.genericNandCommand(cmd)
# Go through each chunk of histogram data
dac = {}
for i in xrange(128):
dac[i] = 0
for offset in xrange(0, 74368, 8):
# Go through the 7 bytes (byte 7 is dummy) of histogram data and separate the bits into 8 dac voltages
dac_list = [0, 0, 0, 0, 0, 0, 0, 0]
for ii in xrange(7):
                val = buf.GetByte(offset + ii)
for jj in xrange(0, 8):
dac_list[jj] = (dac_list[jj] << 1) | (val & 1)
val >>= 1
for ii in xrange(8):
dac[127 - dac_list[ii]] += 1
peak = [1, 31, 49, 63, 79, 94, 109, 128]
valley = [0, 0, 0, 0, 0, 0, 0]
for i in xrange(7):
valley[i] = peak[i]
value = dac[peak[i]]
for dac_value in xrange(peak[i], peak[i + 1]):
if dac[dac_value] < value:
valley[i] = dac_value
value = dac[dac_value]
return valley, [dac[valley[0]], dac[valley[1]], dac[valley[2]], dac[valley[3]], dac[valley[4]], dac[valley[5]], dac[valley[6]]]
def get_one_block_histogram_valley(self, ch, ce, lun, blk):
ssd = self.ssd
blk_valley = []
for page in xrange(ssd['BlockPageTLC']):
blk_valley.append(self.get_one_page_histogram_valley(ch, ce, lun, blk, page))
for page in xrange(ssd['BlockPageTLC']):
print "Block %d, Page %d" % (blk, page),
print blk_valley[page]
return
def get_one_die_histogram_valley(self, ch, ce, lun):
ssd = self.ssd
tlc_start_blk = self.get_tlc_start_block()
log_name = "CH_%d_CE_%d_LUN_%d.log" % (ch, ce, lun)
saveout = sys.stdout
fsock = open(log_name, 'w')
sys.stdout = fsock
for blk in xrange(tlc_start_blk, ssd['BlockPerDie'], 1):
self.get_one_block_histogram_valley(ch, ce, lun, blk)
sys.stdout = saveout
fsock.close()
def count_vt(self, buf, length = None):
if length is None:
length = buf.TotalBytes
dic = {}
for i in xrange(length):
vt = 127 - buf.GetByte(i)
if vt in dic.keys():
dic[vt] += 1
else:
dic[vt] = 1
return dic
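    # Example (sketch): turn one page of raw HMLCx7 data into a plottable VT
    # histogram and hand it to draw_plot below:
    #   buf = self.hmlcx7_histogram_read(ch, ce, lun, blk, page)
    #   hist = self.count_vt(buf, self.ssd['histo_length'])
    #   self.draw_plot(plot0=hist)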
def draw_plot(self, *arg, **kwargs):
import re
try:
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib not installed on this machine"
else:
xlim = kwargs['xlim'] if 'xlim' in kwargs.keys() else 128
ylim = kwargs['ylim'] if 'ylim' in kwargs.keys() else 3000
plt.xlim(1, xlim)
plt.ylim(1, ylim)
plt.xlabel('DAC')
plt.ylabel('Distribution')
plt.title('VT Distribution')
x = {}
y = {}
c = {} # color
timer = 0
keys = kwargs.keys()
keys.sort()
for key in keys:
if re.search('plot', key):
dic = kwargs[key]
dic = sorted(dic.iteritems(), key = lambda d:d[0], reverse = False)
x[key] = []
y[key] = []
if (timer % 3) == 0:
c[key] = 'r'
elif (timer % 3) == 1:
c[key] = 'b'
else:
c[key] = 'g'
timer += 1
for i in xrange(len(dic)):
x[key].append(dic[i][0])
y[key].append(dic[i][1])
            # Build the argument list directly instead of exec'ing a composed string
            plot_args = []
            for key in keys:
                if re.search('plot', key):
                    plot_args.extend([x[key], y[key], c[key]])
            plt.plot(*plot_args, linewidth = 2)
plt.savefig('BX200_VT_Distribution.png', dpi = 500)
plt.show()
return
def whole_block_histo(self, ch, ce, lun, blk):
ssd = self.ssd
blk_dic = {}
for page in xrange(ssd['BlockPageTLC']):
print "Get histo data in page %d" % page
buf = self.hmlcx7_histogram_read(ch, ce, lun, blk, page)
            blk_dic[page] = self.count_vt(buf, ssd['histo_length'])
        return blk_dic
| [
"[email protected]"
]
| |
25757d0cc1d5d978228bba4c6f71607f502c84df | 4ca64aa3ace012a2baec182d868b4e5e91545e7b | /NYU/Artifacts/ImagePairCheck.py | 4965ad740083d619461e6cc468f285325f64cb7c | []
| no_license | yijun2011/tzo | 79b9e23a483587b1bc0b2d9d6c94945fd01c05b5 | e8c65af067c05a7c06690f2bd129b88886213b03 | refs/heads/master | 2020-04-10T01:05:42.135741 | 2019-05-12T16:07:07 | 2019-05-12T16:07:07 | 160,705,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 11 13:02:24 2018
@author: oscar seurat
"""
import numpy as np;
import matplotlib.pyplot as plt;
from matplotlib.widgets import TextBox;
import sys;
class verifier:
def __init__(self, original, distorted):
if (original.shape != distorted.shape):
            print('verifier.verifier: the original and distorted image arrays ' +
                  'must have the same dimensions.', file=sys.stderr);
sys.exit(-1);
self.orig_arr = original;
self.dist_arr = distorted;
self.total_images = original.shape[2];
# Let's set up image display gear
self.fig, self.ax = plt.subplots(1, 2, figsize = (10, 7));
plt.subplots_adjust(bottom=0.3);
self.current_slice = 1000;
self.ax[0].imshow(self.orig_arr[:,:, self.current_slice]);
self.ax[1].imshow(self.dist_arr[:,:, self.current_slice]);
img_no_pos = plt.axes([0.48, 0.1, 0.06, 0.1]); # x, y, w, h
self.img_tbox = TextBox(img_no_pos, 'Image No.: ', initial=str(self.current_slice));
def _update(self, val):
self.current_slice = int(val);
self.ax[0].imshow(self.orig_arr[:,:, self.current_slice]);
self.ax[1].imshow(self.dist_arr[:,:, self.current_slice]);
self.fig.canvas.draw_idle();
def initialize(self):
self.img_tbox.on_submit(self._update);
###################################################################
###################################################################
MRI_PATH='/Users/yzhao11/Documents/Research/MachineLearning/MRI/zhao_dataset_20181011/sub-NC188/ses-20180825/anat/';
orig_file = MRI_PATH + 'OriginalImages_188_X.npz';
mngl_file = MRI_PATH + 'DistortedImages_188_X.npz';
# orig_file = MRI_PATH + 'orig_test.npz';
# mngl_file = MRI_PATH + 'mngl_test.npz';
ORIG = np.load(orig_file);
orig_arr = ORIG['grand_orig_x'];
MNGL = np.load(mngl_file);
mngl_arr = MNGL['grand_arr_x'];
VRF = verifier(orig_arr, mngl_arr);
VRF.initialize();
plt.show();
| [
"[email protected]"
]
| |
4587be94b0486ffa14c1ede8050b01a92b49ec20 | b730976f89ef80986292a8834b0c43a824146670 | /Lab 4/environments/gridworld.py | 1c20fc2cf34be70cc826e2326c999d0b5a897073 | []
| no_license | Matkicail/Reinforcement-Learning-Labs | d48f9013c675bc33aec13cb6b23f78fb983abfbf | 616a585139c257dd1fd9c53e1ca1e3c08f60894a | refs/heads/main | 2023-09-03T01:55:27.707404 | 2021-11-08T21:37:55 | 2021-11-08T21:37:55 | 394,694,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,650 | py | import io
import numpy as np
import sys
import gym
from gym import spaces
from gym.utils import seeding
UP = 0
RIGHT = 1
DOWN = 2
LEFT = 3
NUMBER_OF_ACTIONS = 4
class GridworldEnv(gym.Env):
"""
Grid World environment from Sutton's Reinforcement Learning book chapter 4.
You are an agent on an MxN grid and your goal is to reach the terminal state.
You can take actions in each direction (UP=0, RIGHT=1, DOWN=2, LEFT=3).
Actions going off the edge leave you in your current state.
"""
metadata = {'render.modes': ['human', 'ansi']}
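    # Minimal usage sketch (assumes this module is importable):
    #   env = GridworldEnv(shape=[4, 4], terminal_states=[0, 15])
    #   state = env.reset()
    #   next_state, reward, done, _ = env.step(RIGHT)
    #   env.render()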
def __init__(self, shape=[4,4], terminal_states=[15], terminal_reward = 0, step_reward=-1):
if not isinstance(shape, (list, tuple)) or not len(shape) == 2:
raise ValueError('shape argument must be a list/tuple of length 2')
self.shape = shape
self.observation_space = spaces.Discrete(np.prod(shape))
for t in terminal_states:
assert 0 <= t < self.observation_space.n
self.action_space = spaces.Discrete(NUMBER_OF_ACTIONS)
self.__terminal_states = terminal_states
self.__terminal_reward = terminal_reward
self.__step_reward = step_reward
MAX_Y = shape[0]
MAX_X = shape[1]
P = {}
grid = np.arange(self.observation_space.n).reshape(shape)
it = np.nditer(grid, flags=['multi_index'])
while not it.finished:
s = it.iterindex
y, x = it.multi_index
# P[s][a] = (prob, next_state, reward, is_done)
P[s] = {a : [] for a in range(self.action_space.n)}
is_done = lambda s: s in self.__terminal_states
# stuck in a terminal state
if is_done(s):
P[s][UP] = [(1.0, s, self.__terminal_reward, True)]
P[s][RIGHT] = [(1.0, s, self.__terminal_reward, True)]
P[s][DOWN] = [(1.0, s, self.__terminal_reward, True)]
P[s][LEFT] = [(1.0, s, self.__terminal_reward, True)]
# Not a terminal state
else:
reward = self.__step_reward
ns_up = s if y == 0 else s - MAX_X
ns_right = s if x == (MAX_X - 1) else s + 1
ns_down = s if y == (MAX_Y - 1) else s + MAX_X
ns_left = s if x == 0 else s - 1
P[s][UP] = [(1.0, ns_up, reward, False)]
P[s][RIGHT] = [(1.0, ns_right, reward, False)]
P[s][DOWN] = [(1.0, ns_down, reward, False)]
P[s][LEFT] = [(1.0, ns_left, reward, False)]
it.iternext()
# Initial state distribution is uniform
self.__initial_state_distribution = np.ones(self.observation_space.n) / self.observation_space.n
# We expose the model of the environment for educational purposes
# This should not be used in any model-free learning algorithm
self.P = P
super(GridworldEnv, self).__init__()
def step(self, action):
assert self.action_space.contains(action)
prob, next_state, reward, done = self.P[self.__current_state][action][0]
self.__current_state = next_state
return next_state, reward, done, None
def reset(self):
self.__current_state = np.random.choice(self.observation_space.n, p=self.__initial_state_distribution)
return self.__current_state
def render(self, mode='human', close=False):
""" Renders the current gridworld layout
For example, a 4x4 grid with the mode="human" looks like:
T o o o
o x o o
o o o o
o o o T
where x is your position and T are the two terminal states.
"""
if close:
return
outfile = io.StringIO() if mode == 'ansi' else sys.stdout
grid = np.arange(self.observation_space.n).reshape(self.shape)
it = np.nditer(grid, flags=['multi_index'])
while not it.finished:
s = it.iterindex
y, x = it.multi_index
if self.__current_state == s:
output = " x "
elif s in self.__terminal_states:
output = " T "
else:
output = " o "
if x == 0:
output = output.lstrip()
if x == self.shape[1] - 1:
output = output.rstrip()
outfile.write(output)
if x == self.shape[1] - 1:
outfile.write("\n")
it.iternext()
def seed(self, seed=None):
if(seed != None):
np.random.seed(seed)
def close(self):
        pass
| [
"[email protected]"
]
| |
a364c739486c3e7db3c09a309d576eba67ffbc49 | 043914df3cdc7349a9aa4091df4aacddda04bd30 | /plugins/repo/sqlmap/plugin.py | 75deb0be957a74923de1720403d9318bbff94983 | [
"DOC"
]
| permissive | fbjackson1989/faraday | 53686c942b1622083ac83c9453078cea3c4ed854 | a38e50822cd43b58c1fc2e9aaee2e6b206f1f82a | refs/heads/master | 2023-01-03T02:22:09.385895 | 2020-10-22T20:15:42 | 2020-10-22T20:15:42 | 306,442,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,692 | py | #!/usr/bin/env python
'''
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
from __future__ import with_statement
import argparse
import hashlib
import os
import pickle
import re
import shlex
import socket
import sqlite3
import sys
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
from urlparse import urlparse
from collections import defaultdict
from plugins.plugin import PluginTerminalOutput
from plugins.plugin_utils import get_vulnweb_url_fields
try:
import xml.etree.cElementTree as ET
import xml.etree.ElementTree as ET_ORIG
ETREE_VERSION = ET_ORIG.VERSION
except ImportError:
import xml.etree.ElementTree as ET
ETREE_VERSION = ET.VERSION
ETREE_VERSION = [int(i) for i in ETREE_VERSION.split(".")]
current_path = os.path.abspath(os.getcwd())
__author__ = "Francisco Amato"
__copyright__ = "Copyright (c) 2013, Infobyte LLC"
__credits__ = ["Francisco Amato"]
__license__ = ""
__version__ = "1.0.0"
__maintainer__ = "Francisco Amato"
__email__ = "[email protected]"
__status__ = "Development"
class Database(object):
def __init__(self, database):
self.database = database
def connect(self, who="server"):
self.connection = sqlite3.connect(
self.database, timeout=3, isolation_level=None)
self.cursor = self.connection.cursor()
def disconnect(self):
self.cursor.close()
self.connection.close()
def commit(self):
self.cursor.commit()
def execute(self, statement, arguments=None):
if arguments:
self.cursor.execute(statement, arguments)
else:
self.cursor.execute(statement)
if statement.lstrip().upper().startswith("SELECT"):
return self.cursor.fetchall()
class SqlmapPlugin(PluginTerminalOutput):
# Plugin for Sqlmap Tool
def __init__(self):
PluginTerminalOutput.__init__(self)
self.id = "Sqlmap"
self.name = "Sqlmap"
self.plugin_version = "0.0.3"
self.version = "1.0.8.15#dev"
self.framework_version = "1.0.0"
self._current_output = None
self.url = ""
self.protocol = ""
self.hostname = ""
self.port = "80"
self.params = ""
self.fullpath = ""
self.path = ""
self.ignore_parsing = False
self.addSetting("Sqlmap path", str, "/root/tools/sqlmap")
self.db_port = {
"MySQL": 3306, "PostgreSQL": "", "Microsoft SQL Server": 1433,
"Oracle": 1521, "Firebird": 3050,
"SAP MaxDB": 7210, "Sybase": 5000,
"IBM DB2": 50000, "HSQLDB": 9001}
self.ptype = {
1: "Unescaped numeric",
2: "Single quoted string",
3: "LIKE single quoted string",
4: "Double quoted string",
5: "LIKE double quoted string",
}
self._command_regex = re.compile(
r'^(python2 ./sqlmap.py|python2.7 ./sqlmap.py|sudo sqlmap|sqlmap|sudo python sqlmap|python sqlmap|\.\/sqlmap).*?')
global current_path
self._output_path = ''
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = StringIO(request_text)
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
def send_error(self, code, message):
self.error_code = code
self.error_message = message
def hashKey(self, key):
# from sqlmap/lib/utils/hashdb.py
# we don't sanitize key, because we only work
# with plain string
retVal = int(hashlib.md5(key).hexdigest(), 16) & 0x7fffffffffffffff
return retVal
def hashDBRetrieve(self, key, unserialize=False, db=False):
"""
Helper function for restoring session data from HashDB
"""
key = "%s%s%s" % (self.url or "%s%s" % (
self.hostname, self.port), key, self.HASHDB_MILESTONE_VALUE)
retVal = ''
hash_ = self.hashKey(key)
if not retVal:
while True:
try:
for row in db.execute("SELECT value FROM storage WHERE id=?", (hash_,)):
retVal = row[0]
except sqlite3.OperationalError, ex:
if not 'locked' in ex.message:
raise
else:
break
return retVal if not unserialize else self.base64unpickle(retVal)
def base64decode(self, value):
"""
Decodes string value from Base64 to plain format
>>> base64decode('Zm9vYmFy')
'foobar'
"""
return value.decode("base64")
def base64encode(self, value):
"""
Encodes string value from plain to Base64 format
>>> base64encode('foobar')
'Zm9vYmFy'
"""
return value.encode("base64")[:-1].replace("\n", "")
def base64unpickle(self, value):
"""
Decodes value from Base64 to plain format and deserializes (with pickle) its content
>>> base64unpickle('gAJVBmZvb2JhcnEALg==')
'foobar'
"""
if value:
return pickle.loads(self.base64decode(value))
def xmlvalue(self, db, name, value="query"):
filepath = "%s" % os.path.join(
current_path, "plugins/repo/sqlmap/queries.xml")
with open(filepath, "r") as f:
try:
tree = ET.fromstring(f.read())
except SyntaxError, err:
self.log("SyntaxError: %s. %s" % (err, filepath), "ERROR")
return None
for node in tree.findall("dbms[@value='" + db + "']/" + name + ''):
return node.attrib[value]
def getuser(self, data):
users = re.search(
r'database management system users \[[\d]+\]:\n((\[\*\] (.*)\n)*)',
data)
if users:
return map((lambda x: x.replace("[*] ", "")), users.group(1).split("\n"))
def getdbs(self, data):
dbs = re.search(
r'available databases \[[\d]+\]:\n(((\[\*\] (.*)\n)*))',
data)
if dbs:
return map((lambda x: x.replace("[*] ", "")), dbs.group(1).split("\n"))
def getpassword(self, data):
users = {}
password = re.findall(
r"\n\[\*\] (.*) \[\d\]:\n\s*password hash: (.*)",
data)
if password:
for credential in password:
user = credential[0]
mpass = credential[1]
users[user] = mpass
return users
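    # e.g. output containing "\n[*] sa [1]:\n    password hash: 0x0100..." yields
    # {'sa': '0x0100...'} (sketch; user name and hash are placeholders).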
def _get_log_message(self, line):
"""Return the message of a log line.
If the line isn't from the log it will raise a ValueError
>>> line = '[16:59:03] [INFO] fetching tables'
        >>> self._get_log_message(line)
'fetching tables'
"""
match = re.match(r'\[[0-9:]+\] \[\w+\] (.+)$', line)
if match is None:
raise ValueError('Incorrect format of line')
return match.group(1)
def _is_log_and_startswith(self, text, line):
try:
msg = self._get_log_message(line)
except ValueError:
return False
else:
return msg.startswith(text)
def _is_tables_log_line(self, line):
# [16:59:03] [INFO] fetching tables for databases: 'bWAPP, ...
return self._is_log_and_startswith('fetching tables for databases',
line)
def _is_columns_log_line(self, line):
# [16:59:03] [INFO] fetching columns for table ...
return self._is_log_and_startswith('fetching columns for table ',
line)
def _match_start_get_remaining(self, start, text):
"""
If text starts with start, return text with start stripped.
Return None if it doesn't match.
"""
if not text.startswith(start):
return
return text[len(start):]
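    # e.g. _match_start_get_remaining('Database: ', 'Database: users') -> 'users'
    #      _match_start_get_remaining('Database: ', 'Table: foo')      -> None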
def gettables(self, data):
"""
Return enumerated tables of the remote database.
"""
tables = defaultdict(list) # Map database names with its tables
current_database = None
status = 'find_log_line'
list_found = False
for line in data.splitlines():
if status == 'find_log_line':
# Look for the correct log line to start searching databases
if self._is_tables_log_line(line):
# Correct line, change status
status = 'find_dbname'
elif self._is_log_and_startswith('', line) and list_found:
# If another log line is reached, stop looking
break
elif status == 'find_dbname':
database = self._match_start_get_remaining('Database: ', line)
if database is not None:
current_database = database
list_found = True
status = 'find_list_start'
elif status == 'find_list_start':
# Find +--------------+ line
if re.match(r'^\+\-+\+$', line):
# Line found
status = 'find_tables'
elif status == 'find_tables':
if line.startswith('|') and line.endswith('|'):
table = line[1:-1].strip()
tables[current_database].append(table)
elif re.match(r'^\+\-+\+$', line):
# Table list for this db ended
status = 'find_dbname'
else:
raise RuntimeError('unknown status')
return tables
def getcolumns(self, data):
"""
Return enumerated columns of the remote database.
"""
columns = defaultdict(lambda: defaultdict(list))
current_table = current_database = None
status = 'find_log_line'
list_start_count = 0
list_found = False
for line in data.splitlines():
if status == 'find_log_line':
if self._is_columns_log_line(line):
status = 'find_dbname'
elif self._is_log_and_startswith('', line) and list_found:
# Don't accept log lines if the DB dump started
break
elif status == 'find_dbname':
database = self._match_start_get_remaining('Database: ', line)
if database is not None:
list_found = True
current_database = database
status = 'find_table_name'
elif status == 'find_table_name':
table = self._match_start_get_remaining('Table: ', line)
                if table is not None:
current_table = table
status = 'find_two_list_starts'
elif status == 'find_two_list_starts':
if re.match(r'^\+[\-\+]+\+$', line):
list_start_count += 1
if list_start_count == 2:
# Start fetching columns
list_start_count = 0
status = 'find_columns'
elif status == 'find_columns':
if line.startswith('|') and line.endswith('|'):
(name, type_) = [val.strip()
for val in line[1:-1].split('|')]
columns[current_database][current_table].append(
(name, type_))
elif re.match(r'^\+[\-\+]+\+$', line):
status = 'find_dbname'
else:
raise RuntimeError('unknown status')
return columns
def getAddress(self, hostname):
"""
Returns remote IP address from hostname.
"""
try:
return socket.gethostbyname(hostname)
except socket.error:
return self.hostname
def parseOutputString(self, output, debug=False):
"""
This method will discard the output the shell sends, it will read it from
the xml where it expects it to be present.
NOTE: if 'debug' is true then it is being run from a test case and the
output being sent is valid.
"""
if self.ignore_parsing:
return
sys.path.append(self.getSetting("Sqlmap path"))
try:
from lib.core.settings import HASHDB_MILESTONE_VALUE
from lib.core.enums import HASHDB_KEYS
from lib.core.settings import UNICODE_ENCODING
except:
self.log('Remember set your Sqlmap Path Setting!... Abort plugin.', 'ERROR')
return
self.HASHDB_MILESTONE_VALUE = HASHDB_MILESTONE_VALUE
self.HASHDB_KEYS = HASHDB_KEYS
self.UNICODE_ENCODING = UNICODE_ENCODING
password = self.getpassword(output)
webserver = re.search("web application technology: (.*?)\n", output)
if webserver:
webserver = webserver.group(1)
users = self.getuser(output)
dbs = self.getdbs(output)
tables = self.gettables(output)
columns = self.getcolumns(output)
db = Database(self._output_path)
db.connect()
absFilePaths = self.hashDBRetrieve(
self.HASHDB_KEYS.KB_ABS_FILE_PATHS, True, db)
brute_tables = self.hashDBRetrieve(
self.HASHDB_KEYS.KB_BRUTE_TABLES, True, db)
brute_columns = self.hashDBRetrieve(
self.HASHDB_KEYS.KB_BRUTE_COLUMNS, True, db)
xpCmdshellAvailable = self.hashDBRetrieve(
self.HASHDB_KEYS.KB_XP_CMDSHELL_AVAILABLE, True, db)
dbms_version = self.hashDBRetrieve(self.HASHDB_KEYS.DBMS, False, db)
self.ip = self.getAddress(self.hostname)
h_id = self.createAndAddHost(self.ip)
i_id = self.createAndAddInterface(
h_id,
name=self.ip,
ipv4_address=self.ip,
hostname_resolution=self.hostname)
s_id = self.createAndAddServiceToInterface(
h_id,
i_id,
self.protocol,
'tcp',
[self.port],
status="open",
version=webserver)
n_id = self.createAndAddNoteToService(
h_id,
s_id,
"website",
'')
self.createAndAddNoteToNote(
h_id,
s_id,
n_id,
self.hostname,
'')
for item in self.db_port.keys():
if dbms_version.find(item) >= 0:
db_port = self.db_port[item]
s_id2 = self.createAndAddServiceToInterface(
h_id,
i_id,
name=dbms_version,
protocol="tcp",
status="closed",
version=str(dbms_version),
ports=[str(db_port)],
description="DB detect by SQLi")
# sqlmap.py --users
if users:
for v in users:
if v:
self.createAndAddCredToService(h_id, s_id2, v, '')
# sqlmap.py --passwords
if password:
for k, v in password.iteritems():
self.createAndAddCredToService(h_id, s_id2, k, v)
# sqlmap.py --file-dest
if absFilePaths:
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.absFilePaths",
'\n'.join(absFilePaths))
# sqlmap.py --common-tables
if brute_tables:
for item in brute_tables:
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.brutetables",
item[1])
# sqlmap.py --tables
if tables:
table_names = ['{}.{}'.format(db_name, table)
for (db_name, db_tables) in tables.items()
for table in db_tables]
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.tables",
'\n'.join(table_names)
)
# sqlmap.py --columns
if columns:
# Create one note per database
for (database, tables) in columns.items():
text = ''
for (table_name, columns) in tables.items():
columns_text = ', '.join(
'{} {}'.format(col_name, type_)
for (col_name, type_) in columns)
text += '{}: {}\n'.format(table_name, columns_text)
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.columns." + database,
text)
# sqlmap.py --common-columns
if brute_columns:
text = (
'Db: ' + brute_columns[0][0] +
'\nTable: ' + brute_columns[0][1] +
'\nColumns:')
for element in brute_columns:
text += str(element[2]) + '\n'
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.brutecolumns",
text)
# sqlmap.py --os-shell
if xpCmdshellAvailable:
self.createAndAddNoteToService(
h_id,
s_id2,
"sqlmap.xpCmdshellAvailable",
str(xpCmdshellAvailable))
# sqlmap.py --dbs
if dbs:
self.createAndAddNoteToService(
h_id,
s_id2,
"db.databases",
'\n'.join(dbs))
for inj in self.hashDBRetrieve(self.HASHDB_KEYS.KB_INJECTIONS, True, db) or []:
for k, v in inj.data.items():
self.createAndAddVulnWebToService(
h_id,
s_id,
name=inj.data[k]['title'],
desc="Payload:" + str(inj.data[k]['payload']) + "\nVector:" + str(inj.data[k]['vector']) +
"\nParam type:" + str(self.ptype[inj.ptype]),
ref=[],
pname=inj.parameter,
severity="high",
method=inj.place,
params=self.params,
**get_vulnweb_url_fields(self.fullpath))
def processCommandString(self, username, current_path, command_string):
parser = argparse.ArgumentParser(conflict_handler='resolve')
parser.add_argument('-h')
parser.add_argument('-u')
parser.add_argument('-s')
parser.add_argument('-r')
try:
args, unknown = parser.parse_known_args(
shlex.split(re.sub(r'\-h|\-\-help', r'', command_string)))
except SystemExit:
pass
if args.r:
filename = os.path.expanduser(args.r)
if not os.path.isabs(filename):
self.log('Please use an absolute path in -r option of sqlmap', 'ERROR')
self.ignore_parsing = True
return
with open(filename, 'r') as f:
request = self.HTTPRequest(f.read())
args.u = "http://" + request.headers['host'] + request.path
f.close()
if args.u:
if args.u.find('http://') < 0 and args.u.find('https://') < 0:
urlComponents = urlparse('http://' + args.u)
else:
urlComponents = urlparse(args.u)
self.protocol = urlComponents.scheme
self.hostname = urlComponents.netloc
if urlComponents.port:
self.port = urlComponents.port
else:
self.port = '80'
if urlComponents.query:
self.path = urlComponents.path
self.params = urlComponents.query
            self.url = self.protocol + "://" + self.hostname + ":" + str(self.port) + self.path
self.fullpath = self.url + "?" + self.params
self._output_path = "%s%s" % (
os.path.join(self.data_path, "sqlmap_output-"),
re.sub(r'[\n\/]', r'',
args.u.encode("base64")[:-1]))
if not args.s:
return "%s -s %s" % (command_string, self._output_path)
def setHost(self):
pass
def createPlugin():
return SqlmapPlugin()
| [
"[email protected]"
]
| |
183267d2c207c3fbe629fe61c7f03d91f136e22d | 47eb66be297d912ec1dc6761574f9a2912490003 | /client/tests/init_test.py | 9fa477bdd13d7b414c9cea2d3092dfc4bab36f5f | [
"MIT"
]
| permissive | stjordanis/pyre-check | 53be121717d805e628b47d0e29d68f8141ad6192 | a634fc4c0a60f0ad8b4e0ea5ce9ba3c2f4232450 | refs/heads/master | 2020-04-22T13:49:14.930541 | 2019-02-13T01:11:43 | 2019-02-13T01:21:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,133 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock, patch
from .. import (
EnvironmentException,
__name__ as client_name,
_resolve_filter_paths,
buck,
commands,
find_configuration_root,
resolve_analysis_directory,
switch_root,
translate_paths,
)
from ..filesystem import AnalysisDirectory, SharedAnalysisDirectory
class InitTest(unittest.TestCase):
@patch("os.path.isfile")
def test_find_configuration(self, os_mock_isfile) -> None:
os_mock_isfile.side_effect = [False, False, False, True]
self.assertEqual(find_configuration_root("/a/b/c/d", "configuration"), "/a")
os_mock_isfile.side_effect = [True]
self.assertEqual(find_configuration_root("/a", "configuration"), "/a")
os_mock_isfile.side_effect = [False, False]
self.assertEqual(find_configuration_root("/a/b", "configuration"), None)
@patch("os.chdir")
def test_switch_root(self, chdir) -> None:
arguments = MagicMock()
with patch("os.path.realpath", return_value="realpath"), patch(
"os.path.isfile", return_value=False
) as isfile, patch("os.chdir"):
arguments.local_configuration = None
switch_root(arguments)
self.assertEqual(arguments.local_configuration, None)
arguments.local_configuration = None
with patch("os.getcwd", return_value="/a/b/c"):
isfile.side_effect = (
lambda directory: directory == "/a/b/.pyre_configuration.local"
)
switch_root(arguments)
self.assertEqual(arguments.original_directory, "/a/b/c")
self.assertEqual(arguments.local_configuration, "/a/b")
with patch(
"{}.find_configuration_root".format(client_name)
) as mock_find_configuation_root:
with patch("os.getcwd", return_value="/a/b"):
arguments.original_directory = "/a/b"
arguments.current_directory = "/a/b"
arguments.local_configuration = None
mock_find_configuation_root.side_effect = ["/a", "/a/b"]
switch_root(arguments)
self.assertEqual(arguments.original_directory, "/a/b")
self.assertEqual(arguments.current_directory, "/a/b")
self.assertEqual(arguments.local_configuration, None)
def test_resolve_filter_paths(self) -> None:
arguments = MagicMock()
configuration = MagicMock()
arguments.source_directories = []
arguments.targets = []
arguments.original_directory = "/project"
configuration.local_configuration_root = None
filter_paths = _resolve_filter_paths(arguments, configuration)
self.assertEqual(filter_paths, [])
arguments.source_directories = ["/project/a"]
filter_paths = _resolve_filter_paths(arguments, configuration)
self.assertEqual(filter_paths, ["/project/a"])
arguments.source_directories = ["/project/a"]
arguments.targets = ["//x/y/..."]
filter_paths = _resolve_filter_paths(arguments, configuration)
self.assertEqual(filter_paths, ["/project/a", "x/y"])
arguments.source_directories = ["/project/local/a"]
arguments.targets = ["//x/y:z"]
configuration.local_configuration_root = "project/local"
filter_paths = _resolve_filter_paths(arguments, configuration)
self.assertEqual(filter_paths, ["/project/local/a", "x/y"])
arguments.source_directories = []
arguments.targets = []
configuration.local_configuration_root = "/project/local"
filter_paths = _resolve_filter_paths(arguments, configuration)
self.assertEqual(filter_paths, ["/project/local"])
@patch.object(
buck,
"generate_source_directories",
side_effect=lambda targets, build, prompt: targets,
)
def test_resolve_analysis_directory(self, buck) -> None:
arguments = MagicMock()
arguments.build = None
arguments.original_directory = "/project"
arguments.current_directory = "/project"
def assert_analysis_directory(expected, actual) -> None:
self.assertEqual(expected.get_root(), actual.get_root())
self.assertEqual(expected.get_filter_root(), actual.get_filter_root())
configuration = MagicMock()
configuration.source_directories = []
configuration.targets = []
configuration.local_configuration_root = None
arguments.source_directories = ["a/b"]
arguments.targets = []
arguments.filter_directory = None
expected_analysis_directory = AnalysisDirectory("a/b")
analysis_directory = resolve_analysis_directory(
arguments, commands, configuration
)
assert_analysis_directory(expected_analysis_directory, analysis_directory)
arguments.source_directories = ["/symlinked/directory"]
arguments.targets = []
arguments.filter_directory = "/real/directory"
expected_analysis_directory = AnalysisDirectory(
"/symlinked/directory", ["/real/directory"]
)
analysis_directory = resolve_analysis_directory(
arguments, commands, configuration
)
assert_analysis_directory(expected_analysis_directory, analysis_directory)
arguments.source_directories = []
arguments.targets = ["//x:y"]
arguments.filter_directory = "/real/directory"
expected_analysis_directory = SharedAnalysisDirectory(
[], ["//x:y"], "/project", ["/real/directory"]
)
analysis_directory = resolve_analysis_directory(
arguments, commands, configuration
)
assert_analysis_directory(expected_analysis_directory, analysis_directory)
arguments.source_directories = ["a/b"]
arguments.targets = ["//x:y", "//y/..."]
arguments.filter_directory = "/filter"
configuration.targets = ["//overridden/..."]
expected_analysis_directory = SharedAnalysisDirectory(
["a/b"], ["//x:y", "//y:/..."], "/project", ["/filter"]
)
analysis_directory = resolve_analysis_directory(
arguments, commands, configuration
)
assert_analysis_directory(expected_analysis_directory, analysis_directory)
arguments.source_directories = []
arguments.targets = []
arguments.filter_directory = "/filter"
configuration.source_directories = []
configuration.targets = ["//not:overridden/..."]
expected_analysis_directory = SharedAnalysisDirectory(
[], ["//not:overridden/..."], "/project", ["/filter"]
)
analysis_directory = resolve_analysis_directory(
arguments, commands, configuration
)
assert_analysis_directory(expected_analysis_directory, analysis_directory)
| [
"[email protected]"
]
| |
c3d02e6e0b4ebecdb29346804444dd14259c22b5 | 947e71b34d21f3c9f5c0a197d91a880f346afa6c | /ambari-server/src/test/python/stacks/2.0.6/HBASE/test_hbase_master.py | 5162193cbdcb7cd78cd228869aed0ea9bc788593 | [
"MIT",
"Apache-2.0",
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"OFL-1.1",
"MS-PL",
"AFL-2.1",
"GPL-2.0-only",
"Python-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
]
| permissive | liuwenru/Apache-Ambari-ZH | 4bc432d4ea7087bb353a6dd97ffda0a85cb0fef0 | 7879810067f1981209b658ceb675ac76e951b07b | refs/heads/master | 2023-01-14T14:43:06.639598 | 2020-07-28T12:06:25 | 2020-07-28T12:06:25 | 223,551,095 | 38 | 44 | Apache-2.0 | 2023-01-02T21:55:10 | 2019-11-23T07:43:49 | Java | UTF-8 | Python | false | false | 36,350 | py | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, patch
from stacks.utils.RMFTestCase import *
@patch("platform.linux_distribution", new = MagicMock(return_value="Linux"))
@patch("os.path.exists", new = MagicMock(return_value=True))
class TestHBaseMaster(RMFTestCase):
COMMON_SERVICES_PACKAGE_DIR = "HBASE/0.96.0.2.0/package"
STACK_VERSION = "2.0.6"
TMP_PATH = "/hadoop"
DEFAULT_IMMUTABLE_PATHS = ['/apps/hive/warehouse', '/apps/falcon', '/mr-history/done', '/app-logs', '/tmp']
CONFIG_OVERRIDES = {"serviceName":"HBASE", "role":"HBASE_MASTER"}
def test_install_hbase_master_default_no_phx(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "install",
config_file="hbase_no_phx.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
try_install=True,
checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
)
self.assertResourceCalled('Package', 'hbase_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertNoMoreResources()
def test_install_hbase_master_default_with_phx(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "install",
config_file="hbase_with_phx.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
try_install=True,
checked_call_mocks = [(0, "OK.", ""),(0, "OK.", "")],
)
self.assertResourceCalled('Package', 'hbase_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertResourceCalled('Package', 'phoenix_2_3_*',
retry_count=5,
retry_on_repo_unavailability=False)
self.assertNoMoreResources()
@patch("ambari_commons.repo_manager.ManagerFactory.get")
def test_install_hbase_master_with_version(self, get_provider):
from ambari_commons.os_check import OSConst
from ambari_commons.repo_manager import ManagerFactory
pkg_manager = ManagerFactory.get_new_instance(OSConst.REDHAT_FAMILY)
with patch.object(pkg_manager, "all_packages") as all_packages,\
patch.object(pkg_manager, "available_packages") as available_packages, \
patch.object(pkg_manager, "installed_packages") as installed_packages:
all_packages.return_value = [["hbase_2_3_0_1_1234", "1.0", "testrepo"]]
available_packages.return_value = [["hbase_2_3_0_1_1234", "1.0", "testrepo"]]
installed_packages.return_value = [["hbase_2_3_0_1_1234", "1.0", "testrepo"]]
get_provider.return_value = pkg_manager
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/hbase_with_phx.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.1-1234'
# the json file is not a "well formed" install command
json_content['roleCommand'] = 'INSTALL'
json_content['commandParams']['version'] = version
json_content['commandParams']['package_list'] = "[{\"name\":\"hbase_${stack_version}\",\"condition\":\"\",\"skipUpgrade\":false}]"
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "install",
config_dict = json_content,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
try_install=True,
os_type=('Redhat', '6.4', 'Final'),
checked_call_mocks = [(0, "OK.", "")],
available_packages_in_repos = ['hbase_2_3_0_1_1234'],
)
# only assert that the correct package is trying to be installed
self.assertResourceCalled('Package', 'hbase_2_3_0_1_1234',
retry_count=5,
retry_on_repo_unavailability=False)
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
user = 'hbase'
)
self.assertNoMoreResources()
pass
def test_start_default_bucketcache(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "start",
config_file="default_with_bucket.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default(bucketcache_ioengine_as_file=True)
self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
user = 'hbase'
)
self.assertNoMoreResources()
pass
def test_stop_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
only_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
on_timeout = '! ( ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid`',
timeout = 30,
user = 'hbase',
)
self.assertResourceCalled('File', '/var/run/hbase/hbase-hbase-master.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_decom_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "decommission",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
content = StaticFile('draining_servers.rb'),
mode = 0755,
)
self.assertResourceCalled('Execute', ' HBASE_OPTS="$HBASE_OPTS " /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
logoutput = True,
user = 'hbase',
)
self.assertResourceCalled('Execute', ' HBASE_OPTS="$HBASE_OPTS " /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
logoutput = True,
user = 'hbase',
)
self.assertResourceCalled('Execute', ' HBASE_OPTS="$HBASE_OPTS " /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host2',
logoutput = True,
user = 'hbase',
)
self.assertResourceCalled('Execute', ' HBASE_OPTS="$HBASE_OPTS " /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host2',
logoutput = True,
user = 'hbase',
)
self.assertNoMoreResources()
def test_decom_default_draining_only(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "decommission",
config_file="default.hbasedecom.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
content = StaticFile('draining_servers.rb'),
mode = 0755,
)
self.assertResourceCalled('Execute', ' HBASE_OPTS="$HBASE_OPTS " /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb remove host1',
logoutput = True,
user = 'hbase',
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "start",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
user = 'hbase',
)
self.assertNoMoreResources()
def test_stop_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', '/usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf stop master',
only_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
on_timeout = '! ( ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1 ) || ambari-sudo.sh -H -E kill -9 `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid`',
timeout = 30,
user = 'hbase',
)
self.assertResourceCalled('File', '/var/run/hbase/hbase-hbase-master.pid',
action = ['delete'],
)
self.assertNoMoreResources()
def test_decom_secure(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "decommission",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('File', '/usr/lib/hbase/bin/draining_servers.rb',
content = StaticFile('draining_servers.rb'),
mode = 0755,
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/[email protected]; HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf" /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/draining_servers.rb add host1',
logoutput = True,
user = 'hbase',
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/hbase.service.keytab hbase/[email protected]; HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config=/etc/hbase/conf/hbase_master_jaas.conf" /usr/lib/hbase/bin/hbase --config /etc/hbase/conf org.jruby.Main /usr/lib/hbase/bin/region_mover.rb unload host1',
logoutput = True,
user = 'hbase',
)
self.assertNoMoreResources()
def assert_configure_default(self, bucketcache_ioengine_as_file=False):
self.assertResourceCalled('Directory', '/etc/hbase',
mode = 0755
)
self.assertResourceCalled('Directory', '/etc/hbase/conf',
owner = 'hbase',
group = 'hadoop',
create_parents = True,
)
self.assertResourceCalled('Directory', '/tmp',
create_parents = True,
mode = 0777
)
if bucketcache_ioengine_as_file:
self.assertResourceCalled('Directory', '/mnt/bucket',
create_parents = True,
owner = 'hbase',
group = 'hadoop',
mode = 0755
)
pass
self.assertResourceCalled('Directory', '/hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
sudo = True,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['hbase-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hbase-site']
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hdfs-site']
)
self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
owner = 'hbase',
group = 'hadoop'
)
self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
owner = 'hbase',
content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/etc/security/limits.d',
owner = 'root',
group = 'root',
create_parents = True,
)
self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
content = Template('hbase.conf.j2'),
owner = 'root',
group = 'root',
mode = 0644,
)
self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
owner = 'hbase',
template_tag = 'GANGLIA-MASTER',
)
self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
owner = 'hbase',
template_tag = None,
)
self.assertResourceCalled('Directory', '/var/run/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('File',
'/etc/hbase/conf/log4j.properties',
mode=0644,
group='hadoop',
owner='hbase',
content=InlineTemplate('log4jproperties\nline2')
)
self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_conf_dir = '/etc/hadoop/conf',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_bin_dir = '/usr/bin',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
mode = 0711,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/bin',
keytab = UnknownConfigurationMock(),
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name=UnknownConfigurationMock(), default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/etc/hadoop/conf',
)
def assert_configure_secured(self):
self.assertResourceCalled('Directory', '/etc/hbase',
mode = 0755
)
self.assertResourceCalled('Directory', '/etc/hbase/conf',
owner = 'hbase',
group = 'hadoop',
create_parents = True,
)
self.assertResourceCalled('Directory', '/tmp',
create_parents = True,
mode = 0777
)
self.assertResourceCalled('Directory', '/hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
sudo = True,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['hbase-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hbase-site']
)
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['core-site']
)
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/etc/hbase/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hdfs-site']
)
self.assertResourceCalled('File', '/etc/hbase/conf/hbase-policy.xml',
owner = 'hbase',
group = 'hadoop',
)
self.assertResourceCalled('File', '/etc/hbase/conf/hbase-env.sh',
owner = 'hbase',
content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
group = 'hadoop',
)
self.assertResourceCalled('Directory', '/etc/security/limits.d',
owner = 'root',
group = 'root',
create_parents = True,
)
self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
content = Template('hbase.conf.j2'),
owner = 'root',
group = 'root',
mode = 0644,
)
self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hadoop-metrics2-hbase.properties',
owner = 'hbase',
template_tag = 'GANGLIA-MASTER',
)
self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/regionservers',
owner = 'hbase',
template_tag = None,
)
self.assertResourceCalled('TemplateConfig', '/etc/hbase/conf/hbase_master_jaas.conf',
owner = 'hbase',
template_tag = None,
)
self.assertResourceCalled('Directory', '/var/run/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('File',
'/etc/hbase/conf/log4j.properties',
mode=0644,
group='hadoop',
owner='hbase',
content=InlineTemplate('log4jproperties\nline2')
)
self.assertResourceCalled('HdfsResource', 'hdfs://c6401.ambari.apache.org:8020/apps/hbase/data',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = True,
hadoop_bin_dir = '/usr/bin',
keytab = '/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
)
self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = True,
hadoop_conf_dir = '/etc/hadoop/conf',
keytab = '/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_bin_dir = '/usr/bin',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
mode = 0711,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = True,
hadoop_bin_dir = '/usr/bin',
keytab = '/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local = '/usr/bin/kinit',
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore', hdfs_site=self.getConfig()['configurations']['hdfs-site'], principal_name='hdfs', default_fs='hdfs://c6401.ambari.apache.org:8020',
hadoop_conf_dir = '/etc/hadoop/conf',
)
def test_start_default_22(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "start",
config_file="hbase-2.2.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Directory', '/etc/hbase',
mode = 0755)
self.assertResourceCalled('Directory', '/usr/hdp/current/hbase-master/conf',
owner = 'hbase',
group = 'hadoop',
create_parents = True)
self.assertResourceCalled('Directory', '/tmp',
create_parents = True,
mode = 0777
)
self.assertResourceCalled('Directory', '/hadoop',
create_parents = True,
cd_access = 'a',
)
self.assertResourceCalled('Execute', ('chmod', '1777', u'/hadoop'),
sudo = True,
)
self.assertResourceCalled('XmlConfig', 'hbase-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hbase-master/conf',
configurations = self.getConfig()['configurations']['hbase-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hbase-site'])
self.assertResourceCalled('XmlConfig', 'core-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hbase-master/conf',
configurations = self.getConfig()['configurations']['core-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['core-site'])
self.assertResourceCalled('XmlConfig', 'hdfs-site.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hbase-master/conf',
configurations = self.getConfig()['configurations']['hdfs-site'],
configuration_attributes = self.getConfig()['configurationAttributes']['hdfs-site'])
self.assertResourceCalled('XmlConfig', 'hbase-policy.xml',
owner = 'hbase',
group = 'hadoop',
conf_dir = '/usr/hdp/current/hbase-master/conf',
configurations = self.getConfig()['configurations']['hbase-policy'],
configuration_attributes = self.getConfig()['configurationAttributes']['hbase-policy'])
self.assertResourceCalled('File', '/usr/hdp/current/hbase-master/conf/hbase-env.sh',
owner = 'hbase',
content = InlineTemplate(self.getConfig()['configurations']['hbase-env']['content']),
group = 'hadoop'
)
self.assertResourceCalled('Directory', '/etc/security/limits.d',
owner = 'root',
group = 'root',
create_parents = True,
)
self.assertResourceCalled('File', '/etc/security/limits.d/hbase.conf',
content = Template('hbase.conf.j2'),
owner = 'root',
group = 'root',
mode = 0644,
)
self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/hadoop-metrics2-hbase.properties',
owner = 'hbase',
template_tag = 'GANGLIA-MASTER')
self.assertResourceCalled('TemplateConfig', '/usr/hdp/current/hbase-master/conf/regionservers',
owner = 'hbase',
template_tag = None)
self.assertResourceCalled('Directory', '/var/run/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('Directory', '/var/log/hbase',
owner = 'hbase',
create_parents = True,
mode = 0755,
cd_access = 'a',
)
self.assertResourceCalled('File',
'/usr/hdp/current/hbase-master/conf/log4j.properties',
mode=0644,
group='hadoop',
owner='hbase',
content=InlineTemplate('log4jproperties\nline2'))
self.assertResourceCalled('HdfsResource', 'hdfs://nn1/apps/hbase/data',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://nn1',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
)
self.assertResourceCalled('HdfsResource', '/apps/hbase/staging',
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://nn1',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
owner = 'hbase',
hadoop_conf_dir = '/etc/hadoop/conf',
type = 'directory',
action = ['create_on_execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
mode = 0711,
)
self.assertResourceCalled('HdfsResource', None,
immutable_paths = self.DEFAULT_IMMUTABLE_PATHS,
security_enabled = False,
hadoop_bin_dir = '/usr/hdp/current/hadoop-client/bin',
keytab = UnknownConfigurationMock(),
default_fs = 'hdfs://nn1',
hdfs_site = self.getConfig()['configurations']['hdfs-site'],
kinit_path_local = '/usr/bin/kinit',
principal_name = UnknownConfigurationMock(),
user = 'hdfs',
dfs_type = '',
action = ['execute'], hdfs_resource_ignore_file='/var/lib/ambari-agent/data/.hdfs_resource_ignore',
hadoop_conf_dir = '/etc/hadoop/conf',
)
self.assertResourceCalled('Execute', '/usr/hdp/current/hbase-master/bin/hbase-daemon.sh --config /usr/hdp/current/hbase-master/conf start master',
not_if = 'ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E test -f /var/run/hbase/hbase-hbase-master.pid && ps -p `ambari-sudo.sh [RMF_ENV_PLACEHOLDER] -H -E cat /var/run/hbase/hbase-hbase-master.pid` >/dev/null 2>&1',
user = 'hbase')
self.assertNoMoreResources()
def test_upgrade_backup(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_upgrade.py",
classname = "HbaseMasterUpgrade",
command = "take_snapshot",
config_file="hbase-preupgrade.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled('Execute', " echo 'snapshot_all' | /usr/hdp/current/hbase-client/bin/hbase shell",
user = 'hbase')
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_pre_upgrade_restart(self, call_mock):
call_mock.side_effect = [(0, None), (0, None)]
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.2.1.0-3242'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalled('Execute',
('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True,)
self.assertFalse(call_mock.called)
self.assertNoMoreResources()
@patch("resource_management.core.shell.call")
def test_upgrade_23(self, call_mock):
call_mock.side_effect = [(0, None), (0, None)]
config_file = self.get_src_folder()+"/test/python/stacks/2.0.6/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/hbase_master.py",
classname = "HbaseMaster",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier('Execute', ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'hbase-master', version), sudo=True)
| [
"[email protected]"
]
| |
fca514c7a4f0ee40a26053eec26098b988257080 | cbb0bd995f5ecb64f93a30d5f1dcd106e3241214 | /Scripts/torch_mnist_dense_import.py | 9286cc72ef2a624b2f8f95f57cb61117afb3ff79 | [
"BSD-3-Clause",
"BSD-2-Clause"
]
| permissive | data61/MP-SPDZ | 324010a4caaa403f64d769a276d58931e0ed274e | 5c26feece05e13387fc9bd2ef3f09b2735d6ea4b | refs/heads/master | 2023-08-10T01:25:33.653174 | 2023-08-09T02:13:34 | 2023-08-09T02:13:34 | 152,511,277 | 724 | 277 | NOASSERTION | 2023-07-21T04:43:18 | 2018-10-11T01:16:16 | C++ | UTF-8 | Python | false | false | 1,225 | py | #!/usr/bin/env python3
# test model output by torch_mnist_dense.mpc
import torchvision
import torch
import torch.nn as nn
import numpy
net = nn.Sequential(
nn.Flatten(),
nn.Linear(28 * 28, 128),
nn.ReLU(),
nn.Linear(128, 128),
nn.ReLU(),
nn.Linear(128, 10)
)
# the trained parameters were dumped as raw doubles in state_dict order;
# open in binary mode for numpy.fromfile
f = open('Player-Data/Binary-Output-P0-0', 'rb')
state = net.state_dict()
for name in state:
shape = state[name].shape
size = numpy.prod(shape)
var = numpy.fromfile(f, 'double', count=size)
var = var.reshape(shape)
state[name] = torch.Tensor(var)
net.load_state_dict(state)
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor()])
with torch.no_grad():
ds = torchvision.datasets.MNIST(root='/tmp', transform=transform,
train=False)
total = correct_classified = 0
for data in torch.utils.data.DataLoader(ds, batch_size=128):
inputs, labels = data
outputs = net(inputs)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct_classified += (predicted == labels).sum().item()
test_acc = (100 * correct_classified / total)
print('Test accuracy of the network: %.2f %%' % test_acc)
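# Note (usage): this assumes torch_mnist_dense.mpc has been run first so that
# Player-Data/Binary-Output-P0-0 contains the trained weights, and that the
# Sequential model above matches the architecture trained under MPC.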
| [
"[email protected]"
]
| |
8baef651c1c9e325c36893dbe9d0a0e5a375842e | 9f69c4c61ca2a2082643f9316354826f6144e1f5 | /TAP2014/E.py | 7b156e5cad39797e8a1be50f512dfac1aa697d0f | []
| no_license | julianferres/Competitive-Programming | 668c22cf5174c57a2f3023178b1517cb25bdd583 | c3b0be4f796d1c0d755a61a6d2f3665c86cd8ca9 | refs/heads/master | 2022-03-23T05:56:53.790660 | 2022-03-18T14:29:33 | 2022-03-18T14:29:33 | 146,931,407 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | from collections import deque
A,N = map(int,input().split())
from math import inf
a = []
for i in range(A):
a.append(list(map(int,input().split()))[1:])
dist = {i:inf for i in range(1,N+1)}
v = {i:[] for i in range(1,N+1)}
lvisto = [False]*A
dist[1]=0
for i in range(A):
for j in a[i]:
v[j].append(i)
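# BFS over the bipartite graph of people and lists: moving through one list
# costs one step, so dist[i] ends up as the minimum number of lists needed
# to reach person i from person 1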
q = deque([1]) # BFS queue seeded with person 1 (dist[1] is already 0 above)
while(len(q)>0):
first = q.popleft()
for lista in v[first]:
if(lvisto[lista]==False):
lvisto[lista] = True
for i in a[lista]:
if(dist[i]==inf):
q.append(i)
dist[i] = min(dist[i],dist[first]+1)
D = 0
M = -inf
S = 0
for i in dist:
if(dist[i]<inf):
D+=1
S+=dist[i]
M = max(dist[i],M)
print(D,M,S) | [
"[email protected]"
]
| |
20be9797435272b97f65017a680b236f7429c7a7 | 49a85f8e50bf91c9c358f190226d17b0d035899e | /apps/messaging/migrations/0004_auto_20161127_1007.py | a758607211b3feffdf5d4e5dc536a97bf40174e6 | []
| no_license | hpatel1/chat | f70679c2b5dd5ca49e77096572859dc1b219d1da | e9abadf3021bee36d7f2255a79f4cd890c8ecf00 | refs/heads/master | 2020-06-16T20:09:46.264555 | 2016-11-29T12:37:36 | 2016-11-29T12:37:36 | 75,069,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-11-27 10:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('messaging', '0003_auto_20161127_1006'),
]
operations = [
migrations.AlterField(
model_name='chatroom',
name='name',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
| [
"[email protected]"
]
| |
f277ff241b24a7a2719c4f8c5fda3d66d1f2d97b | f0ee740667f840250c39412736d9fa8ec45261d8 | /squares.py | 2e766bd8978a70056c011a24938ca003d32a66cc | []
| no_license | MartynasTru/Cafe-Wall | 59e4eeacda4a54678352175549b1608d1c762840 | 940913b533b8ed3f3b01b8db9c40c3a47265a89a | refs/heads/main | 2023-04-21T08:03:20.691253 | 2021-04-29T23:25:44 | 2021-04-29T23:25:44 | 362,972,366 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,029 | py | #Student Number: 19019021
import sys
from ezgraphics import GraphicsWindow
WIDTH = 500
BOARD_DIMENSION = 10
SQUARE_DIMENSION = WIDTH / BOARD_DIMENSION
MORTAR_WIDTH = 2
MORTAR_COLOR = "grey"
def draw_cafe_wall(canvas, x, y, square_dimension, board_dimension):
"""
Draw cafe wall illusion.
Parameters
----------
canvas : GraphicsCanvas
A reference to the GraphicsCanvas contained in the window
x : int
x coordinate of the top-left corner of the row
y : int
y coordinate of the top-left corner of the row
square_dimension : int
side length in pixels of a square
board_dimension : int
number of squares in one side of the board
"""
for x in range(board_dimension + 1):
for y in range(board_dimension):
if(y%2 == 0):
if(x%2 ==0):
canvas.setFill("white")
else:
if(x%2 ==1):
canvas.setFill("white")
if(y%2 == 1):
canvas.setOutline(MORTAR_COLOR)
canvas.setLineWidth(MORTAR_WIDTH)
canvas.drawRectangle(x*square_dimension, y*square_dimension, square_dimension, square_dimension)
else:
canvas.setOutline(MORTAR_COLOR)
canvas.setLineWidth(MORTAR_WIDTH)
#draw the even rows shifted back by half a square to create the illusion
canvas.drawRectangle(x*square_dimension - square_dimension/2, y*square_dimension, square_dimension, square_dimension)
canvas.setFill("black")
pass
def draw_chequerboard(canvas, x, y, square_dimension, board_dimension,
chequers=False):
"""
Draw the board.
Parameters
----------
canvas : GraphicsCanvas
A reference to the GraphicsCanvas contained in the window
x : int
x coordinate of the top-left corner of the row
y : int
y coordinate of the top-left corner of the row
square_dimension : int
side length in pixels of a square
board_dimension : int
number of squares in a side of the board
chequers : boolean
Used to indicate whether the squares in every row should be
alternately black and white
"""
if(chequers == False):
for x in range(board_dimension):
for y in range(board_dimension):
if(x%2 == 0):
canvas.setFill("white")
canvas.drawRectangle(x*square_dimension, y*square_dimension, square_dimension, square_dimension)
canvas.setFill("black")
elif(chequers == True):
for x in range(board_dimension):
for y in range(board_dimension):
if(y%2 == 0):
if(x%2 ==0):
canvas.setFill("white")
else:
if(x%2 ==1):
canvas.setFill("white")
canvas.drawRectangle(x*square_dimension, y*square_dimension, square_dimension, square_dimension)
canvas.setFill("black")
pass
def usage_message():
"""
Return usage message for this program.
Returns
-------
str
Usage message
"""
return 'illusion.py: incorrect usage\n' \
+ 'Use:\n' \
+ '\tillusion.py -chequers or\n' \
+ '\tillusion.py -columns or\n' \
+ '\tillusion.py -cafe or\n' \
+ '\tillusion.py'
def check_args(args):
"""
Check the arguments passed to the program.
Parameters
----------
args : list
List of strings in argv containing the options passed to this program
Returns
-------
boolean
False if option is invalid, True if option is valid
"""
# Process the command line input
valid_option = False
if len(args) == 1:
valid_option = args[0] == "-chequers" or args[0] == "-cafe"\
or args[0] == "-columns"
elif len(args) == 0:
valid_option = True
return valid_option
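# Informal examples: check_args(["-cafe"]) and check_args([]) return True,
# while check_args(["-foo"]) or check_args(["-cafe", "-x"]) return False.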
def main():
args = sys.argv[1:]
okay = check_args(args)
option = "-columns"
if okay:
# Create a graphics window (WIDTH x WIDTH pixels):
win = GraphicsWindow(WIDTH, WIDTH)
# Access the canvas contained in the graphics window:
canvas = win.canvas()
if len(args) == 1:
option = args[0]
if option == "-columns":
draw_chequerboard(canvas, 0, 0, SQUARE_DIMENSION, BOARD_DIMENSION)
elif option == "-chequers":
draw_chequerboard(canvas, 0, 0, SQUARE_DIMENSION, BOARD_DIMENSION,
chequers=True)
else: # args[0] == "-cafe":
draw_cafe_wall(canvas, 0, 0, SQUARE_DIMENSION, BOARD_DIMENSION)
# Wait for the user to close the window
win.wait()
else:
print(usage_message())
exit(1)
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
74decdc058c850ed5d987ffa614a9bccec3acc72 | 6060d22d07bbde9d317cbd5af063b9b5015e62c1 | /EELS_KK/pyfiles/bash_train_pyfiles/KK_one_pixel.py | f41cc579f734629a1a9bb620cc12286f88939712 | []
| no_license | krishna999/CBL-ML | a1d389ad70975d2a0fdd101e66a68ce42c2fc0fe | ff850d9820d5295c3bdff88feebbd635f502fb87 | refs/heads/master | 2023-06-28T09:30:55.639732 | 2021-07-15T15:44:53 | 2021-07-15T15:44:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,756 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 16 22:59:40 2021
@author: isabel
"""
import numpy as np
import sys
from scipy.optimize import curve_fit
from k_means_clustering import k_means
import matplotlib.pyplot as plt
from image_class_bs import Spectral_image, smooth_1D
def median(data):
return np.nanpercentile(data, 50, axis = 0)
def low(data):
return np.nanpercentile(data, 16, axis = 0)
def high(data):
return np.nanpercentile(data, 84, axis = 0)
def summary_distribution(data):
return [median(data), low(data), high(data)]
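# These helpers condense the Monte Carlo model replicas into a median plus a
# 68% credible interval (16th-84th percentile). Minimal sketch with synthetic
# data (shapes are illustrative only):
# data = np.random.normal(size=(500, 10)) # (n_models, n_energies)
# mid, lo_, hi_ = summary_distribution(data) # three arrays of shape (10,)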
def bandgap(x, amp, BG,b):
result = np.zeros(x.shape)
result[x<BG] = 1
result[x>=BG] = amp * (x[x>=BG] - BG)**(b)
return result
def bandgap_b(x, amp, BG):
b = 1.5 #standard value: 1.5 corresponds to an indirect bandgap onset, 0.5 to a direct one (cf. Rafferty & Brown) #TODO confirm reference
result = np.zeros(x.shape)
result[x<BG] = 1
result[x>=BG] = amp * (x[x>=BG] - BG)**(b)
return result
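# Sketch of how these onset models are fitted further down; the numbers here
# are synthetic and only illustrate that curve_fit recovers (amp, BG):
# E = np.linspace(0.5, 3.0, 200)
# I_syn = bandgap_b(E, 300, 1.4)
# popt, _ = curve_fit(bandgap_b, E, I_syn, p0=[400, 1.0], bounds=([0, 0.5], np.inf))
# popt then comes out close to [300, 1.4]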
def find_pixel_cordinates(idx, n_y):
y = int(idx%n_y)
x = int((idx-y)/n_y)
return [x,y]
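# e.g. with n_y = 60 pixels per column, flat index 130 maps back to
# find_pixel_cordinates(130, 60) == [2, 10]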
"""
path_to_models = "/Users/isabel/Documents/Studie/MEP/CBL-ML/EELS_KK/pyfiles/bash_train_pyfiles/models/train_004"
# im = im
im = Spectral_image.load_data('../../dmfiles/h-ws2_eels-SI_004.dm4')
im.cluster(5)
#rank = 84
rank = 64
j = 80
im.load_ZLP_models_smefit(path_to_models=path_to_models, n_rep = 500)
ZLPs = im.calc_gen_ZLPs(rank,j,path_to_models=path_to_models, n_rep = 500)
n_model = len(im.ZLP_models)
epss, ts, S_Es, IEELSs = im.KK_pixel(rank, j)
E_cross = np.zeros(im.image_shape[1], dtype = 'object')
E_bands = np.zeros(n_model)
bs = np.zeros(n_model)
E_cross_pix = np.empty(0)
n_cross_pix = np.zeros(n_model)
for i in range(n_model):
IEELS = IEELSs[i]
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>0.5) & (im.deltaE<3.7)], IEELS[(im.deltaE>0.5) & (im.deltaE<3.7)], p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands[i] = popt[1]
bs[i] = popt[2]
crossing = np.concatenate((np.array([0]),(smooth_1D(np.real(epss[i]),50)[:-1]<0) * (smooth_1D(np.real(epss[i]),50)[1:] >=0)))
deltaE_n = im.deltaE[im.deltaE>0]
#deltaE_n = deltaE_n[50:-50]
crossing_E = deltaE_n[crossing.astype('bool')]
E_cross_pix = np.append(E_cross_pix, crossing_E)
n_cross_pix[i] = len(crossing_E)
n_cross_lim = round(np.nanpercentile(n_cross_pix, 90))
# if n_cross_lim > E_cross.shape[1]:
# E_cross_new = np.zeros((im.image_shape[1],n_cross_lim,3))
# E_cross_new[:,E_cross.shape[1],:] = E_cross
# E_cross = E_cross_new
# del E_cross_new
E_cross_pix_n, r = k_means(E_cross_pix, n_cross_lim)
E_cross_pix_n = np.zeros((n_cross_lim, 3))
for i in range(n_cross_lim):
E_cross_pix_n[i, :] = summary_distribution(E_cross_pix[r[i].astype(bool)])
# E_cross[j,i,:] = summary_distribution(E_cross_pix[r[i].astype(bool)])
# eps[j,:,:] = summary_distribution(epss)
# t[j,:] = summary_distribution(ts)
E_cross[j] = E_cross_pix_n
# n_cross[j,:] = summary_distribution(n_cross_pix)
# E_band[j] = summary_distribution(E_bands)
"""
path_to_models = "/Users/isabel/Documents/Studie/MEP/CBL-ML/EELS_KK/pyfiles/bash_train_pyfiles/models/train_004"
path_to_models = "/Users/isabel/Documents/Studie/MEP/CBL-ML/EELS_KK/pyfiles/bash_train_pyfiles/dE1/E1_05/"
path_to_models = "/Users/isabel/Documents/Studie/MEP/CBL-ML/EELS_KK/pyfiles/bash_train_pyfiles/models/dE2_8_times_dE1/train_004_not_pooled"
path_to_models = 'models/dE2_3_times_dE1/train_004_pooled_5_CI_1_dE1_times_07_epochs_1e6_scale_on_pooled_clu_log_10/'
path_to_models = 'models/dE2_3_times_dE1/train_lau_pooled_5_CI_1_dE1_times_07_epochs_1e6_scale_on_pooled_clu_log_5/'
path_to_models = 'models/dE_n10-inse_SI-003/E1_05'
path_to_models = 'models/report/004_clu10_p5_final_35dE1_06dE1/'
# path_to_models = "/Users/isabel/Documents/Studie/MEP/CBL-ML/EELS_KK/pyfiles/bash_train_pyfiles/models/train_lau_log"
# im = im
im = Spectral_image.load_data('../../dmfiles/h-ws2_eels-SI_004.dm4')
# im = Spectral_image.load_data('../../dmfiles/area03-eels-SI-aligned.dm4')
# im = Spectral_image.load_data('../../dmfiles/10n-dop-inse-B1_stem-eels-SI-processed_003.dm4')
name = ""
# name = "Lau's sample, "
[n_x, n_y] = im.image_shape
row = 50
j_b = 50
if row >= n_x:
sys.exit()
im.load_ZLP_models_smefit(path_to_models, name_in_path = False)
im.set_n(4.1462, n_background = 2.1759)
im.set_n(3.0) # overrides the refractive index set on the previous line
# microscope settings used by the Kramers-Kronig analysis:
im.e0 = 200 #beam energy [keV]
im.beta = 30 #collection semi-angle [mrad]; alternative value 67.2 left commented
#%%
try_pixels = [30,60,90,120]
# result arrays per probed pixel; the length-3 axis stores [median, 16th, 84th percentile]
ieels = np.zeros((len(try_pixels),3,im.l))
eps = (1+1j)*np.zeros((len(try_pixels),3, np.sum(im.deltaE>0)))
ss = np.zeros((len(try_pixels),3, np.sum(im.deltaE>0)))
ssratio = np.zeros((len(try_pixels),3))
t = np.zeros((len(try_pixels),3))
E_cross = np.zeros(len(try_pixels), dtype = 'object')
n_cross = np.zeros((len(try_pixels),3))
max_ieels = np.zeros((len(try_pixels),3))
E_band_08 = np.zeros((len(try_pixels),3))
A_08 = np.zeros((len(try_pixels),3))
E_band_12 = np.zeros((len(try_pixels),3))
A_12 = np.zeros((len(try_pixels),3))
E_band_02 = np.zeros((len(try_pixels),3))
A_02 = np.zeros((len(try_pixels),3))
E_band_04 = np.zeros((len(try_pixels),3))
A_04 = np.zeros((len(try_pixels),3))
E_band_06 = np.zeros((len(try_pixels),3))
A_06 = np.zeros((len(try_pixels),3))
n_model = len(im.ZLP_models)
n_fails = 0
#%%
dia_pooled = 5
im.pool(dia_pooled)
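# pooling: each spectrum is averaged with its neighbours over a 5-pixel
# diameter, suppressing per-pixel noise before the Kramers-Kronig analysis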
#%%
for j in range(len(try_pixels)):
col = try_pixels[j]
[ts, IEELSs, max_ieelss], [epss, ts_p, S_ss_p, IEELSs_p, max_ieels_p] = im.KK_pixel(col,row, signal = "pooled")
n_model = len(IEELSs_p)
E_bands_08 = np.zeros(n_model)
E_bands_12 = np.zeros(n_model)
E_bands_02 = np.zeros(n_model)
E_bands_04 = np.zeros(n_model)
E_bands_06 = np.zeros(n_model)
As_08 = np.zeros(n_model)
As_12 = np.zeros(n_model)
As_02 = np.zeros(n_model)
As_04 = np.zeros(n_model)
As_06 = np.zeros(n_model)
E_cross_pix = np.empty(0)
n_cross_pix = np.zeros(n_model)
for i in range(n_model):
IEELS = IEELSs_p[i]
# upper limit of the bandgap-fit window: the first energy at which the
# signal exceeds 1/30 of its maximum, i.e. where the onset has clearly started
boundery_onset = np.max(IEELS)/30
first_not_onset = np.min(np.argwhere(IEELS>boundery_onset))
range2 = im.deltaE[first_not_onset]
if i%100 == 0:
print("pixel:",j, ", model:", i, ", boundery onset:", boundery_onset,", range2:", range2)
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
# range2 = dE1+0.8
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,range2-0.3], bounds=([0, 0.5],np.inf))
E_bands_08[i] = popt[1]
As_08[i] = popt[0]
except:
n_fails += 1
# try:
# cluster = im.clustered[col,row]
# dE1 = im.dE1[1,int(cluster)]
# range1 = dE1-1
# range2 = dE1+1.2
# baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# E_bands_12[i] = popt[1]
# As_12[i] = popt[0]
# except:
# n_fails += 1
# try:
# cluster = im.clustered[col,row]
# dE1 = im.dE1[1,int(cluster)]
# range1 = dE1-1
# range2 = dE1+0.2
# baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# As_02[i] = popt[0]
# E_bands_02[i] = popt[1]
# except:
# n_fails += 1
# try:
# cluster = im.clustered[col,row]
# dE1 = im.dE1[1,int(cluster)]
# range1 = dE1-1
# range2 = dE1+0.4
# baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# As_04[i] = popt[0]
# E_bands_04[i] = popt[1]
# except:
# n_fails += 1
# try:
# cluster = im.clustered[col,row]
# dE1 = im.dE1[1,int(cluster)]
# range1 = dE1-1
# range2 = dE1+0.6
# baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# As_06[i] = popt[0]
# E_bands_06[i] = popt[1]
# except:
# n_fails += 1
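# a zero crossing of Re(eps) from negative to positive marks a plasmon
# resonance; collect the crossing energies of the smoothed dielectric function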
crossing = np.concatenate((np.array([0]),(smooth_1D(np.real(epss[i]),50)[:-1]<0) * (smooth_1D(np.real(epss[i]),50)[1:] >=0)))
deltaE_n = im.deltaE[im.deltaE>0]
crossing_E = deltaE_n[crossing.astype('bool')]
E_cross_pix = np.append(E_cross_pix, crossing_E)
n_cross_pix[i] = len(crossing_E)
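# the number of crossings differs between replicas; use the 90th percentile of
# the per-replica counts as the number of k-means clusters, then summarize the
# crossing energies within each cluster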
n_cross_lim = round(np.nanpercentile(n_cross_pix, 90))
if n_cross_lim > 0:
E_cross_pix_n, r = k_means(E_cross_pix, n_cross_lim)
E_cross_pix_n = np.zeros((n_cross_lim, 3))
for i in range(n_cross_lim):
E_cross_pix_n[i, :] = summary_distribution(E_cross_pix[r[i].astype(bool)])
else:
E_cross_pix_n = np.zeros((0))
ieelssums = np.sum(IEELSs_p[:,im.deltaE>0], axis = 1)
sssums = np.sum(S_ss_p, axis = 1)
s_ratios = sssums/ieelssums
ieels[j,:,:] = summary_distribution(IEELSs_p)
ss[j,:,:] = summary_distribution(S_ss_p)
ssratio[j,:] = summary_distribution(s_ratios)
eps[j,:,:] = summary_distribution(epss)
t[j,:] = summary_distribution(ts)
E_cross[j] = E_cross_pix_n
n_cross[j,:] = summary_distribution(n_cross_pix)
max_ieels[j,:] = summary_distribution(max_ieelss)
E_band_08[j,:] = summary_distribution(E_bands_08)
A_08[j,:] = summary_distribution(As_08)
E_band_12[j,:] = summary_distribution(E_bands_12)
A_12[j,:] = summary_distribution(As_12)
E_band_02[j,:] = summary_distribution(E_bands_02)
A_02[j,:] = summary_distribution(As_02)
E_band_04[j,:] = summary_distribution(E_bands_04)
A_04[j,:] = summary_distribution(As_04)
E_band_06[j,:] = summary_distribution(E_bands_06)
A_06[j,:] = summary_distribution(As_06)
#%%
CI_E_band08 = (E_band_08[:,2]-E_band_08[:,1])/E_band_08[:,0]
CI_E_band12 = (E_band_12[:,2]-E_band_12[:,1])/E_band_12[:,0]
CI_E_band02 = (E_band_02[:,2]-E_band_02[:,1])/E_band_02[:,0]
CI_E_band04 = (E_band_04[:,2]-E_band_04[:,1])/E_band_04[:,0]
CI_E_band06 = (E_band_06[:,2]-E_band_06[:,1])/E_band_06[:,0]
E_bands = np.vstack((E_band_08[:,0],E_band_12[:,0],E_band_02[:,0],E_band_04[:,0],E_band_06[:,0]))
As = np.vstack((A_08[:,0],A_12[:,0],A_02[:,0],A_04[:,0],A_06[:,0]))
CI_E_band = np.vstack((CI_E_band08, CI_E_band12, CI_E_band02, CI_E_band04, CI_E_band06 ))
plt.figure()
plt.title(name + "relative CI E_band, dia_pool = " +str(dia_pooled))
plt.imshow(CI_E_band)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks(range(len(try_pixels)), ["[%d,%d]" % (row, p) for p in try_pixels])
plt.ylabel("upperlimit fittingrange = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
plt.figure()
plt.title(name + "relative CI E_band, capped at 1, dia_pool = " +str(dia_pooled))
plt.imshow(np.minimum(CI_E_band,1))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks(range(len(try_pixels)), ["[%d,%d]" % (row, p) for p in try_pixels])
plt.ylabel("upperlimit fittingrange = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
plt.figure()
plt.title(name + "E_band, dia_pool = " +str(dia_pooled))
plt.imshow(E_bands)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks(range(len(try_pixels)), ["[%d,%d]" % (row, p) for p in try_pixels])
plt.ylabel("upperlimit fittingrange = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "E_band, dia_pool = " +str(dia_pooled) + ", capped at 3")
plt.imshow((np.minimum(E_bands, 3)))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks(range(len(try_pixels)), ["[%d,%d]" % (row, p) for p in try_pixels])
plt.ylabel("upperlimit fittingrange = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
ranges = [0.8,1.2,0.2,0.4,0.6]
for j in range(len(try_pixels)):
cluster = im.clustered[try_pixels[j],row]
xmin = im.dE1[1,cluster] - 1
xmax = im.dE1[1,cluster] + 3.5
baseline = np.average(ieels[j,0,(im.deltaE>xmin -0.1) & (im.deltaE<xmin)])
ymax = np.max(ieels[j,0,(im.deltaE>xmin)&(im.deltaE<xmax)]) + 100
x_values = im.deltaE[(im.deltaE>xmin)&(im.deltaE<xmax)]
plt.figure()
plt.title(name + "bandgap fits pixel [64," + str(try_pixels[j]) + "], pooled with diameter " + str(dia_pooled))
plt.plot(x_values, ieels[j,0,(im.deltaE>xmin)&(im.deltaE<xmax)]-baseline, lw = 1.5)
plt.fill_between(x_values, ieels[j,1,(im.deltaE>xmin)&(im.deltaE<xmax)] - baseline, ieels[j,2,(im.deltaE>xmin)&(im.deltaE<xmax)]- baseline, alpha=0.3)
plt.vlines(im.dE1[1,cluster],0,500, linestyle = '--', color= 'black', label = 'dE1')
for i in range(len(E_bands)):
plt.plot(x_values, bandgap_b(x_values, As[i,j], E_bands[i,j]), label = str(ranges[i]))
plt.legend()
plt.ylim(-50,ymax)
#%% PART TWO: fitting with the exponent b as a free parameter
E_band_08 = np.zeros((len(try_pixels),3))
b_08 = np.zeros((len(try_pixels),3))
A_08 = np.zeros((len(try_pixels),3))
E_band_12 = np.zeros((len(try_pixels),3))
b_12 = np.zeros((len(try_pixels),3))
A_12 = np.zeros((len(try_pixels),3))
E_band_02 = np.zeros((len(try_pixels),3))
b_02 = np.zeros((len(try_pixels),3))
A_02 = np.zeros((len(try_pixels),3))
E_band_04 = np.zeros((len(try_pixels),3))
b_04 = np.zeros((len(try_pixels),3))
A_04 = np.zeros((len(try_pixels),3))
E_band_06 = np.zeros((len(try_pixels),3))
b_06 = np.zeros((len(try_pixels),3))
A_06 = np.zeros((len(try_pixels),3))
for j in range(len(try_pixels)):
# epss, ts, S_Es, IEELSs = im.KK_pixel(row, j, signal = "pooled")
col = try_pixels[j]
[ts, IEELSs, max_ieelss], [epss, ts_p, S_ss_p, IEELSs_p, max_ieels_p] = im.KK_pixel(col,row, signal = "pooled", iterations = 5)
n_model = len(IEELSs_p)
E_bands_08 = np.zeros(n_model)
E_bands_12 = np.zeros(n_model)
E_bands_02 = np.zeros(n_model)
E_bands_04 = np.zeros(n_model)
E_bands_06 = np.zeros(n_model)
bs_08 = np.zeros(n_model)
bs_12 = np.zeros(n_model)
bs_02 = np.zeros(n_model)
bs_04 = np.zeros(n_model)
bs_06 = np.zeros(n_model)
As_08 = np.zeros(n_model)
As_12 = np.zeros(n_model)
As_02 = np.zeros(n_model)
As_04 = np.zeros(n_model)
As_06 = np.zeros(n_model)
E_cross_pix = np.empty(0)
n_cross_pix = np.zeros(n_model)
for i in range(n_model):
IEELS = IEELSs_p[i]
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
range2 = dE1+0.8
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands_08[i] = popt[1]
bs_08[i] = popt[2]
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# E_bands_08[i] = popt[1]
As_08[i] = popt[0]
except:
n_fails += 1
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
range2 = dE1+1.2
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands_12[i] = popt[1]
bs_12[i] = popt[2]
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
# E_bands_12[i] = popt[1]
As_12[i] = popt[0]
except:
n_fails += 1
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
range2 = dE1+0.2
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands_02[i] = popt[1]
bs_02[i] = popt[2]
# popt, pcov = curve_fit(bandgap_b, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5], bounds=([0, 0.5],np.inf))
            As_02[i] = popt[0]
except:
n_fails += 1
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
range2 = dE1+0.4
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands_04[i] = popt[1]
bs_04[i] = popt[2]
As_04[i] = popt[0]
except:
n_fails += 1
try:
cluster = im.clustered[col,row]
dE1 = im.dE1[1,int(cluster)]
range1 = dE1-1
range2 = dE1+0.6
baseline = np.average(IEELS[(im.deltaE>range1 -0.1) & (im.deltaE<range1)])
popt, pcov = curve_fit(bandgap, im.deltaE[(im.deltaE>range1) & (im.deltaE<range2)], IEELS[(im.deltaE>range1) & (im.deltaE<range2)]-baseline, p0 = [400,1.5,0.5], bounds=([0, 0.5, 0],np.inf))
E_bands_06[i] = popt[1]
bs_06[i] = popt[2]
            As_06[i] = popt[0]
except:
n_fails += 1
        # upward zero crossings of the smoothed Re(eps): candidate crossing energies
        crossing = np.concatenate((np.array([0]),(smooth_1D(np.real(epss[i]),50)[:-1]<0) * (smooth_1D(np.real(epss[i]),50)[1:] >=0)))
deltaE_n = im.deltaE[im.deltaE>0]
crossing_E = deltaE_n[crossing.astype('bool')]
E_cross_pix = np.append(E_cross_pix, crossing_E)
n_cross_pix[i] = len(crossing_E)
    # cap the number of crossing clusters at the 90th-percentile count across models
    n_cross_lim = round(np.nanpercentile(n_cross_pix, 90))
if n_cross_lim > 0:
E_cross_pix_n, r = k_means(E_cross_pix, n_cross_lim)
E_cross_pix_n = np.zeros((n_cross_lim, 3))
for i in range(n_cross_lim):
E_cross_pix_n[i, :] = summary_distribution(E_cross_pix[r[i].astype(bool)])
else:
E_cross_pix_n = np.zeros((0))
ieels[j,:,:] = summary_distribution(IEELSs_p)
eps[j,:,:] = summary_distribution(epss)
t[j,:] = summary_distribution(ts)
E_cross[j] = E_cross_pix_n
n_cross[j,:] = summary_distribution(n_cross_pix)
max_ieels[j,:] = summary_distribution(max_ieelss)
E_band_08[j,:] = summary_distribution(E_bands_08)
b_08[j,:] = summary_distribution(bs_08)
A_08[j,:] = summary_distribution(As_08)
E_band_12[j,:] = summary_distribution(E_bands_12)
b_12[j,:] = summary_distribution(bs_12)
A_12[j,:] = summary_distribution(As_12)
E_band_02[j,:] = summary_distribution(E_bands_02)
b_02[j,:] = summary_distribution(bs_02)
A_02[j,:] = summary_distribution(As_02)
E_band_04[j,:] = summary_distribution(E_bands_04)
b_04[j,:] = summary_distribution(bs_04)
A_04[j,:] = summary_distribution(As_04)
E_band_06[j,:] = summary_distribution(E_bands_06)
b_06[j,:] = summary_distribution(bs_06)
A_06[j,:] = summary_distribution(As_06)
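#%% Sketch (assumption): `summary_distribution` is defined earlier in this file.
# From its use above (index 0 = central value, indices 1/2 = CI bounds feeding the
# relative-CI maps below), a plausible stand-in is a percentile summary:
def summary_distribution_sketch(values, ci=68):
    low = (100 - ci) / 2
    return np.array([np.nanpercentile(values, 50, axis=0),
                     np.nanpercentile(values, low, axis=0),
                     np.nanpercentile(values, 100 - low, axis=0)])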
#%%
CI_b08 = (b_08[:,2]-b_08[:,1])/b_08[:,0]
CI_b12 = (b_12[:,2]-b_12[:,1])/b_12[:,0]
CI_b02 = (b_02[:,2]-b_02[:,1])/b_02[:,0]
CI_b04 = (b_04[:,2]-b_04[:,1])/b_04[:,0]
CI_b06 = (b_06[:,2]-b_06[:,1])/b_06[:,0]
CI_E_band08 = (E_band_08[:,2]-E_band_08[:,1])/E_band_08[:,0]
CI_E_band12 = (E_band_12[:,2]-E_band_12[:,1])/E_band_12[:,0]
CI_E_band02 = (E_band_02[:,2]-E_band_02[:,1])/E_band_02[:,0]
CI_E_band04 = (E_band_04[:,2]-E_band_04[:,1])/E_band_04[:,0]
CI_E_band06 = (E_band_06[:,2]-E_band_06[:,1])/E_band_06[:,0]
bs = np.vstack((b_08[:,0],b_12[:,0],b_02[:,0],b_04[:,0],b_06[:,0]))
E_bands = np.vstack((E_band_08[:,0],E_band_12[:,0],E_band_02[:,0],E_band_04[:,0],E_band_06[:,0]))
As = np.vstack((A_08[:,0],A_12[:,0],A_02[:,0],A_04[:,0],A_06[:,0]))
CI_b = np.vstack((CI_b08, CI_b12, CI_b02, CI_b04, CI_b06 ))
CI_E_band = np.vstack((CI_E_band08, CI_E_band12, CI_E_band02, CI_E_band04, CI_E_band06 ))
plt.figure()
plt.title(name + "relative CI b, dia_pool = " +str(dia_pooled))
plt.imshow(CI_b)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "relative CI E_band, dia_pool = " +str(dia_pooled))
plt.imshow(CI_E_band)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
plt.figure()
plt.title(name + "relative CI b, capped at 1, dia_pool = " +str(dia_pooled))
plt.imshow(np.minimum(CI_b,1))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "relative CI E_band, capped at 1, dia_pool = " +str(dia_pooled))
plt.imshow(np.minimum(CI_E_band,1))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
plt.figure()
plt.title(name + "b, dia_pool = " +str(dia_pooled))
plt.imshow(bs)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "b, dia_pool = " +str(dia_pooled) + ", capped at 1")
plt.imshow((np.minimum(bs,1)))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "E_band, dia_pool = " +str(dia_pooled) + ", capped at 3")
plt.imshow(E_bands)
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
plt.figure()
plt.title(name + "E_band, dia_pool = " +str(dia_pooled) + ", capped at 3")
plt.imshow((np.minimum(E_bands, 3)))
plt.yticks([0,1,2,3,4],[0.8,1.2,0.2,0.4,0.6])
plt.xticks([0,1,2,3],["[64,0]","[64,30]","[64,60]","[64,90]"])
plt.ylabel("upper limit of fitting range = dE1 + ... [eV]")
plt.xlabel("pixel")
plt.colorbar()
#%%
ranges = [0.8,1.2,0.2,0.4,0.6]
for j in range(len(try_pixels)):
cluster = im.clustered[try_pixels[j],row]
xmin = im.dE1[1,cluster] - 1
xmax = im.dE1[1,cluster] + 3.5
baseline = np.average(ieels[j,0,(im.deltaE>xmin -0.1) & (im.deltaE<xmin)])
ymax = np.max(ieels[j,0,(im.deltaE>xmin)&(im.deltaE<xmax)]) + 100
x_values = im.deltaE[(im.deltaE>xmin)&(im.deltaE<xmax)]
plt.figure()
plt.title(name + "bandgap fits pixel [64," + str(try_pixels[j]) + "], pooled with diameter " + str(dia_pooled))
plt.plot(x_values, ieels[j,0,(im.deltaE>xmin)&(im.deltaE<xmax)]-baseline, lw = 1.5)
plt.fill_between(x_values, ieels[j,1,(im.deltaE>xmin)&(im.deltaE<xmax)] - baseline, ieels[j,2,(im.deltaE>xmin)&(im.deltaE<xmax)]- baseline, alpha=0.3)
plt.vlines(im.dE1[1,cluster],0,500, linestyle = '--', color= 'black', label = 'dE1')
for i in range(len(E_bands)):
plt.plot(x_values, bandgap(x_values, As[i,j], E_bands[i,j], bs[i,j]), label = str(ranges[i]))
# plt.plot(x_values, bandgap_b(x_values, As[i,j], E_bands[i,j]), label = str(ranges[i]))
plt.legend()
plt.ylim(-50,ymax)
| [
"[email protected]"
]
| |
7cb3d636c58d86cdc2cefe708b06c997c54f9e12 | a37e93d23ea659bafaac8034dc7aeed65bc5eb38 | /stock_landed_costs_extend/models/stock_landed_cost.py | d1a3f0aeec36ad5dd0efc875329cccc986d2f7da | []
| no_license | kit9/sct-ecommerce | 71831541264809715b31d802d260474975d04536 | 6c440a514eac8101474b6720e409a81c58bd3b7c | refs/heads/master | 2022-10-03T23:35:31.563670 | 2020-06-09T12:35:06 | 2020-06-09T12:35:06 | 271,020,689 | 0 | 4 | null | 2020-06-09T14:10:49 | 2020-06-09T14:10:48 | null | UTF-8 | Python | false | false | 821 | py | #-*- coding: utf-8 -*-
from odoo import models, fields, api, _
class LandedCost(models.Model):
_inherit = 'stock.landed.cost'
def _default_account_journal_id(self):
"""Take the journal configured in the company, else fallback on the stock journal."""
lc_journal = self.env['account.journal']
ir_property = self.env['ir.property'].search([
('name', '=', 'property_stock_journal'),
('company_id', '=', self.env.user.company_id.id)
], limit=1)
if ir_property:
lc_journal = ir_property.get_by_record()
return lc_journal
account_journal_id = fields.Many2one(
'account.journal', 'Account Journal',
required=True, states={'done': [('readonly', True)]}, default=lambda self: self._default_account_journal_id())
| [
"[email protected]"
]
| |
be128babb87f429822e3dd3ac730543b9f9ae91c | d190750d6cb34e9d86ae96724cf4b56a2f57a74a | /tests/r/test_ant111b.py | 9d0bf1c1aea84feb40beba133832bb43a7e48cfa | [
"Apache-2.0"
]
| permissive | ROAD2018/observations | a119f61a48213d791de0620804adb8d21c2ad9fb | 2c8b1ac31025938cb17762e540f2f592e302d5de | refs/heads/master | 2021-09-24T04:28:02.725245 | 2018-09-16T23:06:30 | 2018-09-16T23:06:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.ant111b import ant111b
def test_ant111b():
"""Test module ant111b.py by downloading
ant111b.csv and testing shape of
extracted data has 32 rows and 9 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = ant111b(test_path)
try:
assert x_train.shape == (32, 9)
except:
shutil.rmtree(test_path)
    raise
| [
"[email protected]"
]
| |
3f0d512ffc2d5ae6e83bb3809a709242677010da | 3aae4f5187d4600e3fbe72d15e27ac7ea64c3608 | /hyranote/cmd_simple.py | a7ac073bed91cb7cba100d96ae548246942fb676 | [
"MIT"
]
| permissive | huuhoa/hyranote | 96335081569271ee61bb8dba7c8f8aab646feaf7 | bbd91256790a0fb19699761697d0121c93ffbe88 | refs/heads/main | 2023-07-27T13:08:07.074709 | 2021-09-07T23:35:14 | 2021-09-07T23:35:14 | 318,357,069 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 837 | py | import os
import plistlib
from hyranote.hyranote import SimpleGenerator
from hyranote.hutil import copy_resources
def simple_generate_contents(args):
input_dir = os.path.expanduser(args.input)
copy_resources(input_dir, os.path.join(args.output, 'images'))
with open(os.path.join(input_dir, 'contents.xml'), 'rb') as fp:
data = plistlib.load(fp)
canvas = data['canvas']
mind_maps = canvas['mindMaps']
for main_node in mind_maps:
generator = SimpleGenerator(main_node,
{
'input': args.input,
'output_dir': args.output,
'author': args.author,
'logging': args.verbose,
})
generator.generate()
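# Sketch (assumption): `args` is an argparse-style namespace with the four fields
# read above (input, output, author, verbose), e.g.
#
#     from argparse import Namespace
#     simple_generate_contents(Namespace(input='~/Note.hyranote', output='build',
#                                        author='Jane Doe', verbose=False))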
| [
"[email protected]"
]
| |
3426a8908d8572f4ffd915d0b142d8a4369c5ece | a760e489c8b4d46e0af0c5e0711e0b6da38335f5 | /Examples/qfem-0010/src/postprocess.py | 310bbeaed870531639034205216610ad896ddb66 | [
"BSD-2-Clause-Views"
]
| permissive | NHERI-SimCenter/quoFEM | b042e30badad9551c89a5f355639891644e4487e | 18866db16a9d9025c73e90c5523087a9e1f3eccf | refs/heads/master | 2023-08-08T22:07:49.364185 | 2023-07-20T17:50:40 | 2023-07-20T17:50:40 | 102,532,135 | 10 | 26 | NOASSERTION | 2023-08-18T22:43:39 | 2017-09-05T21:37:34 | C++ | UTF-8 | Python | false | false | 2,045 | py | #!/usr/bin/python
# written: fmk 01/18
import sys
import re
#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)
inputArgs = sys.argv
#outFile = open('results.out','w')
#
# process output file "SimCenterOut.txt" for nodal displacements
#
ndm = 0
with open ('SimCenterOut.txt', 'rt') as inFile:
line = inFile.readline()
notFound = None
while line and notFound == None:
line = inFile.readline()
notFound = re.search("Spatial Dimension of Mesh - - - - - :",line)
words = line.split()
ndm = int(words[len(words)-1])
notFound = None
while line and notFound == None:
line = inFile.readline()
notFound = re.search("N o d a l D i s p l a c e m e n t s",line)
line = inFile.readline()
line = inFile.readline()
line = inFile.readline()
line = inFile.readline()
displ = []
displ.append(line.split())
numNode = 1
while True:
line2 = inFile.readline()
words = line2.split()
if (len(words) != 5):
break;
displ.append(words)
numNode += 1
inFile.close()
#
# now process the input args and write the results file
#
outFile = open('results.out','w')
#
# note for now assuming no ERROR in user data
#
for i in inputArgs[1:]:
theList=i.split('_')
if (theList[0] == 'Node'):
nodeTag = int(theList[1])
if (nodeTag > 0 and nodeTag <= numNode):
if (theList[2] == 'Disp'):
dof = int(theList[3])
numNodalDOF = len(displ[nodeTag-1])-1-ndm;
if (dof > 0 and dof <= numNodalDOF):
nodeDisp = displ[nodeTag-1]
nodeDisp = displ[nodeTag-1][ndm+dof]
outFile.write(nodeDisp)
outFile.write(' ')
else:
outFile.write('0. ')
else:
outFile.write('0. ')
else:
outFile.write('0. ')
else:
outFile.write('0. ')
outFile.close()
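# Invocation sketch (assumption): each CLI argument names a quantity of interest
# as Node_<tag>_Disp_<dof>, matching the parsing above, e.g.
#
#     python postprocess.py Node_1_Disp_1 Node_2_Disp_2
#
# which writes the matching nodal displacements (or '0.' on any lookup miss),
# space-separated, to results.out.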
| [
"[email protected]"
]
| |
d01afb3ac01213c93a2d43e8c810482b1db65f15 | 57dbccc0df6a999a9c081396a1c84d802a18008e | /src/utils/DubinsPath.py | 993762cc2a143e4b5039d6f9139e1e2541e2d237 | [
"MIT"
]
| permissive | the-jojo/SpiderBUG | 309cc6bddc9fd8b724606eca0171c4c90e068c15 | 6ed77b0896966e90afcaf695e68a546a1921f8f6 | refs/heads/master | 2023-03-08T22:33:19.313283 | 2021-03-06T15:12:39 | 2021-03-06T15:12:39 | 291,644,894 | 2 | 1 | null | 2020-09-15T18:55:38 | 2020-08-31T07:24:55 | Jupyter Notebook | UTF-8 | Python | false | false | 1,955 | py | import math
import dubins
from src.geom.Node import Node
from src.utils.config import default_config
def find_path(pos_0: Node, heading_0: float, pos_1: Node, pos_2: Node or None, turn_radius: float,
step_size=default_config['PATH_RES'], resolution=math.pi / 64):
"""Finds the shortest dubins path that passes through the first 2 path points"""
config_0 = (*pos_0.as_list_2d(), heading_0)
pos_2 = pos_2.as_list_2d() if pos_2 is not None else None
path_0, path_1 = dubins.shortest_paths_2(config_0, pos_1.as_list_2d(), pos_2, turn_radius, resolution)
points, t = path_0.sample_many(step_size)
if path_1 is not None:
points_, t_ = path_1.sample_many(step_size)
points = points + points_
t = t + t_
return points, t
def find_path_complete(pos_0: Node, heading_0: float, path_points: [Node], turn_radius: float,
step_size=default_config['PATH_RES']):
assert len(path_points) > 0
pos_1 = path_points[0]
pos_2 = path_points[1] if len(path_points) > 1 else None
points, _ = find_path(pos_0, heading_0, pos_1, pos_2, turn_radius, step_size)
points = [Node.from_tuple(p) for p in points]
for path_p in path_points[2:]:
points.append(path_p)
return points
def find_path_weight(pos_0: Node, heading_0: float, path_points: [Node], turn_radius: float, resolution=math.pi / 64):
config_0 = (*pos_0.as_list_2d(), heading_0)
assert len(path_points) > 0
pos_1 = path_points[0].as_list_2d()
pos_2 = path_points[1].as_list_2d() if len(path_points) > 1 else None
path_0, path_1 = dubins.shortest_paths_2(config_0, pos_1, pos_2, turn_radius, resolution)
length = path_0.path_length()
if path_1 is not None:
length += path_1.path_length()
if len(path_points) > 2:
p0 = path_points[2]
for p1 in path_points[3:]:
length += p0.dist_2d(p1)
p0 = p1
return length
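# Usage sketch (assumption): relies on the forked `dubins` binding exposing
# shortest_paths_2, and assumes Node.from_tuple accepts a 2-tuple in addition to
# the (x, y, heading) samples it receives in find_path_complete above.
#
#     start = Node.from_tuple((0.0, 0.0))
#     goals = [Node.from_tuple((5.0, 0.0)), Node.from_tuple((5.0, 5.0))]
#     points = find_path_complete(start, 0.0, goals, turn_radius=1.0)
#     cost = find_path_weight(start, 0.0, goals, turn_radius=1.0)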
| [
"[email protected]"
]
| |
a9157605e769380a311a8c4c02e792653fc2caf4 | 787ee36fa303f968fdf846fe0f385c7018a33f31 | /octavia/tests/unit/api/v2/types/test_pools.py | 066e958d1718648718fbc07af28d0d65c15454f9 | [
"Apache-2.0"
]
| permissive | sajuptpm/octavia | b68bb502ff07cb8f7ee6721a6b0325be6c764d52 | fde4ebe822072a79bb74497b504ca3f0a6a6518d | refs/heads/master | 2020-03-10T06:58:57.557469 | 2018-04-12T02:46:35 | 2018-04-12T02:46:35 | 129,251,833 | 0 | 0 | Apache-2.0 | 2018-04-12T12:59:35 | 2018-04-12T12:59:34 | null | UTF-8 | Python | false | false | 5,850 | py | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from wsme import exc
from wsme.rest import json as wsme_json
from wsme import types as wsme_types
from octavia.api.v2.types import pool as pool_type
from octavia.common import constants
from octavia.tests.unit.api.common import base
class TestSessionPersistence(object):
_type = None
def test_session_persistence(self):
body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE}
sp = wsme_json.fromjson(self._type, body)
self.assertIsNotNone(sp.type)
def test_invalid_type(self):
body = {"type": "source_ip"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_cookie_name(self):
body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE,
"cookie_name": 10}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestPoolPOST(base.BaseTypesTest):
_type = pool_type.PoolPOST
def test_pool(self):
body = {
"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
pool = wsme_json.fromjson(self._type, body)
self.assertTrue(pool.admin_state_up)
def test_load_balancer_mandatory(self):
body = {"loadbalancer_id": uuidutils.generate_uuid()}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_protocol_mandatory(self):
body = {"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_lb_algorithm_mandatory(self):
body = {"protocol": constants.PROTOCOL_HTTP}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_name(self):
body = {"name": 10,
"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_description(self):
body = {"description": 10,
"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_load_balacer_id(self):
body = {"loadbalancer_id": 10,
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_protocol(self):
body = {"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": "http",
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_lb_algorithm(self):
body = {"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": "source_ip"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_non_uuid_project_id(self):
body = {"loadbalancer_id": uuidutils.generate_uuid(),
"protocol": constants.PROTOCOL_HTTP,
"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN,
"project_id": "non-uuid"}
pool = wsme_json.fromjson(self._type, body)
self.assertEqual(pool.project_id, body['project_id'])
class TestPoolPUT(base.BaseTypesTest):
_type = pool_type.PoolPUT
def test_pool(self):
body = {"name": "test_name"}
pool = wsme_json.fromjson(self._type, body)
self.assertEqual(wsme_types.Unset, pool.admin_state_up)
def test_invalid_name(self):
body = {"name": 10}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_description(self):
body = {"description": 10}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
def test_invalid_lb_algorithm(self):
body = {"lb_algorithm": "source_ip"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestSessionPersistencePOST(base.BaseTypesTest, TestSessionPersistence):
_type = pool_type.SessionPersistencePOST
def test_type_mandatory(self):
body = {"cookie_name": "test_name"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestSessionPersistencePUT(base.BaseTypesTest, TestSessionPersistence):
_type = pool_type.SessionPersistencePUT
| [
"[email protected]"
]
| |
1bcf9cfae1a9ee36fa60cef6a7710ff6110e247d | f5b3f1cb6f4e360a096bda679cb05d39364d0930 | /maskrcnn_benchmark/modeling/roi_heads/relation_head/loss.py | 85ca0a2e5cf2e06c2c3425ba40d455c2d51b5831 | [
"Python-2.0",
"MIT"
]
| permissive | PeterZhouSZ/recovering-unbiased-scene-graphs | fa44b2ba4b27c3023a30489bf981dd6326e72fcd | 50807a1eadab588c99d7ce7b819748065398e6b6 | refs/heads/main | 2023-08-26T09:56:32.786965 | 2021-10-23T06:15:13 | 2021-10-23T06:15:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,806 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
import torch.nn as nn
from torch.nn import functional as F
import numpy as np
import numpy.random as npr
from maskrcnn_benchmark.layers import smooth_l1_loss, Label_Smoothing_Regression, CenterLoss
from maskrcnn_benchmark.modeling.box_coder import BoxCoder
from maskrcnn_benchmark.modeling.matcher import Matcher
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
from maskrcnn_benchmark.modeling.utils import cat, get_constr_out
import itertools
import pickle
import scipy, math
from scipy import optimize
def soft_cross_entropy_loss(x, target, reduction='mean'):
assert reduction in ['sum', 'mean']
logprobs = torch.nn.functional.log_softmax(x, dim = 1)
loss = -(target * logprobs).sum()
return loss if reduction == 'sum' else (loss / x.shape[0])
# for center loss
# https://github.com/louis-she/center-loss.pytorch/blob/master/loss.py
def compute_center_loss(features, centers, targets):
features = features.view(features.size(0), -1)
target_centers = centers[targets]
criterion = torch.nn.MSELoss()
center_loss = criterion(features, target_centers)
return center_loss
def weighted_mse_loss(input, target, weight=None, reduction='mean'):
if weight is None:
loss = torch.sum((input - target) ** 2)
else:
loss = torch.sum(weight * (input - target) ** 2)
return loss if reduction == 'sum' else (loss / input.shape[0])
def softXEnt(input, target):
logprobs = F.log_softmax(input, dim = 1)
return -(target * logprobs).sum() / input.shape[0]
class RelationLossComputation(object):
"""
Computes the loss for relation triplet.
Also supports FPN
"""
def __init__(
self,
attri_on,
num_attri_cat,
max_num_attri,
attribute_sampling,
attribute_bgfg_ratio,
use_label_smoothing,
predicate_proportion,
stl_train,
loss_relation_stl_alpha,
bg_soft_loss,
use_balanced_norm,
pcpl_center_loss,
center_loss_lambda,
num_classes,
feat_dim,
unbiased_training,
predicate_weights_path,
weight_factor,
use_gt_box,
balnorm_reweight_label_distrib,
balnorm_reweight_label_prob,
balnorm_reweight_inv_label_prob,
norm_predicate_counts_path,
bal_reweight_only,
balanced_norm_train_gx,
balanced_norm_learnable,
balanced_norm_normalized_probs,
balanced_norm_test_only,
balanced_norm_fixed,
balanced_norm_fixed_mseloss,
balanced_norm_as_soft_label,
balanced_norm_as_soft_label_sll,
balanced_norm_as_soft_label_mll,
train_avg_belief_to_one,
train_avg_belief_to_one_only_this_loss,
balanced_norm_use_bceloss,
balanced_norm_use_mseloss,
multi_label_training,
multi_label_norm_loss,
c_hmc_train,
debug,
):
"""
        Arguments mirror the cfg.MODEL / cfg.TRAIN options; see
        make_roi_relation_loss_evaluator at the bottom of this file for the mapping.
"""
self.attri_on = attri_on
self.num_attri_cat = num_attri_cat
self.max_num_attri = max_num_attri
self.attribute_sampling = attribute_sampling
self.attribute_bgfg_ratio = attribute_bgfg_ratio
self.use_label_smoothing = use_label_smoothing
self.pred_weight = (1.0 / torch.FloatTensor([0.5,] + predicate_proportion)).cuda()
self.stl_train = stl_train
self.bg_soft_loss = bg_soft_loss
self.debug = debug
# BalNorm
self.use_balanced_norm = False if balanced_norm_test_only else use_balanced_norm
# PCPL & Center Loss
self.pcpl_center_loss = pcpl_center_loss
self.num_classes = num_classes
self.bal_reweight_only = bal_reweight_only
# BalNorm + Reweighting by labeling probability
self.balnorm_reweight_label_prob = balnorm_reweight_label_prob
if self.balnorm_reweight_label_prob:
assert unbiased_training == 'reweight' # make sure this is enabled!
# BalNorm + Reweighting by inverse labeling probability
self.balnorm_reweight_inv_label_prob = balnorm_reweight_inv_label_prob
if self.balnorm_reweight_inv_label_prob:
assert unbiased_training == 'reweight' # make sure this is enabled!
# BalNorm + Reweighting by estimated label distrib.
self.balnorm_reweight_label_distrib = balnorm_reweight_label_distrib
if self.balnorm_reweight_label_distrib:
assert unbiased_training == 'reweight' # make sure this is enabled!
self.norm_predicate_counts_path = norm_predicate_counts_path
# Whether to train against g(x) in addition to f(x), against the training set p(s|x)
if balanced_norm_train_gx:
assert use_balanced_norm
self.balanced_norm_train_gx = balanced_norm_train_gx
# whether to use learnable BalNorm
if balanced_norm_learnable:
assert use_balanced_norm
self.balanced_norm_learnable = balanced_norm_learnable
if balanced_norm_normalized_probs:
assert use_balanced_norm
self.balanced_norm_normalized_probs = balanced_norm_normalized_probs
self.balanced_norm_fixed = balanced_norm_fixed
if balanced_norm_fixed_mseloss:
assert use_balanced_norm
self.balanced_norm_fixed_mseloss = balanced_norm_fixed_mseloss
self.balanced_norm_use_bceloss = balanced_norm_use_bceloss
self.balanced_norm_use_mseloss = balanced_norm_use_mseloss
# multi-label training
self.multi_label_training = multi_label_training
if self.multi_label_training:
self.multi_label_norm_loss = multi_label_norm_loss
self.c_hmc_train = c_hmc_train
if balanced_norm_as_soft_label:
if not self.balanced_norm_fixed:
assert use_balanced_norm
assert balanced_norm_as_soft_label_sll ^ balanced_norm_as_soft_label_mll
self.balanced_norm_as_soft_label = balanced_norm_as_soft_label
self.balanced_norm_as_soft_label_sll = balanced_norm_as_soft_label_sll
self.balanced_norm_as_soft_label_mll = balanced_norm_as_soft_label_mll
if train_avg_belief_to_one:
assert use_balanced_norm
self.train_avg_belief_to_one = train_avg_belief_to_one
self.train_avg_belief_to_one_only_this_loss = train_avg_belief_to_one_only_this_loss
self.weight = None
self.unbiased_training = unbiased_training
self.use_gt_box = use_gt_box
if unbiased_training != '':
assert not self.pcpl_center_loss
if unbiased_training == 'reweight':
if self.multi_label_training:
with open(predicate_weights_path, 'rb') as f:
self.weight = pickle.load(f)
# import pdb; pdb.set_trace()
assert len(self.weight) == 51
elif balnorm_reweight_label_prob or balnorm_reweight_inv_label_prob: # Type 0: reweighting by estimated labeling prob (only used with Balnorm)
pass # do nothing here (labeling prob. is estimated & passed dynamically)
elif self.balnorm_reweight_label_distrib: # Type I: reweighting by estimated label distrib. (only used with Balnorm)
with open(self.norm_predicate_counts_path, 'rb') as f:
self.norm_predicate_counts = pickle.load(f) # f
assert len(self.norm_predicate_counts) == 50
elif self.bal_reweight_only: # Type II: reweight by estimated labeling probability (1/p)
pass # do nothing here
else: # Type III: normal reweighting
assert predicate_weights_path.split('.')[-1] == 'pkl'
with open(predicate_weights_path, 'rb') as f:
self.weight = pickle.load(f)
assert len(self.weight) == 51
elif unbiased_training == 'reweight_vrd':
assert predicate_weights_path.split('.')[-1] == 'npy'
with open(predicate_weights_path, 'rb') as f:
self.weight = np.load(f)
assert self.weight.shape[0] == 151 and self.weight.shape[1] == 151 and self.weight.shape[2] == 51
elif unbiased_training == 'resample':
raise NotImplementedError
elif unbiased_training == 'focal_loss':
raise NotImplementedError
else:
raise NotImplementedError
if not self.balnorm_reweight_label_distrib and not self.bal_reweight_only and not self.balnorm_reweight_label_prob and not self.balnorm_reweight_inv_label_prob:
self.weight = torch.tensor(self.weight).cuda() * weight_factor
elif self.pcpl_center_loss:
assert not self.use_label_smoothing # not implemented for use with label smoothing
self.center_loss_lambda = center_loss_lambda
# Center Loss version 1: https://github.com/KaiyangZhou/pytorch-center-loss
# self.center_loss = CenterLoss(num_classes - 1, feat_dim)
# Center loss version 2: https://github.com/louis-she/center-loss.pytorch
self.centers = nn.Parameter(torch.Tensor(num_classes, feat_dim).normal_(), requires_grad=False).cuda()
self.corr_order = torch.tensor([(i, j) for i in range(num_classes) for j in range(num_classes)])
# For PCPL we put the weight in F.cross_entropy(weight=weight) as the weight
# is dynamically changing
if self.stl_train:
if self.weight is not None:
raise NotImplementedError # haven't implemented
self.criterion_loss = nn.CrossEntropyLoss()
self.loss_relation_stl_alpha = loss_relation_stl_alpha
elif self.use_label_smoothing:
assert self.weight is None # not to be used with other reweighting methods
self.criterion_loss = Label_Smoothing_Regression(e=0.01)
# elif self.use_balanced_norm:
# if self.unbiased_training != '':
# assert self.weight is not None
# self.loss_relation_balanced_norm = nn.NLLLoss(weight=self.weight)
# self.criterion_loss = nn.CrossEntropyLoss()
else:
self.criterion_loss = nn.CrossEntropyLoss()
if self.multi_label_training:
# import pdb; pdb.set_trace()
if self.c_hmc_train:
self.criterion_loss_relation = nn.BCELoss(weight=self.weight)
else:
self.criterion_loss_relation = nn.BCEWithLogitsLoss(weight=self.weight)
else:
self.criterion_loss_relation = nn.CrossEntropyLoss(weight=self.weight)
if self.use_balanced_norm and not self.balnorm_reweight_label_distrib and not self.bal_reweight_only and not self.balnorm_reweight_label_prob and not self.balanced_norm_normalized_probs and not self.balnorm_reweight_inv_label_prob and not self.balanced_norm_as_soft_label:
if self.unbiased_training != '':
assert self.weight is not None
if self.balanced_norm_use_bceloss:
assert self.weight is None # NOT IMPLEMENTED TO RUN TOGETHER YET
self.loss_relation_balanced_norm = nn.BCELoss()
elif self.balanced_norm_use_mseloss:
assert self.weight is None # NOT IMPLEMENTED TO RUN TOGETHER YET
self.loss_relation_balanced_norm = nn.MSELoss()
else:
self.loss_relation_balanced_norm = nn.NLLLoss(weight=self.weight)
def __call__(self, proposals, rel_labels, relation_logits, refine_logits, stl_labels=None, relation_probs_norm=None, relation_logits_raw=None, rel_pair_idxs=None, labeling_prob=None, matrix_of_ancestor=None):
"""
Computes the loss for relation triplet.
This requires that the subsample method has been called beforehand.
Arguments:
relation_logits (list[Tensor])
refine_obj_logits (list[Tensor])
Returns:
predicate_loss (Tensor)
finetune_obj_loss (Tensor)
"""
if self.attri_on:
if isinstance(refine_logits[0], (list, tuple)):
refine_obj_logits, refine_att_logits = refine_logits
else:
# just use attribute feature, do not actually predict attribute
self.attri_on = False
refine_obj_logits = refine_logits
else:
refine_obj_logits = refine_logits
weight = self.weight
if self.unbiased_training == 'reweight_vrd':
raise NotImplementedError # not completed yet
assert not self.pcpl_center_loss # cannot be used together
weights = []
for refine_obj_logit, rel_pair_idx in zip(refine_obj_logits, rel_pair_idxs):
obj_class_prob = F.softmax(refine_obj_logit, -1)
if self.use_gt_box:
_, obj_pred = obj_class_prob[:, 1:].max(dim=1)
obj_pred = obj_pred + 1
else:
raise NotImplementedError # haven't implemented for SGDet
# NOTE: by kaihua, apply late nms for object prediction
obj_pred = obj_prediction_nms(box.get_field('boxes_per_cls'), obj_logit, self.later_nms_pred_thres)
obj_score_ind = torch.arange(num_obj_bbox, device=obj_logit.device) * num_obj_class + obj_pred
sub_cls = obj_pred[rel_pair_idx[:, 0]]
obj_cls = obj_pred[rel_pair_idx[:, 1]]
weight = self.weight[sub_cls, obj_cls]
weights.append(weight)
weights = torch.cat(weights)
relation_logits = cat(relation_logits, dim=0)
refine_obj_logits = cat(refine_obj_logits, dim=0)
rel_labels = cat(rel_labels, dim=0)
fg_idxs, bg_idxs = (rel_labels != 0), (rel_labels == 0)
fg_labels = cat([proposal.get_field("labels") for proposal in proposals], dim=0)
loss_refine_obj = self.criterion_loss(refine_obj_logits, fg_labels.long())
assert not (self.stl_train and self.bg_soft_loss), 'Simultaneous usage of stl_train and bg_soft_loss are not supported!'
assert not (self.use_balanced_norm and self.bg_soft_loss), 'Simultaneous usage of use_balanced_norm and bg_soft_loss are not supported!'
loss_center = loss_gx = rel_features = rel_targets = loss_avg_belief = None
if self.pcpl_center_loss:
assert relation_logits_raw is not None
# compute center loss
# Way 1
# loss_center = self.center_loss(relation_logits_raw[fg_idxs].detach().clone(), rel_labels.long()[fg_idxs]) # only compute loss for non-bg classes
# loss_center = self.center_loss(relation_logits_raw.detach().clone(), rel_labels.long()) # also compute loss for bg class (class 0)
# Way 2
rel_features = relation_logits_raw.clone().detach()
loss_center = compute_center_loss(rel_features, self.centers, rel_labels.long()) * self.center_loss_lambda
rel_targets = rel_labels.long()
# (eq. 2) compute e_{kj}
corr = torch.norm((self.centers[self.corr_order[:,0]] - self.centers[self.corr_order[:,1]]), dim=1)
            # (eq. 3) compute u_i
global_corr = torch.cat([(torch.sum(corr_class, dim=0) / self.num_classes).reshape(-1) for corr_class in torch.split(corr, self.num_classes)])
            # (eq. 4) compute correlation factor tau_i as weight
eps = 0.09
max_corr, min_corr = max(global_corr), min(global_corr)
corr_factor = (global_corr - min_corr + eps) / (max_corr - min_corr)
weight = corr_factor.detach()
if self.stl_train:
assert stl_labels is not None
stl_labels = cat(stl_labels, dim=0)
if self.balanced_norm_train_gx:
loss_gx = F.cross_entropy(relation_logits, rel_labels.long())
if self.pcpl_center_loss:
loss_relation = F.cross_entropy(relation_logits[fg_idxs], rel_labels.long()[fg_idxs], weight=weight)
elif self.bal_reweight_only:
                # use estimated labeling probability for reweighting
                assert labeling_prob is not None
                raise NotImplementedError  # unfinished branch: STL training with labeling-probability reweighting
elif self.balnorm_reweight_label_prob:
assert labeling_prob is not None
with torch.no_grad():
weight = 1 / labeling_prob.detach()
loss_relation = F.nll_loss(torch.log(relation_probs_norm), rel_labels.long(), weight=weight)
elif self.balnorm_reweight_label_distrib:
# estimate label distribution
assert labeling_prob is not None
with torch.no_grad():
f = self.norm_predicate_counts
q = np.array(labeling_prob[1:].detach().cpu())
a = np.array([[qq * (ff - 1) if i == j else qq * ff for i, qq in enumerate(q)] for j, ff in enumerate(f)])
a[-1] = np.array([1] * len(a))
b = np.array([0] * 49 + [1])
p = scipy.optimize.lsq_linear(a, b, (0, float('inf')))
p = p.x
# counts = np.array([0.0] * 51)
# counts[0] = 3.0
# counts[1:] = p
weight = torch.tensor([math.sqrt(1/p[idx - 1]) if idx != 0 else math.sqrt(1/(3*sum(p))) for idx in range(len(p) + 1)]).cuda()
loss_relation = F.nll_loss(torch.log(relation_probs_norm[fg_idxs]), rel_labels.long()[fg_idxs], weight=weight)
elif self.use_balanced_norm:
loss_relation = self.loss_relation_balanced_norm(torch.log(relation_probs_norm[fg_idxs]), rel_labels.long()[fg_idxs])
# loss_relation = self.criterion_loss(relation_logits[fg_idxs], rel_labels.long()[fg_idxs])
else:
loss_relation = self.criterion_loss(relation_logits[fg_idxs], rel_labels.long()[fg_idxs])
assert relation_logits[bg_idxs].shape == stl_labels.shape
loss_relation_stl = soft_cross_entropy_loss(relation_logits[bg_idxs], stl_labels, reduction='mean') * self.loss_relation_stl_alpha
# loss_relation = (loss_relation + loss_relation_stl * self.loss_relation_stl_alpha) / len(relation_logits)
# loss_relation_stl = None # set to None (for now) as it should be combined into loss_relation. Delete this variable if it's correct to do so.
else:
if self.use_balanced_norm:
assert relation_probs_norm is not None
if self.balanced_norm_train_gx:
loss_gx = F.cross_entropy(relation_logits, rel_labels.long())
if self.balanced_norm_as_soft_label:
# import pdb; pdb.set_trace()
if self.balanced_norm_as_soft_label_sll:
logprobs = F.log_softmax(relation_logits, dim=1)[range(relation_logits.shape[0]), rel_labels]
loss_relation = -(labeling_prob.detach()[rel_labels] * logprobs).sum() / logprobs.shape[0]
elif self.balanced_norm_as_soft_label_mll:
loss_relation = softXEnt(relation_logits, labeling_prob.detach())
else:
raise NotImplementedError
elif self.pcpl_center_loss:
raise NotImplementedError
loss_relation = F.nll_loss(torch.log(relation_probs_norm), rel_labels.long())
elif self.bal_reweight_only:
                # use estimated labeling probability for reweighting
assert labeling_prob is not None
loss_relation = F.cross_entropy(relation_logits, rel_labels.long(), weight=(1 / labeling_prob))
elif self.balnorm_reweight_inv_label_prob:
assert labeling_prob is not None
if self.balanced_norm_normalized_probs:
weight = labeling_prob
loss_relation = weighted_mse_loss(relation_probs_norm[range(relation_probs_norm.shape[0]), rel_labels], torch.ones(len(rel_labels)).cuda(), weight=weight[rel_labels])
else:
loss_relation = F.nll_loss(torch.log(relation_probs_norm), rel_labels.long(), weight=labeling_prob.detach())
elif self.balnorm_reweight_label_prob:
assert labeling_prob is not None
if self.balanced_norm_normalized_probs:
# import pdb; pdb.set_trace()
weight = 1 / labeling_prob
loss_relation = weighted_mse_loss(relation_probs_norm[range(relation_probs_norm.shape[0]), rel_labels], torch.ones(len(rel_labels)).cuda(), weight=weight[rel_labels])
else:
# if self.balanced_norm_learnable: # use label prob as weight with gradient
# weight = 1 / labeling_prob
# else:
with torch.no_grad():
weight = 1 / labeling_prob.detach()
loss_relation = F.nll_loss(torch.log(relation_probs_norm), rel_labels.long(), weight=weight)
elif self.balnorm_reweight_label_distrib:
# estimate label distribution
assert labeling_prob is not None
with torch.no_grad():
f = self.norm_predicate_counts
q = np.array(labeling_prob[1:].detach().cpu())
a = np.array([[qq * (ff - 1) if i == j else qq * ff for i, qq in enumerate(q)] for j, ff in enumerate(f)])
a[-1] = np.array([1] * len(a))
b = np.array([0] * 49 + [1])
p = scipy.optimize.lsq_linear(a, b, (0, float('inf')))
p = p.x
# counts = np.array([0.0] * 51)
# counts[0] = 3.0
# counts[1:] = p
weight = torch.tensor([math.sqrt(1/p[idx - 1]) if idx != 0 else math.sqrt(1/(3*sum(p))) for idx in range(len(p) + 1)]).cuda()
loss_relation = F.nll_loss(torch.log(relation_probs_norm), rel_labels.long(), weight=weight)
else: # use balanced norm
if self.balanced_norm_normalized_probs or self.balanced_norm_fixed_mseloss:
# import pdb; pdb.set_trace()
loss_relation = weighted_mse_loss(relation_probs_norm[range(relation_probs_norm.shape[0]), rel_labels], torch.ones(len(rel_labels)).cuda())
else:
if self.train_avg_belief_to_one:
nonzero_idxs = torch.nonzero(rel_labels)
# n_unique_cls = len(torch.unique(rel_labels[nonzero_idxs]))
unique_class_to_example_idxs = {obj_cls:[] for obj_cls in torch.unique(rel_labels[nonzero_idxs]).detach().cpu().numpy().tolist()}
for i in range(relation_probs_norm.shape[0]):
obj_cls = rel_labels[i].detach().cpu().item()
if obj_cls != 0:
unique_class_to_example_idxs[obj_cls].append(i)
avg_beliefs = []
for obj_cls in sorted(unique_class_to_example_idxs.keys()):
avg_belief = relation_probs_norm[unique_class_to_example_idxs[obj_cls]].sum(0) / len(relation_probs_norm[unique_class_to_example_idxs[obj_cls]])
avg_beliefs.append(avg_belief.view(1, -1))
avg_beliefs = torch.cat(avg_beliefs)
# import pdb; pdb.set_trace()
loss_avg_belief = F.nll_loss(torch.log(avg_beliefs), torch.unique(rel_labels[nonzero_idxs]))
if self.train_avg_belief_to_one_only_this_loss:
loss_relation = loss_avg_belief
loss_avg_belief = None
if not self.train_avg_belief_to_one_only_this_loss:
if self.balanced_norm_use_bceloss:
loss_relation = self.loss_relation_balanced_norm(relation_probs_norm[range(relation_probs_norm.shape[0]), rel_labels], torch.ones(len(rel_labels)).cuda())
elif self.balanced_norm_use_mseloss:
# loss_relation = self.loss_relation_balanced_norm(relation_probs_norm[range(relation_probs_norm.shape[0]), rel_labels], torch.ones(len(rel_labels)).cuda())
loss_relation = self.loss_relation_balanced_norm(relation_probs_norm, rel_labels)
else:
loss_relation = self.loss_relation_balanced_norm(torch.log(relation_probs_norm), rel_labels.long())
elif self.balanced_norm_fixed and self.balanced_norm_as_soft_label:
if self.balanced_norm_as_soft_label_sll:
logprobs = F.log_softmax(relation_logits, dim=1)[range(relation_logits.shape[0]), rel_labels]
loss_relation = -(labeling_prob.detach()[rel_labels] * logprobs).sum() / logprobs.shape[0]
elif self.balanced_norm_as_soft_label_mll:
loss_relation = softXEnt(relation_logits, labeling_prob.detach())
else:
raise NotImplementedError
elif self.bg_soft_loss:
raise NotImplementedError
hard_loss_fg_relation = self.criterion_loss(relation_logits[:len(fg_labels)], rel_labels.long()[:len(fg_labels)])
soft_loss_bg_relation = soft_cross_entropy_loss(relation_logits[len(fg_labels):], rel_labels.float()[len(fg_labels):, None], reduction='mean')
# Note that this section is not implemented for self.pcpl_center_loss
loss_relation = hard_loss_fg_relation + soft_loss_bg_relation
elif self.pcpl_center_loss:
loss_relation = F.cross_entropy(relation_logits, rel_labels.long(), weight=weight)
elif self.unbiased_training == 'reweight_vrd':
raise NotImplementedError
else:
if self.multi_label_training:
if self.c_hmc_train:
# import pdb; pdb.set_trace()
assert isinstance(self.criterion_loss_relation, nn.BCELoss) # sanity check
relation_logits = torch.sigmoid(relation_logits)
constr_output = get_constr_out(relation_logits, matrix_of_ancestor)
train_output = rel_labels * relation_logits.double()
train_output = get_constr_out(train_output, matrix_of_ancestor)
relation_logits = (1 - rel_labels) * constr_output.double() + rel_labels * train_output
else:
assert isinstance(self.criterion_loss_relation, nn.BCEWithLogitsLoss) # sanity check
if self.multi_label_norm_loss:
rel_labels = rel_labels / rel_labels.sum(dim=1).view(-1, 1)
loss_relation = self.criterion_loss_relation(relation_logits, rel_labels.double()).float()
# if self.multi_label_norm_loss:
# if rel_labels.sum() < rel_labels.shape[0]:
# import pdb; pdb.set_trace()
# nonzero_idxs = rel_labels.sum(dim=1).nonzero()
# rel_labels = rel_labels[nonzero_idxs].squeeze()
# loss_relation = loss_relation[nonzero_idxs].squeeze()
# # elif rel_labels.sum() > rel_labels.shape[0]:
# import pdb; pdb.set_trace()
# loss_relation = (loss_relation / rel_labels.sum(dim=1).view(-1, 1)).sum() / loss_relation.shape[0]
else:
loss_relation = self.criterion_loss_relation(relation_logits, rel_labels.long())
loss_relation_stl = None
# The following code is used to calcaulate sampled attribute loss
if self.attri_on:
refine_att_logits = cat(refine_att_logits, dim=0)
fg_attributes = cat([proposal.get_field("attributes") for proposal in proposals], dim=0)
attribute_targets, fg_attri_idx = self.generate_attributes_target(fg_attributes)
if float(fg_attri_idx.sum()) > 0:
# have at least one bbox got fg attributes
refine_att_logits = refine_att_logits[fg_attri_idx > 0]
attribute_targets = attribute_targets[fg_attri_idx > 0]
else:
refine_att_logits = refine_att_logits[0].view(1, -1)
attribute_targets = attribute_targets[0].view(1, -1)
loss_refine_att = self.attribute_loss(refine_att_logits, attribute_targets,
fg_bg_sample=self.attribute_sampling,
bg_fg_ratio=self.attribute_bgfg_ratio)
return loss_relation, (loss_refine_obj, loss_refine_att), loss_relation_stl, loss_center, loss_gx, loss_avg_belief, rel_features, rel_targets
else:
return loss_relation, loss_refine_obj, loss_relation_stl, loss_center, loss_gx, loss_avg_belief, rel_features, rel_targets
def generate_attributes_target(self, attributes):
"""
from list of attribute indexs to [1,0,1,0,0,1] form
"""
assert self.max_num_attri == attributes.shape[1]
device = attributes.device
num_obj = attributes.shape[0]
fg_attri_idx = (attributes.sum(-1) > 0).long()
attribute_targets = torch.zeros((num_obj, self.num_attri_cat), device=device).float()
for idx in torch.nonzero(fg_attri_idx).squeeze(1).tolist():
for k in range(self.max_num_attri):
att_id = int(attributes[idx, k])
if att_id == 0:
break
else:
attribute_targets[idx, att_id] = 1
return attribute_targets, fg_attri_idx
def attribute_loss(self, logits, labels, fg_bg_sample=True, bg_fg_ratio=3):
if fg_bg_sample:
loss_matrix = F.binary_cross_entropy_with_logits(logits, labels, reduction='none').view(-1)
fg_loss = loss_matrix[labels.view(-1) > 0]
bg_loss = loss_matrix[labels.view(-1) <= 0]
num_fg = fg_loss.shape[0]
# if there is no fg, add at least one bg
num_bg = max(int(num_fg * bg_fg_ratio), 1)
perm = torch.randperm(bg_loss.shape[0], device=bg_loss.device)[:num_bg]
bg_loss = bg_loss[perm]
return torch.cat([fg_loss, bg_loss], dim=0).mean()
else:
attri_loss = F.binary_cross_entropy_with_logits(logits, labels)
attri_loss = attri_loss * self.num_attri_cat / 20.0
return attri_loss
class FocalLoss(nn.Module):
def __init__(self, gamma=0, alpha=None, size_average=True):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
self.size_average = size_average
def forward(self, input, target):
target = target.view(-1)
        logpt = F.log_softmax(input, dim=-1)
logpt = logpt.index_select(-1, target).diag()
logpt = logpt.view(-1)
pt = logpt.exp()
logpt = logpt * self.alpha * (target > 0).float() + logpt * (1 - self.alpha) * (target <= 0).float()
loss = -1 * (1-pt)**self.gamma * logpt
if self.size_average: return loss.mean()
else: return loss.sum()
def make_roi_relation_loss_evaluator(cfg):
loss_evaluator = RelationLossComputation(
cfg.MODEL.ATTRIBUTE_ON,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.NUM_ATTRIBUTES,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.MAX_ATTRIBUTES,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.ATTRIBUTE_BGFG_SAMPLE,
cfg.MODEL.ROI_ATTRIBUTE_HEAD.ATTRIBUTE_BGFG_RATIO,
cfg.MODEL.ROI_RELATION_HEAD.LABEL_SMOOTHING_LOSS,
cfg.MODEL.ROI_RELATION_HEAD.REL_PROP,
cfg.MODEL.STL_TRAIN,
cfg.MODEL.STL_TRAIN_LOSS_ALPHA,
cfg.MODEL.BG_SOFT_LOSS,
cfg.MODEL.BALANCED_NORM,
cfg.MODEL.PCPL_CENTER_LOSS,
cfg.MODEL.CENTER_LOSS_LAMBDA,
cfg.MODEL.ROI_RELATION_HEAD.NUM_CLASSES,
cfg.MODEL.ROI_RELATION_HEAD.CONTEXT_POOLING_DIM,
cfg.TRAIN.UNBIASED_TRAINING,
cfg.TRAIN.PREDICATE_WEIGHTS_PATH,
cfg.TRAIN.WEIGHT_FACTOR,
cfg.MODEL.ROI_RELATION_HEAD.USE_GT_BOX,
cfg.TRAIN.BALNORM_REWEIGHT_LABEL_DISTRIB,
cfg.TRAIN.BALNORM_REWEIGHT_LABEL_PROB,
cfg.TRAIN.BALNORM_REWEIGHT_INV_LABEL_PROB,
cfg.TRAIN.NORM_PREDICATE_COUNTS_PATH,
cfg.TRAIN.BAL_REWEIGHT_ONLY,
cfg.MODEL.BALANCED_NORM_TRAIN_GX,
cfg.MODEL.BALANCED_NORM_LEARNABLE,
cfg.MODEL.BALANCED_NORM_NORMALIZED_PROBS,
cfg.MODEL.BALANCED_NORM_TEST_ONLY,
cfg.MODEL.BALANCED_NORM_FIXED,
cfg.MODEL.BALANCED_NORM_FIXED_MSELOSS,
cfg.MODEL.BALANCED_NORM_AS_SOFT_LABEL,
cfg.MODEL.BALANCED_NORM_AS_SOFT_LABEL_SLL,
cfg.MODEL.BALANCED_NORM_AS_SOFT_LABEL_MLL,
cfg.MODEL.TRAIN_AVG_BELIEF_TO_ONE,
cfg.MODEL.TRAIN_AVG_BELIEF_TO_ONE_ONLY_THIS_LOSS,
cfg.MODEL.BALANCED_NORM_USE_BCELOSS,
cfg.MODEL.BALANCED_NORM_USE_MSELOSS,
cfg.TRAIN.MULTI_LABEL_TRAINING,
cfg.TRAIN.MULTI_LABEL_NORM_LOSS,
cfg.HMC.C_HMC_TRAIN,
cfg.DEBUG,
)
return loss_evaluator
| [
"[email protected]"
]
| |
3726bbe2896a068f5be60d5d92e14539400d68c1 | 0623ec7befa8bf52ba2eba97502190ae8e2b40a5 | /sargeparse/_parser/data.py | aaaa7abb9cba7ede6c80367872af999e59033d35 | [
"Apache-2.0"
]
| permissive | DiegoPomares/sargeparse | f235bb58d8e6f537b954b5b34cd5e6c5310e3e1d | 32424cd1a87c8efba4a2e2c08540478bec9f63a2 | refs/heads/master | 2020-03-18T15:54:22.456121 | 2018-07-11T12:52:17 | 2018-07-11T12:52:17 | 134,936,150 | 1 | 1 | Apache-2.0 | 2018-07-11T12:52:18 | 2018-05-26T06:37:44 | Python | UTF-8 | Python | false | false | 5,966 | py | import sys
from collections import ChainMap
import sargeparse.consts
from sargeparse._parser.parser import Parser
class ArgumentData(ChainMap):
_default_precedence = ['cli', 'environment', 'configuration', 'defaults']
def __init__(self, parser: Parser, precedence=None):
super().__init__()
self._parser = parser
self._config_data = {}
self._data_sources = {}
self.callbacks = []
for source in self._format_precedence_list(self._default_precedence):
self._data_sources[source] = {}
self._override = self._data_sources['override']
self.cli = self._data_sources['cli']
self.environment = self._data_sources['environment']
self.configuration = self._data_sources['configuration']
self.defaults = self._data_sources['defaults']
self._arg_default = self._data_sources['arg_default']
self.parser_data = {}
self.set_precedence(precedence)
def set_precedence(self, precedence):
precedence = precedence or self._default_precedence
difference = set(self._default_precedence).symmetric_difference(set(precedence))
if difference:
msg = "Precedence must contain all and only these elements: {}"
raise TypeError(msg.format(self._default_precedence))
precedence = self._format_precedence_list(precedence)
self.maps = [self._data_sources[k] for k in precedence]
def clear_all(self):
for d in self._data_sources.values():
d.clear()
def dispatch(self, *, obj=None):
last_callback = len(self.callbacks) - 1
return_value = None
for i, fn in enumerate(self.callbacks):
parser_data = self.parser_data[fn.parser.parser_key()]
last = (i == last_callback)
ctx = Context(
data=self,
obj=obj,
parser_data=parser_data,
last=last,
return_value=return_value,
)
return_value = fn(ctx)
if return_value == sargeparse.die:
sys.exit(return_value.value)
elif return_value == sargeparse.stop:
return_value = return_value.value
break
return return_value
@staticmethod
def _format_precedence_list(precedence):
return ['override'] + precedence + ['arg_default']
def _remove_unset_from_data_sources_cli(self):
for k, v in list(self.cli.items()):
if v == sargeparse.unset:
self.cli.pop(k)
def _move_defaults_from_data_sources_cli(self, parser=None):
parser = parser or self._parser
key = parser.parser_key()
if key not in self.cli:
return
defaults = self.cli[key].get('defaults', {})
self.defaults.update(defaults)
for subparser in parser.subparsers:
self._move_defaults_from_data_sources_cli(subparser)
def _parse_callbacks(self):
self.callbacks = self._get_callbacks()
def _get_callbacks(self, parser=None):
parser = parser or self._parser
callback_list = []
key = parser.parser_key()
if key not in self.cli:
return []
callback = self.cli[key].get('callback')
if callback:
callback_list.append(callback)
for subparser in parser.subparsers:
callback_list.extend(
self._get_callbacks(subparser)
)
return callback_list
def _parse_envvars_and_defaults(self, parser=None):
parser = parser or self._parser
# No point in adding data from subcommands that did not run
key = parser.parser_key()
if key not in self.cli:
return
for argument in parser.arguments:
dest = argument.dest
envvar = argument.get_value_from_envvar(default=sargeparse.unset)
if envvar != sargeparse.unset:
self.environment[dest] = envvar
default = argument.get_default_value(default=sargeparse.unset, apply_type=True)
if default != sargeparse.unset:
self.defaults[dest] = default
self._arg_default[dest] = parser.argument_parser_kwargs['argument_default']
for subparser in parser.subparsers:
self._parse_envvars_and_defaults(subparser)
def _parse_config(self, config, parser=None):
parser = parser or self._parser
self._config_data = config
# No point in adding data from subcommands that did not run
key = parser.parser_key()
if key not in self.cli:
return
for argument in parser.arguments:
dest = argument.dest
config_value = argument.get_value_from_config(config, default=sargeparse.unset)
if config_value != sargeparse.unset:
self.configuration[dest] = config_value
self._arg_default[dest] = parser.argument_parser_kwargs['argument_default']
for subparser in parser.subparsers:
self._parse_config(config, subparser)
def _remove_parser_key_from_data_sources_cli(self, parser=None):
parser = parser or self._parser
key = parser.parser_key()
if key not in self.cli:
return
self.cli.pop(key)
for subparser in parser.subparsers:
self._remove_parser_key_from_data_sources_cli(subparser)
class Context:
def __init__(self, **kwargs):
self.data = kwargs.get('data')
self.obj = kwargs.get('obj')
self.parser = ParserData(**kwargs.get('parser_data'))
self.last = kwargs.get('last')
self.return_value = kwargs.get('return_value')
class ParserData:
def __init__(self, **parser_data):
self.prog = parser_data['prog']
self.help = parser_data['help']
self.usage = parser_data['usage']
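# Sketch (assumption): what a dispatch() callback looks like from user code.
# Each callback receives a Context; per the loop in ArgumentData.dispatch above,
# returning sargeparse.die exits the process and sargeparse.stop halts the chain
# (their .value construction lives in sargeparse.consts, not shown here).
#
#     def on_command(ctx):
#         print(ctx.parser.prog, dict(ctx.data))
#         if not ctx.last:
#             return sargeparse.stop   # skip remaining subcommand callbacks
#         return 0                     # becomes dispatch()'s return value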
| [
"[email protected]"
]
| |
1f628490e35afe54b6975ea324fe54ff9696d96a | a46c3cb15f8e92ca909274c2361f2dee62bb4f8f | /rx/linq/takeWhile.py | a20593c3a97baf323688b4f3bfe24f96a245fb84 | [
"MIT"
]
| permissive | akuendig/RxPython | d58cbf359b613f88bc9d83b4fef394161e140725 | 3dd7228519fa2121e39f43d142fe64e73ac77b80 | refs/heads/master | 2020-05-07T08:30:33.839781 | 2013-04-02T13:05:06 | 2013-04-02T13:05:06 | 8,301,392 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from rx.observable import Producer
import rx.linq.sink
class TakeWhile(Producer):
def __init__(self, source, predicate, withIndex):
self.source = source
self.predicate = predicate
self.withIndex = withIndex
def run(self, observer, cancel, setSink):
sink = self.Sink(self, observer, cancel)
setSink(sink)
return self.source.subscribeSafe(sink)
class Sink(rx.linq.sink.Sink):
def __init__(self, parent, observer, cancel):
super(TakeWhile.Sink, self).__init__(observer, cancel)
self.parent = parent
self.running = True
self.index = -1
def onNext(self, value):
if self.running:
try:
if self.parent.withIndex:
self.index += 1
self.running = self.parent.predicate(value, self.index)
else:
self.running = self.parent.predicate(value)
except Exception as e:
self.observer.onError(e)
self.dispose()
return
if self.running:
self.observer.onNext(value)
else:
self.observer.onCompleted()
self.dispose()
def onError(self, exception):
self.observer.onError(exception)
self.dispose()
def onCompleted(self):
self.observer.onCompleted()
self.dispose() | [
"[email protected]"
]
| |
fc29850a9c25a51075b30b9f10dd800fad76832b | 179af4c581daaa03995f6bd30c95a6583acee7e8 | /KNN.py | 85890b299ddd9781439cd5317773fa0366377516 | []
| no_license | Harold1994/scikitlearnDemos | 20b29df07125b82100498300e292ba7de87763ea | 075cd8447b5dff59802ecda955f453ac6f78995e | refs/heads/master | 2022-11-10T02:46:21.410865 | 2018-01-19T10:41:35 | 2018-01-19T10:41:35 | 118,110,608 | 2 | 0 | null | 2022-10-23T19:59:10 | 2018-01-19T10:18:22 | Python | UTF-8 | Python | false | false | 273 | py | from sklearn import neighbors
import numpy as np
knn = neighbors.KNeighborsClassifier()
data = np.array([[3, 104], [2, 100], [1, 81], [101, 10], [99, 5], [98, 2]])
labels = np.array([1, 1, 1, 2, 2, 2])
knn.fit(data, labels.ravel())
print(knn.predict(np.array([[58, 9]])))
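# Follow-up queries (sketch): nearest-neighbour distances/indices and class
# probabilities for the same query point, via the standard scikit-learn API.
print(knn.kneighbors(np.array([[58, 9]]), n_neighbors=3))
print(knn.predict_proba(np.array([[58, 9]])))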
| [
"[email protected]"
]
| |
6a0dd13e2b196b94a54d161eb74dd636a1bd9537 | a72cb4d00528fb3d2d47f99a1ccca1b8b9b41ff7 | /scripts/archive/io_import_minecraft/slimes.py | a45e92a5e623d492a7c4866e361bbd0e65c419ce | []
| no_license | talocan/blenderpython | b05204881183ff901ec189916a3bcc1d3e9d3e20 | 056ac37e76a1b410696c9efe4fe0ea09fdc68c0e | refs/heads/master | 2021-01-18T05:16:47.221786 | 2014-07-11T17:01:53 | 2014-07-11T17:01:53 | 21,749,332 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,681 | py | # Javarandom slime Python-version test harness.
from . import javarandom
rnd = javarandom.Random
def isSlimeSpawn(worldSeed, xPos, zPos):
rnd = javarandom.Random(worldSeed + jlong(xPos * xPos * 0x4c1906) + jlong(xPos * 0x5ac0db) + jlong(zPos * zPos) * 0x4307a7 + jlong(zPos * 0x5f24f) ^ 0x3ad8025f)
return rnd.nextInt(10) == 0
#Totally crucial!
def jlong(i):
# Python and Java don't agree on how ints work.
# Python 3 in particular treats everything as long.
#The seed A term in the RNG was wrong, before...
#This converts the unsigned generated int into a signed int if necessary.
i = (i & 0xffffffff) #vital!
if i & (1 << 31):
i -= (1 << 32)
return i
if __name__ == '__main__':
worldseed = 4784223057510287643 #Afarundria's seed.
# for z in range(64):
# for x in range(64):
# isSlime = isSlimeSpawn(worldseed,x,z)
# print("[%d,%d: %d]" % (x,z,isSlime), end="\r\n")
# #write out all the seeds the above line of code would generate!!
# for z in range(64):
# for x in range(64):
# seeda = jlong(x * x * 0x4c1906) # BASTARD OF A 2's COMPLEMENT!
# seedb = jlong(x * 0x5ac0db)
# seedc = jlong(z * z) * 0x4307a7
# seedd = jlong(z * 0x5f24f) ^ 0x3ad8025f
#
# seeder = (worldseed + seeda + seedb + seedc + seedd)
# #The seed line is INCORRECT!!
# # Here's the exact line of Java I'm trying to replicate:
# # Random rnd = new Random(seed + (long) (xPosition * xPosition * 0x4c1906) + (long) (xPosition * 0x5ac0db) + (long) (zPosition * zPosition) * 0x4307a7L + (long) (zPosition * 0x5f24f) ^ 0x3ad8025f);
# print("[%d,%d: %d] {%d,%d,%d,%d}" % (x,z,seeder,seeda,seedb,seedc,seedd), end="\r\n") | [
"[email protected]"
]
| |
bf0fc700681e156ba1a5fcc7f7484c85354f59f1 | a175e3f1cbb638bedc467bd408144209b5bc1ff9 | /EJERCICIO6.py | 3feba996cb015efffb2727992716239b44bf0939 | []
| no_license | Jelowis/Deber-De-Estructura-de-Datos | ace306332e8ea987dfefe0b0cdf458af478f91db | 9704c644527725b5d9d399699d841d4599f4cee8 | refs/heads/main | 2023-06-05T11:15:01.372313 | 2021-06-28T05:07:52 | 2021-06-28T05:07:52 | 380,911,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | #EJEMPLOS 1:
# Linealizar las siguientes expresiones:
# (no se olvide que linealizar significa escribir una expresión algebraica en una sola línea).
a=1
b=2
x=(a/(a+b))/(a/(a-b))
print(x)
| [
"[email protected]"
]
| |
aef119967ff1e2102f99d42ccb8e2aedf28da1da | 003d0e0b2cfe7179d149d035ed50daa4c5bb5c22 | /LDWS_videofile.py | 101bf414f1b2dfcdfe4ef1413cad751cfb29a94c | []
| no_license | dandancat123/LDWS-1 | 17a11d77ab731de3f65a72e9486b41afbdc8160d | 2c34a1671cf4bb3aee2494118253151962b032b4 | refs/heads/master | 2020-06-01T05:54:37.024249 | 2016-11-01T16:47:44 | 2016-11-01T16:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,072 | py | from __future__ import division
import numpy as np
import cv2
import math
font = cv2.FONT_HERSHEY_SIMPLEX
mp=7
cap = cv2.VideoCapture("F:/ab.mp4")
while(cap.isOpened()):
ret, frame = cap.read()
frm1 = frame[120:238,1:425]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#cv2.imshow('frame',gray)
src2 = gray[120:238,1:425]
src= cv2.medianBlur(src2,3)
dst = cv2.Canny(src, 70, 250)
lines = cv2.HoughLines(dst, 1, math.pi/180.0, 85, np.array([]), 0, 0)
try :
a,b,c = lines.shape
except AttributeError :
continue
p1=np.array([])
#p2=np.array([])
#p3=np.array([[1,1]])
#p4=np.array([[1,1]])
#p5=np.array([[1,1]])
for i in range(a):
rho = lines[i][0][0]
theta = lines[i][0][1]
a = math.cos(theta)
b = math.sin(theta)
x0, y0 = a*rho, b*rho
pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
if pt2[0]==pt1[0]:
xi=pt2[0]
elif pt2[1]==pt1[1]:
xi=0
else :
m=((pt2[1]-pt1[1])/(pt2[0]-pt1[0]))
b=pt2[1]-m*pt2[0]
x=((118-b)/m)
xi=math.floor(x)
#y=m*x+b
if 0 < xi < 400:
p1=np.append(p1,xi)
#p2=np.append(p2,pt2[0])
#p3=np.vstack((p3,[pt1[0],pt1[1]]))
#p4=np.vstack((p4,[pt2[0],pt2[1]]))
#p5=np.vstack((p5,[x,y]))
cv2.line(frm1, pt1, pt2, (0, 0, 255), 1, cv2.LINE_AA)
p1=sorted(p1)
if len(p1)!=0:
m=np.mean(p1)
else :
m=0
    print('p1', p1)
    print('mean', m)
if m!=0:
if m > mp+17 :
cv2.putText(frm1,'LEFT',(10,50), font, 2,(0,255,0),2,cv2.LINE_AA)
mp=m
elif m < mp-17 :
cv2.putText(frm1,'RIGHT',(250,50), font, 2,(0,255,0),2,cv2.LINE_AA)
mp=m
#print 'p3',p3
#print 'p4',p4
#print 'p5',p5
cv2.imshow('frame',frm1)
if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
| [
"[email protected]"
]
| |
37f4f37f0f18dfbc2abbb9d59aa6f84967ff2d1b | 86a334c6b0207703a741aedc6a5e429d409cf1d9 | /main.py | e0b9f12fd8e91f364704d8e2d0c136c8559c597a | []
| no_license | fu2re/jati | acf9df450b190e68e87d7564a947b3ab7f93816e | fb39aab856b6bde47a361a68bbdf0088951c6a75 | refs/heads/master | 2022-12-02T19:04:41.433316 | 2020-08-16T22:11:02 | 2020-08-16T22:11:02 | 287,366,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 675 | py | import uvicorn
from fastapi import FastAPI
from wallet.conf import settings
from wallet.models import db
from wallet.views import router
async def init_app(db_dsn=settings.DB_DSN):
await db.set_bind(db_dsn,
echo=settings.DB_ECHO,
min_size=settings.DB_POOL_MIN_SIZE,
max_size=settings.DB_POOL_MAX_SIZE,
ssl=settings.DB_SSL)
app = FastAPI(title='jati',
version='0.1.0',
on_startup=[init_app])
app.include_router(router)
if __name__ == "__main__":
uvicorn.run(
app,
host="0.0.0.0",
port=8000,
log_level="info"
)
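# Note (added): with the server running, the wallet routes registered in
# wallet.views are served at http://0.0.0.0:8000; FastAPI also exposes
# interactive API docs at /docs by default.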
| [
"[email protected]"
]
| |
01680197696a2a264518b8624edee037385aefd9 | ca8c42c0a9b16876182bacfc3a3914ed64d1d74c | /database.py | 68a51d55f32066bddd770b640371f81796500421 | []
| no_license | Mamdad0u/audio_fingerprint | 3e2b08cf8bcb007b3fbf245777b2f75b5cf02cf6 | 307e5d8f53afa99d9f2d4eba4e45d0d483e3ffcf | refs/heads/main | 2023-05-15T04:16:49.194575 | 2021-05-29T18:18:07 | 2021-05-29T18:18:07 | 372,041,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 25 22:12:29 2021
@author: duwat
"""
import os
import warnings
warnings.filterwarnings("ignore")
from functions import get_filename
os.chdir('C:/Users/duwat/Desktop/projet shazam/Album')
# Song database
song_database = get_filename('C:/Users/duwat/Desktop/projet shazam/Album')
print('The songs contained in the database are:')
index = 1
for i in song_database:
print(index, '- ' + i[:-4])
index = index+1
| [
"[email protected]"
]
| |
91e47148a1cf204a5219897181db5e063e3b713f | c539bef50942b7b52b8f4184c39d2f948788a851 | /polls/tests.py | 971c42925ebff7e44e6cf8412999051dc9f206d9 | [
"MIT"
]
| permissive | TulioAbreu/django-tutorial | 16cbbaf95f35017fe59ecca8ff1b98a77bebe664 | 3097ec5f57dccbb84737e6b847c2ebb07cb4a076 | refs/heads/master | 2020-05-25T23:03:10.830763 | 2019-08-09T15:12:36 | 2019-08-09T15:12:36 | 188,027,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,967 | py | import datetime
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from .models import Question
class QuestionModelTests(TestCase):
def test_was_published_recently_with_future_question(self):
"""
        was_published_recently() returns False for questions whose
        pub_date is in the future.
"""
time = timezone.now() + datetime.timedelta(days=30)
future_question = Question(pub_date=time)
self.assertIs(future_question.was_published_recently(), False)
def test_was_published_recently_with_old_question(self):
"""
was_published_recently() returns False for questions
whose pub_date is older than 1 day.
"""
time = timezone.now() - datetime.timedelta(days=1, seconds=1)
old_question = Question(pub_date=time)
self.assertIs(old_question.was_published_recently(), False)
def test_was_published_recently_with_recent_question(self):
"""
was_published_recently() returns True for questions
whose pub_date is within the last day.
"""
time = timezone.now() - datetime.timedelta(hours=23, minutes=59,
seconds=59)
recent_question = Question(pub_date=time)
self.assertIs(recent_question.was_published_recently(), True)
def create_question(question_text, days):
"""
Create a question with the given `question_text` and published
the given number of `days` offset to now (negative for questions
published in the past, positive for questions that have yet to
be published).
"""
time = timezone.now() + datetime.timedelta(days=days)
return Question.objects.create(question_text=question_text,
pub_date=time)
class QuestionIndexViewTests(TestCase):
def test_no_questions(self):
"""
If no questions exist, an appropriate message is displayed.
"""
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
Questions with a pub_date in the past are displayed on the
index page.
"""
create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_future_question(self):
"""
Questions with a pub_date in the future aren't displayed on
the index page.
"""
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
Even if both past and future questions exist, only past questions
are displayed.
"""
create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question.>']
)
def test_two_past_questions(self):
"""
The questions index page may display multiple questions.
"""
create_question(question_text="Past question 1.", days=-30)
create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
response.context['latest_question_list'],
['<Question: Past question 2.>', '<Question: Past question 1.>']
)
class QuestionDetailViewTests(TestCase):
def test_future_question(self):
"""
        The detail view of a question with a pub_date in the future
returns a 404 not found.
"""
future_question = create_question(question_text='Future question.', days=5)
url = reverse('polls:detail', args=(future_question.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_past_question(self):
"""
The detail view of a question with a pub_date in the past
displays the question's text.
"""
past_question = create_question(question_text='Past Question.', days=-5)
url = reverse('polls:detail', args=(past_question.id,))
response = self.client.get(url)
self.assertContains(response, past_question.question_text)
| [
"[email protected]"
]
| |
4695057503386c922f4e47a3787760213de5fdfd | 0571addf2ed1adc415755dfcccd480674d57e6d3 | /miye/scheduler/views.py | be351663b816fc51a18b9506fa5945be14b05f27 | []
| no_license | aya0321/CGU-IST303-F21-Prj-Team-2 | 632f092f6810c945f8a4945c02f1f6ebe1d3b55d | e14cddb2defc3f6b3968fa0a1b3f10a461e3e144 | refs/heads/main | 2023-08-23T17:57:10.831347 | 2021-10-12T21:53:15 | 2021-10-12T21:53:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | from django.shortcuts import render
from django.http import HttpResponseRedirect
from db.models import DBService, DBGuest, DBAppointment
from .forms import ScheduleForm, is_time_between
from datetime import datetime, timedelta
def timeslot_is_free(service, date, time):
print(service)
print(type(date))
d = datetime.strptime(date, '%Y-%m-%d')
print(type(d))
print(time)
t = datetime.strptime(time, '%H:%M:%S').time()
print(type(time))
print('\n\n')
try:
appointments = DBAppointment.objects.filter(date=d)
except Exception as e:
print(e)
print('found no appointments on that date')
appointments = None
if appointments is None:
return True
try:
for appointment in appointments:
"""
[(0, "Mineral Bath: 90 mins – $2.50 per minute"),
(1, "Mineral Bath: 60 mins – $2.50 per minute"),
(2, "Swedish Massage: 30 mins – $3.00 per minute"),
(3, "Swedish Massage: 60 mins – $3.00 per minute"),
(4, "Deep Tissue: 30 mins – $3.00 per minute"),
(5, "Deep Tissue: 60 mins – $3.00 per minute"),
(6, "Shiatsu: 30 mins – $3.00 per minute"),
(7, "Shiatsu: 60 mins – $3.00 per minute")]
"""
# is it the same service?
if service == appointment.service:
duration = [
90,
60,
30,
60,
30,
60,
30,
60
]
end_time = appointment.start_time + timedelta(minutes = duration[service])
                # conflict: the requested time falls inside an existing booking
                if is_time_between(appointment.start_time, end_time, t):
                    return False
except TypeError as te:
print(te)
print('only one appointment in the system, try again')
return True
# Create your views here.
def index(request):
# services = DBService.objects.all()
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = ScheduleForm(request.POST)
# check whether it's valid:
if form.is_valid():
if timeslot_is_free(form['service'].value(), form['date'].value(), form['time'].value()):
DBAppointment.objects.create(service=form['service'].value(), guest=form['spa_number'].value(),
date=form['date'].value(), start_time=form['time'].value())
return HttpResponseRedirect('/calendar')
else:
return HttpResponseRedirect('/')
# if a GET (or any other method) we'll create a blank form
else:
form = ScheduleForm()
services = DBService.objects.all()
return render(request, 'scheduler/schedule.html', {'form': form, 'services': services})
def calendar(request):
appointments = DBAppointment.objects.order_by('date', 'start_time')
return render(request, 'scheduler/calendar.html', {'appointments': appointments})
| [
"[email protected]"
]
| |
ca2e0e976863587691cfa5559255799fc9795b57 | becf4b22082a64d7bf24a89e87d90e64d677031c | /python/single_number.py | ff6b4ce9cac327fc74eb022e9569f907ce8565ee | []
| no_license | imalihaider/Hacktoberfest-2K21 | ad2213ae30b87ab8a3159957e8c3541ca4a07cc4 | ae43b06d8bce6154a7044b983bc8e7e87b58da2f | refs/heads/main | 2023-09-05T07:22:53.956085 | 2021-10-21T07:07:28 | 2021-10-21T07:07:28 | 420,880,613 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | ''' Given an array of integers. All numbers occur twice except one number which occurs once, find the number
array = [1,2,3,1,2]
Required no = 2*(sum_of_array_without_duplicates) - (sum_of_array)
= 2*(1 + 2 + 3) - (1 + 2 + 3 + 1 + 2)
= 2*6 - 9
= 12 - 9
= 3 (required answer)
'''
numbers=list(map(int,input().split()))
sum_without_duplicates=sum(set(numbers))
total_sum=sum(numbers)
print(2*sum_without_duplicates-total_sum)
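# Equivalent check (added illustration): XOR-ing all elements cancels the
# duplicate pairs and leaves the unique number, with no sum bookkeeping.
from functools import reduce
print(reduce(lambda a, b: a ^ b, numbers))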
| [
"[email protected]"
]
| |
71a4a495fab5597619eb47e0a72699d37a5edef9 | 2caf42defe0906f05850a145623f8bb82cab367d | /sendEmailAuto.py | 2b5d9cd15a62ce8d5bf8f5c943f465185909328f | []
| no_license | reshinto/useless_apps | 8cfc77bf4f1794400300161a27a3d051394c25c2 | 6d99062529abac3801d8a9a55c4e68d30ccec163 | refs/heads/master | 2023-05-28T12:27:45.816230 | 2023-05-14T07:42:27 | 2023-05-14T07:42:27 | 150,591,844 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | import os
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
# Use to upload attachment file
from email.mime.base import MIMEBase
from email import encoders
import getpass
# Login email
username = os.environ.get("my_email")
password = os.environ.get("my_email_password")
# Use the following if you want to type your password before sending
# password = getpass.getpass("Password: ")
# Multiple emails: ["[email protected]", "[email protected]"]
to_email = ["[email protected]"]
cc_email = []
bcc_email = ["[email protected]"]
subject = "MY SUBJECT"
msg = MIMEMultipart()
msg["From"] = username
msg["To"] = ', '.join(to_email)
msg["cc"] = ', '.join(cc_email)
#msg["bcc"] = ', '.join(bcc_email)
msg["Subject"] = subject
emails = to_email + cc_email + bcc_email
body = """\
Hi there
"""
msg.attach(MIMEText(body, "plain"))
# Upload file: remove """ """ to attach file
"""
filename = "test.py"
attachment = open(filename, "rb")
part = MIMEBase("application", "octet-stream")
part.set_payload((attachment).read())
encoders.encode_base64(part)
part.add_header("Content-Disposition", "attachment; filename= " + filename)
msg.attach(part)
"""
text = msg.as_string()
server = smtplib.SMTP("smtp.gmail.com",587)
server.starttls()
server.login(username, password)
server.sendmail(username,emails,text)
server.quit()
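# Alternative (added note): smtplib.SMTP_SSL("smtp.gmail.com", 465) opens an
# implicit-TLS connection instead, in which case the starttls() call above is
# not needed.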
| [
"[email protected]"
]
| |
a34857b1aa94ef4f72a28754feaa3cc06474bdb9 | c7b0b67d3cd539fbf2cb8c9adc1a317d9d86d556 | /Whatsinit.py | 612119dbadb035da08b82fb66f36c962c80613ec | []
| no_license | priyank-py/GettingStarted | 9636bebb66b14c109fa2e8015ddd62033e0d1627 | df07a53f84e854a5762aef816de8eaa5aed1cb0b | refs/heads/master | 2020-05-23T22:37:11.949297 | 2019-12-18T10:52:44 | 2019-12-18T10:52:44 | 186,977,603 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | class Enemy:
def __init__(self,x):
self.energy = x
def current_Energy(self):
print(self.energy)
enemy766 = Enemy(5)
Boss_Enemy = Enemy(30)
enemy766.current_Energy()
Boss_Enemy.current_Energy() | [
"[email protected]"
]
| |
4f2c1e2d7006bde5e44311d534e7d4b03ec4d140 | dc8a337ea1d8a285577d33e5cfd4dbbe846ee1a0 | /src/main/scala/algorithms/SquareRootDecomposition.py | 1498f98cbdd0e034b612eafc6b08897437581df8 | []
| no_license | joestalker1/leetcode | 8a5cdda17abd33c3eef859732f75d7bec77a9d0e | ae392ddbc7eb56cb814b9e9715043c98a89a6314 | refs/heads/master | 2023-04-13T22:09:54.407864 | 2023-04-09T19:22:54 | 2023-04-09T19:22:54 | 131,803,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py | import math
class Solution:
    def __init__(self, values):
        self.vals = [0] * len(values)
        n = len(values)
        self.block_size = math.ceil(math.sqrt(n))
        self.blocks = [0] * self.block_size
        block_index = -1
        for i in range(n):
            self.vals[i] = values[i]
            if i % self.block_size == 0:
                block_index += 1
            self.blocks[block_index] += values[i]
def update(self, i, value):
blk = i // self.block_size
self.blocks[blk] += value - self.vals[i]
self.vals[i] = value
def query(self, left, right):
cur_sum = 0
while left < right and left % self.block_size != 0 and left != 0:
cur_sum += self.vals[left]
left += 1
while left + self.block_size - 1 <= right:
cur_sum += self.blocks[left // self.block_size]
left += self.block_size
while left <= right:
cur_sum += self.vals[left]
left += 1
return cur_sum
sol = Solution([3, 2, 4, 5, 1, 1, 5, 3, 7])
print(sol.query(0, 2))
sol.update(1, 6)
print(sol.query(0, 2))
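# Complexity note (added): update() is O(1); query() touches at most two
# partial blocks plus the whole blocks between them, i.e. O(sqrt(n)) per query.
sol.update(4, 3)        # changing index 4 leaves blocks outside [0, 2] alone
print(sol.query(0, 2))  # still 13: the prefix sum is unaffected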
| [
"[email protected]"
]
| |
7cb9bef4e826d7138ea79550d55b8f18d82095a0 | 0220dbfb5cac8dea227e72c9d2f652d21bd6bc7b | /TopoShortcutBind.py | 9323c91dd86275b567f4a90ed9353106969f5c0f | []
| no_license | fornof/BlenderAddons | ca9ca6b691ae5a0c0558f93c54423cc7703e0402 | e1fe966501b8613b306a55f23ce141c38059ece1 | refs/heads/master | 2022-03-06T03:35:02.151917 | 2022-02-20T20:48:33 | 2022-02-20T20:48:33 | 19,930,422 | 20 | 8 | null | null | null | null | UTF-8 | Python | false | false | 3,307 | py | bl_info = {
"name": "ShortcutTool",
"author": "Robert Fornof",
"version": (0, 5),
"blender": (2, 70, 0),
"location": "Check the shortcuts for dynamic topology ",
"description": "CTRL +SPACE toggles subdiv edges and collapse edges, hold down Q in sculpt to do the same",
"warning": "",
"wiki_url": "",
"category": "Object"}
import bpy
from bpy.app.handlers import persistent
def Press(self,context):
c = 'COLLAPSE'
context.tool_settings.sculpt.detail_refine_method = c
print("Pressed")
def Release(self,context):
s = 'SUBDIVIDE'
c = 'COLLAPSE'
context.tool_settings.sculpt.detail_refine_method = s
print("Released")
def Toggle(self,context):
s = 'SUBDIVIDE'
c = 'COLLAPSE'
context = bpy.context.scene
currentSetting = context.tool_settings.sculpt.detail_refine_method
if currentSetting == s:
context.tool_settings.sculpt.detail_refine_method = c
elif currentSetting == c:
context.tool_settings.sculpt.detail_refine_method = s
else:
context.tool_settings.sculpt.detail_refine_method = s #default
class TopoShortcutOn(bpy.types.Operator):
"""This Operator Add a Object to Another with Boolean Operations"""
bl_idname = "object.collapseon"
bl_label = "Topo Subdiv Toggle"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
print("Toggled")
Release(self,context)
return {'FINISHED'}
class TopoShortcutToggle(bpy.types.Operator):
"""This Operator Add a Object to Another with Boolean Operations"""
bl_idname = "object.collapseon"
bl_label = "Topo Subdiv Toggle"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
print("Toggled")
Toggle(self,context)
return {'FINISHED'}
class TopoShortcutOff(bpy.types.Operator):
"""This Operator Add a Object to Another with Boolean Operations"""
bl_idname = "object.collapseon"
bl_label = "Topo Subdiv Toggle"
@classmethod
def poll(cls, context):
return context.active_object is not None
def execute(self, context):
print("Toggled")
Release(self,context)
return {'FINISHED'}
def setTool(context, value):
    context.tool_settings.sculpt.detail_refine_method = value
#------------------- REGISTER ------------------------------
addon_keymaps = []
def register():
bpy.utils.register_class(TopoShortcutOff)
bpy.utils.register_class(TopoShortcutOn)
bpy.utils.register_class(TopoShortcutToggle)
km = bpy.context.window_manager.keyconfigs.active.keymaps['Sculpt']
    kmi = km.keymap_items.new(TopoShortcutOff.bl_idname, 'Q', 'PRESS', ctrl = False)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(TopoShortcutOn.bl_idname, 'Q', 'RELEASE', ctrl = False)
    addon_keymaps.append((km, kmi))
    kmi = km.keymap_items.new(TopoShortcutToggle.bl_idname, 'SPACE', 'RELEASE', ctrl = True)
    addon_keymaps.append((km, kmi))
def unregister():
    bpy.utils.unregister_class(TopoShortcutOff)
    bpy.utils.unregister_class(TopoShortcutOn)
    bpy.utils.unregister_class(TopoShortcutToggle)
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if __name__ == "__main__":
register()
| [
"[email protected]"
]
| |
c4b4e250e50b767a813c6fec52c7e8ccbe76717d | be5caab0d5497959b1317b1acd07d91bddb93df8 | /Fig6 | 9cfc6db442bc96e1d35ee1a385d01d11cbaf3278 | []
| no_license | wp436507/pm25_o3_paper | b60975885cd738d0d10d0d5407d2b7c56198031c | abd3354134678930b49d3ecabe660197cc3170c3 | refs/heads/main | 2023-07-12T23:44:59.523433 | 2021-08-15T07:47:59 | 2021-08-15T07:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,804 | #!/usr/bin/env python3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from itertools import compress
from scipy import stats
import matplotlib as mpl
mpl.rc('font',family='Arial')
# load MODIS burned area data
bas = np.load('baf_sums.npy')
# these arrays of PM2.5 and O3 gridded data for the western U.S. can be
# downloaded from Zenodo.org (see README for details)
# dimension 1: 19 latitudes from 31-49N
# dimension 2: 24 longitudes from 126-103W
# dimension 3: 7300 days from 1 OCT 2000 - 30 SEP 2020
pm_west = np.load('pm_west.npy') # average daily concentrations
oz_west = np.load('oz_west.npy') # MDA8 concentrations
pm_west = np.reshape(pm_west,(456,7300)) # vectorize lats & lons
oz_west = np.reshape(oz_west,(456,7300)) # vectorize lats & lons
year_start = np.arange(0,7300,365)
all_matches = np.empty([456,3660])
all_matches1 = np.empty([456,7300])
for j in range(456):
cell = pm_west[j,:]
if np.nanmax(cell) > 0:
seas_days = np.empty(365)
fulldays = []
fulldays1 = []
for k in range(20):
step = year_start[k]
sea = np.arange(step,step+365,1)
pm2 = pm_west[j,sea]
oz2 = oz_west[j,sea]
y3 = np.nanpercentile(pm2,90)
x3 = np.where(pm2 > y3)[0]
y4 = np.nanpercentile(oz2,90)
x4 = np.where(oz2 > y4)[0]
match2 = [x4[i] in x3 for i in range(len(x4))]
day_idx2 = list(compress(x4,match2))
for m in range(365):
if m not in day_idx2:
seas_days[m] = 0
else:
seas_days[m] = 1
seas_days1 = seas_days[182:]
fulldays.extend(list(seas_days1))
fulldays1.extend(list(seas_days))
all_matches[j,:] = fulldays
all_matches1[j,:] = fulldays1
else:
all_matches[j,:] = ['nan']
all_matches1[j,:] = ['nan']
match_trunc = all_matches1[:,822:]
all_mats = np.nansum(match_trunc,axis=0)
all_mats = (all_mats/375)*100
maxs = np.flipud(np.argsort(all_mats))
# this code extracts co-occurrence extent peaks from non-overlapping
# 15-day windows between 2003-2020
day_accum = []
for k in range(6478):
day = maxs[k]
window = list(np.arange(day-7,day+8,1))
i, j = window[0], window[-1]
res = any(ele >= i and ele <= j for ele in day_accum)
if res == 0:
day_accum.append(day)
day_accum = np.array(day_accum)
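# (added note) each accepted peak excludes the 7 days on either side of itself,
# so any two retained peaks are at least 8 days apart, i.e. the windows are
# non-overlapping 15-day spans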
dates = pd.date_range(start='1/1/2003', end='9/30/2020')
dates = dates[~((dates.month == 2) & (dates.day == 29))]
df = pd.DataFrame({'idx':np.arange(0,6478,1),'val':all_mats,'date':dates})
df = df[df['idx'].isin(day_accum)].reset_index()
df['day'] = df.date.dt.date
df1 = df.sort_values(by=['val'], ascending=False).reset_index()
df1['month'] = df1.date.dt.month
df1 = df1[~(df1.month == 10)]
df1 = df1[~(df1.month == 11)]
df1 = df1[~(df1.month == 12)]
df1 = df1[~(df1.month == 1)]
df1 = df1[~(df1.month == 2)]
df1 = df1[~(df1.month == 3)]
df1 = df1[~(df1.month == 4)]
df1 = df1[~(df1.month == 5)]
df1 = df1[~(df1.month == 6)]
df1.drop(columns='level_0',inplace=True)
df1 = df1.reset_index()
# load standardized anomalies of Tmax
tmax_z = np.load('tmax_z_20032020.npy')
tvec = np.reshape(tmax_z,(6336,6570))
tvec = tvec[:,:6478]
# this code extracts the daily percentage of the study area experiencing
# positive Tmax anomalies exceeding 1 standard deviation
counts = []
for k in range(6478):
day = tvec[:,k]
count = np.size(np.where(day >= 1)) # '1' is for one standard deviation
counts.append((count/6336)*100)
counts = np.array(counts)
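# (added worked example) with 6336 grid cells, a day on which 634 cells exceed
# the 1-sigma threshold maps to (634/6336)*100, i.e. roughly 10% of the domain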
lag_length = 7
windows = np.empty([21,lag_length+1]) # 21 independent widespread co-occurrence extent peaks
windows_tmax = np.empty([21,lag_length+1])
vals = []
for k in range(21):
row = list(df1.values[k,:])
idx = row[2]
vals.append(np.round(row[3],1))
windows[k,:] = bas[idx-lag_length:idx+1]
windows_tmax[k,:] = counts[idx-lag_length:idx+1]
avgs = np.nanmax(windows,axis=1)
max_t = np.nanmax(windows_tmax,axis=1)
stats.pearsonr(avgs,vals) # calculate Pearson correlation between burned area and co-occurrence extent
stats.pearsonr(max_t,vals) # calculate Pearson correlation between +Tmax extent and co-occurrence extent
# plot A
fig = plt.figure(figsize=(8,6))
ax = sns.regplot(avgs, vals, ci=None, scatter_kws={'s':75}, color='tab:brown')
plt.scatter(avgs[:5],vals[:5],color='y') # color the 5 most widespread days differently
ax.set_xlabel('Max daily burned area (Sq. Km.)',size=16)
ax.set_ylabel('Co-occurrence extent - % of western US',size=16)
ax.set_yticks(np.arange(25,75,5))
ax.set_xticks(np.arange(0,1201,100))
ax.tick_params(labelsize=13)
plt.title('(A) Burned area vs. co-occurrence spatial extent', size=17)
plt.grid(linewidth=0.5)
#ax.axhline(y=25, linestyle='--', color='k')
l = plt.legend(['$\it{r}$ = 0.66, $\it{p}$ = 0.001'],
loc='upper left', fontsize=15)
for text in l.get_texts():
text.set_color('tab:brown')
plt.savefig('fig6a.png',dpi=600)
# plot B
fig = plt.figure(figsize=(8,6))
ax = sns.regplot(max_t, vals, ci=None, scatter_kws={'s':75}, color='orangered')
plt.scatter(max_t[:5],vals[:5],color='y') # color the 5 most widespread days differently
ax.set_xlabel('% of western US with +Tmax anomalies >1$\sigma$',size=16)
ax.set_ylabel('Co-occurrence extent - % of western US',size=16)
ax.set_yticks(np.arange(25,75,5))
ax.set_xticks(np.arange(0,101,5))
ax.tick_params(labelsize=13)
plt.title('(B) Tmax anomalies vs. co-occurrence spatial extent', size=17)
plt.grid(linewidth=0.5)
l = plt.legend(['$\it{r}$ = 0.49, $\it{p}$ = 0.02'],
loc='upper left', fontsize=15)
for text in l.get_texts():
text.set_color('orangered')
plt.savefig('fig6b.png',dpi=600)
| [
"[email protected]"
]
| ||
add5de3393cb2a28c1c2499cc0c0db892962f913 | 00643559c0f925f0740e47ae488456b2102141fe | /Algorithm/Parameters/ES.py | c9c88613b976b557353898b5ff428806ed6aad34 | []
| no_license | yutarou-1204/LabTask | 9741aedf8d6772bbe341ec99c5e8cd572f7db72c | 4f1611f1134871c48e8e85f7a650231147cab0f4 | refs/heads/main | 2023-03-07T03:09:02.448594 | 2021-02-18T10:37:04 | 2021-02-18T10:37:04 | 339,763,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | u_lim = 1
l_lim = -1
u_mulim = u_lim/10
l_mulim = l_lim/10
children = 10
parent = 5
evalmax = 1000
G = int(evalmax/children) | [
"[email protected]"
]
| |
617dbdcc4efae43da961d888f341f5f78d04f314 | 94900642790ec9dd62be461ae45cab0a71fb8c91 | /tfrrs/tfrrs/middlewares.py | 0cfd583c90e6f355a130e83e56fc218c7f3e2dc2 | []
| no_license | Megts/progression-visualizer | 0d1f9867ecc2d043a65f468f14660180a5eb00a3 | 9793515285c42a4903141993333c934f91595c3f | refs/heads/main | 2023-04-14T02:27:45.963735 | 2021-04-27T00:43:17 | 2021-04-27T00:43:17 | 332,961,511 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,646 | py | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class TfrrsSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class TfrrsDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
]
| |
c2e00a821d6e92bdfbb98ecf090636164be3ef88 | cbde10e19b4ed995f1649d3c05cf58245c75ac2c | /map.py | e91ab41a0e5faa0a03f8c5f9bc07320989d36074 | []
| no_license | jkereako/algorithms | 06b07d6f477f427ea9b9e89d95c4f00080233d71 | a2e57510517e903d79887a7077cdfd1f6ff418a1 | refs/heads/master | 2021-01-14T10:30:44.738235 | 2017-07-19T14:38:29 | 2017-07-19T14:38:29 | 45,010,915 | 2 | 0 | null | 2015-10-27T02:13:11 | 2015-10-27T02:13:11 | null | UTF-8 | Python | false | false | 1,563 | py |
class HashTable(object):
    def __init__(self, size):
        self.size = size
        self.slots = [None] * self.size   # stored keys (None marks an empty slot)
        self.data = [None] * self.size    # stored values
    def put(self, key, data):
        hash_val = self.hash_function(key)
        if self.slots[hash_val] is None:
            self.slots[hash_val] = key
            self.data[hash_val] = data
        else:
            # linear probing: walk until an empty slot or the same key is found
            next_slot = self.rehash(hash_val)
            while self.slots[next_slot] is not None and self.slots[next_slot] != key:
                next_slot = self.rehash(next_slot)
            if self.slots[next_slot] is None:
                self.slots[next_slot] = key
                self.data[next_slot] = data
            else:
                self.data[next_slot] = data  # replace the current value
    def get(self, key):
        start = self.hash_function(key)
        data = None
        stop = False
        found = False
        position = start
        while self.slots[position] is not None and not found and not stop:
            if self.slots[position] == key:
                found = True
                data = self.data[position]
            else:
                position = self.rehash(position)
                if position == start:
                    stop = True
        return data
    def hash_function(self, key):
        return key % self.size
    def rehash(self, old_hash):
        return (old_hash+1) % self.size
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, data):
self.put(key, data)
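# Minimal usage sketch (added; the table size 11 and the sample keys are
# arbitrary): __setitem__/__getitem__ wrap put/get, so the table reads like a dict.
if __name__ == "__main__":
    table = HashTable(11)
    table[54] = "cat"
    table[26] = "dog"
    table[93] = "lion"
    print(table[54], table[26], table[93])  # -> cat dog lion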
| [
"[email protected]"
]
| |
b86825732337e5053f01756b9ae092a2c3d1b124 | b1e85e5d848fffa13a6055c78ada579317e7ad6a | /tests/test_results.py | 66e7c1f140df23b495c20f50b2919274fae59e29 | []
| no_license | yjs1210/cardcounting | 4560fe440a77388e5856c5382eefd10eca21eb92 | b2a4d91db02c04c127fd7ed7373e7cff6a0c7b21 | refs/heads/master | 2023-02-19T20:48:23.775204 | 2021-01-21T07:23:24 | 2021-01-21T07:23:24 | 328,041,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,801 | py | import pytest
import os
import sys
ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..")
sys.path.append(os.path.join(ROOT, "src"))
from simulator import hard_policy, soft_policy, split_policy, betting_policy
from blackjack import (
Actions,
Cards,
Deck,
Player,
resolve_player_action,
Hand,
PlayerResultTypes,
resolve_dealer_action,
DealerResultTypes,
play,
)
import numpy as np
def test_player_final_hands():
# lots of splits, doesn't split more than 4
deck = Deck()
player = Player(
bankroll=np.inf,
hard_policy=hard_policy,
soft_policy=soft_policy,
split_policy=split_policy,
betting_policy=betting_policy,
)
player.increment_num_hands()
deck.set_cards(
[
Cards.TEN,
Cards.QUEEN,
Cards.KING,
Cards.NINE,
Cards.EIGHT,
Cards.ACE,
Cards.TEN,
Cards.FOUR,
Cards.EIGHT,
Cards.NINE,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TWO,
]
)
output = resolve_player_action(
Hand([Cards.TWO, Cards.TWO]), Cards.SEVEN, player, deck
)
assert output == [
(PlayerResultTypes.LIVE, 21),
(PlayerResultTypes.DOUBLE, 14),
(PlayerResultTypes.LIVE, 21),
(PlayerResultTypes.DOUBLE, 21),
]
player.next_round()
player.increment_num_hands()
deck.set_cards(
[
Cards.FIVE,
Cards.FIVE,
Cards.TWO,
Cards.KING,
Cards.ACE,
Cards.ACE,
Cards.ACE,
Cards.ACE,
]
)
output = resolve_player_action(
Hand([Cards.ACE, Cards.ACE]), Cards.SEVEN, player, deck
)
assert output == [
(PlayerResultTypes.LIVE, 12),
(PlayerResultTypes.LIVE, 12),
(PlayerResultTypes.LIVE, 21),
(PlayerResultTypes.LIVE, 13),
]
player.next_round()
player.increment_num_hands()
output = resolve_player_action(
Hand([Cards.ACE, Cards.JACK]), Cards.SEVEN, player, deck
)
assert output == [(PlayerResultTypes.BLACKJACK, 21)]
player.next_round()
player.increment_num_hands()
deck.set_cards(
[
Cards.FIVE,
Cards.FIVE,
Cards.TWO,
Cards.KING,
Cards.ACE,
Cards.ACE,
Cards.ACE,
Cards.ACE,
]
)
output = resolve_player_action(
Hand([Cards.ACE, Cards.SIX]), Cards.SIX, player, deck
)
assert output == [(PlayerResultTypes.DOUBLE, 18)]
player.next_round()
player.increment_num_hands()
output = resolve_player_action(
Hand([Cards.JACK, Cards.SIX]), Cards.ACE, player, deck
)
assert output == [(PlayerResultTypes.SURRENDER, 16)]
player.next_round()
player.increment_num_hands()
output = resolve_player_action(
Hand([Cards.JACK, Cards.SEVEN]), Cards.ACE, player, deck
)
assert output == [(PlayerResultTypes.SURRENDER, 17)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.TEN, Cards.THREE])
output = resolve_player_action(
Hand([Cards.ACE, Cards.TWO]), Cards.TWO, player, deck
)
assert output == [(PlayerResultTypes.LIVE, 16)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.SIX, Cards.TEN, Cards.THREE])
output = resolve_player_action(
Hand([Cards.ACE, Cards.TWO]), Cards.SEVEN, player, deck
)
assert output == [(PlayerResultTypes.BUST, 22)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.THREE])
output = resolve_player_action(
Hand([Cards.ACE, Cards.SEVEN]), Cards.TWO, player, deck
)
assert output == [(PlayerResultTypes.DOUBLE, 21)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.FIVE])
output = resolve_player_action(
Hand([Cards.ACE, Cards.SEVEN]), Cards.TWO, player, deck
)
assert output == [(PlayerResultTypes.DOUBLE, 13)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.FIVE])
output = resolve_player_action(
Hand([Cards.ACE, Cards.EIGHT]), Cards.TWO, player, deck
)
assert output == [(PlayerResultTypes.LIVE, 19)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.FIVE])
output = resolve_player_action(
Hand([Cards.NINE, Cards.NINE]), Cards.SEVEN, player, deck
)
assert output == [(PlayerResultTypes.LIVE, 18)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.FIVE])
output = resolve_player_action(
Hand([Cards.EIGHT, Cards.EIGHT]), Cards.ACE, player, deck
)
assert output == [(PlayerResultTypes.SURRENDER, 16)]
player.next_round()
player.increment_num_hands()
deck.set_cards([Cards.FIVE])
output = resolve_player_action(
Hand([Cards.TEN, Cards.TEN]), Cards.ACE, player, deck
)
assert output == [(PlayerResultTypes.LIVE, 20)]
def test_dealer_final_hands():
deck = Deck()
deck.set_cards(
[
Cards.SIX,
Cards.FOUR,
Cards.ACE,
Cards.THREE,
Cards.TEN,
Cards.TWO,
Cards.TWO,
Cards.FOUR,
Cards.TWO,
Cards.TWO,
Cards.THREE,
Cards.TEN,
Cards.FIVE,
Cards.TEN,
]
)
assert resolve_dealer_action(Hand([Cards.ACE, Cards.JACK]), deck) == (
DealerResultTypes.BLACKJACK,
21,
)
assert resolve_dealer_action(Hand([Cards.ACE, Cards.SIX]), deck) == (
DealerResultTypes.LIVE,
17,
)
assert resolve_dealer_action(Hand([Cards.TEN, Cards.SIX]), deck) == (
DealerResultTypes.LIVE,
21,
)
assert resolve_dealer_action(Hand([Cards.TEN, Cards.TWO]), deck) == (
DealerResultTypes.BUST,
22,
)
assert resolve_dealer_action(Hand([Cards.FIVE, Cards.THREE]), deck) == (
DealerResultTypes.LIVE,
19,
)
assert resolve_dealer_action(Hand([Cards.ACE, Cards.ACE]), deck) == (
DealerResultTypes.LIVE,
19,
)
assert resolve_dealer_action(Hand([Cards.TWO, Cards.FOUR]), deck) == (
DealerResultTypes.LIVE,
21,
)
assert resolve_dealer_action(Hand([Cards.TEN, Cards.SIX]), deck) == (
DealerResultTypes.BUST,
22,
)
def test_play():
player = Player(
bankroll=100,
hard_policy=hard_policy,
soft_policy=soft_policy,
split_policy=split_policy,
betting_policy=betting_policy,
)
deck = Deck(99)
# blackjacks
hands_1 = [
Cards.ACE,
Cards.JACK,
Cards.TWO,
Cards.FOUR,
] # player black jack dealer no blackjack
hands_2 = [
Cards.ACE,
Cards.JACK,
Cards.ACE,
Cards.JACK,
] # player black jack dealer blackjack
hands_3 = [
Cards.FOUR,
Cards.TWO,
Cards.ACE,
Cards.JACK,
] # player no black jack dealer blackjack
# hits
hands_4 = [
Cards.TEN,
Cards.EIGHT,
Cards.TWO,
Cards.TEN,
Cards.TEN,
] # Dealer shows T, has another T, Player hits with 10 to get to 20, push
hands_5 = [
Cards.TEN,
Cards.TEN,
Cards.EIGHT,
Cards.TWO,
Cards.TWO,
Cards.TEN,
] # Dealer shows T, has a Two, Player hits with 10 to get to 20, dealer busts to 22
hands_6 = [
Cards.SEVEN,
Cards.EIGHT,
Cards.TWO,
Cards.TEN,
Cards.TEN,
] # Dealer shows T, has a another T, Player hits with 10 to get to 17, stands and lose
hands_7 = [Cards.SEVEN, Cards.EIGHT, Cards.TEN, Cards.TEN] # Surrender 15
hands_8 = [
Cards.TEN,
Cards.THREE,
Cards.TEN,
Cards.TWO,
Cards.JACK,
Cards.TWO,
] # Dealer Shows two, player has 12, player hits and stands at 15, dealer hits and busts at 22
hands_9 = [
Cards.FIVE,
Cards.THREE,
Cards.TEN,
Cards.TWO,
Cards.JACK,
Cards.TWO,
] # Dealer Shows two, player has 12, player hits and stands at 15, dealer hits and wins busts at 17
# player stands
hands_10 = [
Cards.FIVE,
Cards.THREE,
Cards.TEN,
Cards.THREE,
Cards.JACK,
Cards.TWO,
] # Dealer Shows two, player has 13 and stands, dealer wins with 20
hands_11 = [
Cards.TEN,
Cards.SEVEN,
Cards.JACK,
Cards.JACK,
] # Player has 17 stands and loses
hands_12 = [
Cards.TEN,
Cards.TEN,
Cards.JACK,
Cards.NINE,
] # Player has 20 stands and wins
# regular surredners
hands_13 = [
Cards.FIVE,
Cards.TEN,
Cards.JACK,
Cards.TEN,
] # Player has 15 surrenders to 10 and loses
hands_14 = [
Cards.FIVE,
Cards.TEN,
Cards.NINE,
Cards.ACE,
] # Player has 15 surrenders to 11 and loses
hands_15 = [
Cards.SIX,
Cards.TEN,
Cards.JACK,
Cards.NINE,
] # Player has 16 surrenders to 9 and loses
hands_16 = [
Cards.SIX,
Cards.TEN,
Cards.JACK,
Cards.KING,
] # Player has 16 surrenders to 10 and loses
hands_17 = [
Cards.SIX,
Cards.TEN,
Cards.SEVEN,
Cards.ACE,
] # Player has 16 surrenders to A and loses
hands_18 = [
Cards.SEVEN,
Cards.TEN,
Cards.SEVEN,
Cards.ACE,
] # Player has 17 surrenders to A and loses
hands_19 = [
Cards.FIVE,
Cards.TEN,
Cards.JACK,
Cards.ACE,
] # Player has opportunity to surrender but blackjack overrules and dealer wins
# regular splits, max 4
hands_20 = [
Cards.SEVEN,
Cards.TEN,
Cards.JACK,
Cards.TEN,
Cards.EIGHT,
Cards.SEVEN,
Cards.ACE,
Cards.EIGHT,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TWO,
Cards.TEN,
Cards.SEVEN,
] # split twos 4 times. DOuble the first, Win with 21, Second to 9 does not double. Hits to 17 to push, Third bust at 22, Fourth stays at 19 to win.
hands_21 = [
Cards.TEN,
Cards.TEN,
Cards.FIVE,
Cards.ACE,
Cards.ACE,
Cards.SEVEN,
Cards.FOUR,
Cards.FOUR,
Cards.FOUR,
Cards.TWO,
Cards.SIX,
] # Split 4 three times, doubles to 12 to lose, Doubles to 20 to win, Stays at 14 to lose
hands_22 = [
Cards.EIGHT,
Cards.THREE,
Cards.TWO,
Cards.TEN,
Cards.ACE,
Cards.ACE,
Cards.ACE,
Cards.ACE,
Cards.NINE,
Cards.ACE,
] # Split Aces 4 times, Win first, lose all three others
# soft hands
hands_23 = [
Cards.SIX,
Cards.THREE,
Cards.TWO,
Cards.ACE,
Cards.TEN,
Cards.SIX,
] # Doubles Ace and wins to a dealer bust
hands_24 = [
Cards.FIVE,
Cards.TEN,
Cards.TWO,
Cards.ACE,
Cards.TEN,
Cards.SIX,
] # Doubles Ace to 13 and and loses to dealer 21
hands_25 = [
Cards.ACE,
Cards.SEVEN,
Cards.ACE,
Cards.SEVEN,
Cards.TEN,
] # Hits soft 18 vs ten and wins with 18 to 17
hands_26 = [
Cards.ACE,
Cards.SEVEN,
Cards.ACE,
Cards.TEN,
Cards.TEN,
] # Hits soft 18 vs ten and loses with 18 to 20
hands_27 = [
Cards.FOUR,
Cards.SIX,
Cards.ACE,
Cards.TEN,
Cards.TEN,
] # Hits soft 17 and stays at 21 and wins
hands_28 = [
Cards.NINE,
Cards.NINE,
Cards.SIX,
Cards.ACE,
Cards.TEN,
Cards.TEN,
] # Hits soft 17 and busts
hands_29 = [
Cards.SEVEN,
Cards.THREE,
Cards.FOUR,
Cards.TWO,
Cards.ACE,
Cards.TEN,
Cards.THREE,
] # Hits soft 13, gets to soft 17 vs 3, can't double and push at 20
hands_30 = [
Cards.TEN,
Cards.FIVE,
Cards.SEVEN,
Cards.ACE,
Cards.TEN,
Cards.TWO,
] # Doubles 18 vs 2 wins to bust
# hits many cards into doubles, doesn't double and wins
deck.set_cards(
deck.get_cards()
+ hands_30
+ hands_29
+ hands_28
+ hands_27
+ hands_26
+ hands_25
+ hands_24
+ hands_23
+ hands_22
+ hands_21
+ hands_20
+ hands_19
+ hands_18
+ hands_17
+ hands_16
+ hands_15
+ hands_14
+ hands_13
+ hands_12
+ hands_11
+ hands_10
+ hands_9
+ hands_8
+ hands_7
+ hands_6
+ hands_5
+ hands_4
+ hands_3
+ hands_2
+ hands_1
)
# test blackjacks
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1 * 1.5)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1 * 1.5)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (0.5)
# test regular hits
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (0.5)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1 * 1.5)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1 * 0.5)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100
# test regular stands
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 99
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 98
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 99
# test regular surrenders
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 98.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 98
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 97.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 97
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 96.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 96
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 95
# test regular splits
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (95 + 2 + 0 - 1 + 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (97 - 2 + 2 - 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (96 + 1 - 3)
# test soft hands
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (94 + 2)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (96 - 2)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (94 + 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (95 - 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (94 + 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (95 - 1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (94)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == (94 + 2)
def test_play_set2():
player = Player(
bankroll=100,
hard_policy=hard_policy,
soft_policy=soft_policy,
split_policy=split_policy,
betting_policy=betting_policy,
)
deck = Deck(99)
    # test stand with duplicate hands
    hands_1 = [Cards.NINE, Cards.NINE, Cards.TEN, Cards.SEVEN]
hands_2 = [Cards.NINE, Cards.NINE, Cards.NINE, Cards.ACE]
hands_3 = [Cards.NINE, Cards.NINE, Cards.TEN, Cards.QUEEN]
# test hits with duplicate hands
hands_4 = [Cards.TEN, Cards.SEVEN, Cards.TWO, Cards.TWO, Cards.TEN, Cards.QUEEN]
hands_5 = [Cards.TEN, Cards.FOUR, Cards.FOUR, Cards.TEN, Cards.QUEEN]
# test surredern with 8,8
hands_6 = [Cards.EIGHT, Cards.EIGHT, Cards.NINE, Cards.ACE]
# test softs hits into soft 16-18 but does not double
hands_7 = [
Cards.SEVEN,
Cards.FIVE,
Cards.TWO,
Cards.ACE,
Cards.NINE,
Cards.TWO,
] # becomes, soft 18, stands since you can't double with 3 cards, push
hands_8 = [
Cards.EIGHT,
Cards.SIX,
Cards.FOUR,
Cards.TWO,
Cards.ACE,
Cards.NINE,
Cards.THREE,
] # becomes, hard 13, stands and loses
hands_9 = [
Cards.NINE,
Cards.NINE,
Cards.FIVE,
Cards.FOUR,
Cards.TWO,
Cards.ACE,
Cards.NINE,
Cards.THREE,
] # becomes, hard 12 hits, makes 21 push
# test bunch of weird surredner hit surredner stand,... etc rules
hands_10 = [
Cards.TEN,
Cards.EIGHT,
Cards.FOUR,
Cards.FOUR,
Cards.NINE,
Cards.ACE,
] # does not split 44, hits into 16, but doesn't surrender, busts.
hands_11 = [Cards.TEN, Cards.SEVEN, Cards.NINE, Cards.ACE] # surrender 17
hands_12 = [
Cards.TEN,
Cards.SEVEN,
Cards.TEN,
Cards.ACE,
] # dealer BJ can't surrender
hands_13 = [
Cards.FIVE,
Cards.TEN,
Cards.TWO,
Cards.NINE,
Cards.ACE,
] # hits into surrender can't surrender, stands
deck.set_cards(
deck.get_cards()
+ hands_13
+ hands_12
+ hands_11
+ hands_10
+ hands_9
+ hands_8
+ hands_7
+ hands_6
+ hands_5
+ hands_4
+ hands_3
+ hands_2
+ hands_1
)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 + (1)
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 101 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 99 + 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 100 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 99 - 0.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 98.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 98.5 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 97.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 97.5 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 96.5 - 0.5
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 96 - 1
plays = play(player, deck=deck, wager=1)
player.next_round()
assert player.get_bankroll() == 95 - 1
print("yo?")
| [
"[email protected]"
]
| |
3e056accc07c2b9d8e4f074fc52a8ccc0a071755 | fd01e75f6e5cb4544484bfba48cb307f0e712ab0 | /LC/146_반복제어문3_형성평가7.py | e8d259ca02d84750622cc7ee9c13665704da2b1a | []
| no_license | hybae430/Jungol | 2b30aae434f6cc54f2b03e5aa9e8a5d98dc7f170 | a11dae5e6043f37fc9802ba317159f528f92b9f7 | refs/heads/master | 2023-03-24T09:57:53.905035 | 2021-03-20T09:16:48 | 2021-03-20T09:16:48 | 328,375,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | n = int(input())
num, alpha = 0, 0
for i in range(n):
for x in range(alpha, alpha + n):
print(chr(65 + x), end=" ")
for x in range(num, num + alpha):
print(x, end=" ")
alpha += n
num += 1
print() | [
"[email protected]"
]
| |
31fd48687601609d446b72e929f41c3b200399a2 | 8ffc7ed2767142b67a2c16a6d664ebbe4c8fa901 | /ersdtggserttye.py | 1f11bb3d79623fbb574647ccc9142e2add460ac7 | []
| no_license | RexDigitail/codeEge | cad9dbce67b96cf4642a4eadd90cf1a01c45be9a | 77cb0891ea6dadf3c76194e0d6789699932e1d56 | refs/heads/main | 2023-08-21T07:23:50.588496 | 2021-10-16T09:12:32 | 2021-10-16T09:12:32 | 412,766,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | k=0
l=0
s=0
m=0
for i in range(120115000,120200000+1):
for j in range(1,int(i**0.5)+1):
if int(i**0.5)**2==i:
m=1
if (i%j)==0:
l+=2
if l-m>=k:
k=l
s=i
l=0
m=0
print(k,s)
| [
"[email protected]"
]
| |
6448a838678049b588dad784fe27300469ce5752 | bccfce9c293481a85569da8559eadeb0ed3ae062 | /GO/src/alphaBetaThreaded.py | 0edd50ae5a225e543476d73374b6be694ff23d42 | [
"MIT"
]
| permissive | aliissaoui/Developping-Go-Agents-in-Python | 55d1ea20f7df9729fc6c796b9494cd22bc1d52ad | 7c06374b96f94103eddf9ad3cdfc3ca0086badbe | refs/heads/main | 2023-05-26T00:39:25.289877 | 2021-05-08T04:55:01 | 2021-05-08T04:55:01 | 315,443,906 | 0 | 0 | null | 2021-05-08T04:55:01 | 2020-11-23T21:22:02 | HTML | UTF-8 | Python | false | false | 3,855 | py | import time, os
import eval
import alphaBeta
import operator
import concurrent.futures
from copy import deepcopy as cp
import multiprocessing, threading
class AlphaBetaThreaded():
def __init__(self, color):
self._mycolor = color
self._maxscore = 100000
self._processes = []
self._eval = eval.Eval(color)
def keywithmaxval(self, d):
v=list(d.values())
k=list(d.keys())
return k[v.index(max(v))]
########################### THREADED ############################@@
def AlphaBetaCoupThreaded(self, b, depth):
""" First level of MinMax search with Alpha Beta Pruning"""
if b.is_game_over() or depth == 0:
return None
moves = b.generate_legal_moves()
n = len(moves)
manager = multiprocessing.Manager()
results = manager.dict()
start_time = time.time()
processes = [None] * n
boards = [None] * n
self._nodes = 0
v, coup = None, None
for i in range(n):
b.push(moves[i])
d = cp(b)
processes[i] = multiprocessing.Process(target=self.AlphaBetaThreaded, args=(d, depth - 1, -self._maxscore, self._maxscore, results, i, depth-1))
b.pop()
self._processes = processes
[process.start() for process in processes]
[process.join() for process in processes]
results = dict(results)
i = self.keywithmaxval(results)
coup, v = moves[int(i)], results[i]
end_time = time.time()
print("processes time=", end_time - start_time)
print("processes hit: ", coup, ", of value: ", v)
return (coup, v)
def BetaAlphaThreaded(self, b, depth, alpha, beta, results, i, depth_init):
self._nodes += 1
""" MaxMin with Alpha beta pruning"""
if b.is_game_over():
res = b.result()
if res == "1-0":
r = - ((-1)**self._mycolor) * self._maxscore
elif res == "0-1":
r = ((-1)**self._mycolor) * self._maxscore
else:
r = 0
return r
if depth == 0:
e = self._eval.evaluate(b)
return e
v = None
for m in b.generate_legal_moves():
b.push(m)
ret = self.AlphaBetaThreaded(b, depth - 1, alpha, beta, results, i, depth_init)
b.pop()
if v is None or ret > v:
v = ret
if alpha < v:
alpha = v
if alpha >= beta:
return beta
return alpha
def AlphaBetaThreaded(self, b, depth, alpha, beta, results, i, depth_init):
self._nodes += 1
""" MinMax with Alpha beta pruning"""
if b.is_game_over():
res = b.result()
if res == "1-0":
r = - ((-1)**self._mycolor) * self._maxscore
elif res == "0-1":
r = ((-1)**self._mycolor) * self._maxscore
else:
r = 0
if (depth == depth_init):
            results[str(i)] = r  # keep result keys consistently str(i)
return r
if depth == 0:
e = self._eval.evaluate(b)
if (depth == depth_init):
            results[str(i)] = e  # keep result keys consistently str(i)
return e
v = None
for move in b.generate_legal_moves():
b.push(move)
ret = self.BetaAlphaThreaded(b, depth-1, alpha, beta, results, i, depth_init)
b.pop()
if v is None or ret < v:
v = ret
if beta > v:
beta = v
if alpha >= beta:
if (depth == depth_init):
results[str(i)] = alpha
return alpha
if (depth == depth_init):
results[str(i)] = beta
return beta
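# Usage sketch (added; the board API below is exactly what this class calls:
# generate_legal_moves/push/pop/is_game_over/result, e.g. a python-chess Board):
#   searcher = AlphaBetaThreaded(color)
#   move, value = searcher.AlphaBetaCoupThreaded(board, depth=3)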
| [
"[email protected]"
]
| |
170f6cc0fae64722db202a4f1c45c41de525235a | bda0259b6400e2c394f9d1a85e1b648a54fc88dd | /shifthappens/tasks/imagenet_drawing/imagenet_drawing.py | b9a69b24312492ac021bc8c14a67cfa1309e5cc9 | [
"CC-BY-4.0",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
]
| permissive | shift-happens-benchmark/icml-2022 | f3bb118e73c06e1fa45cbba8590a7463c3cf7862 | 74b3cda69a2b90bcefed3848faca41a92ad0c9bf | refs/heads/main | 2023-05-23T16:42:44.145930 | 2023-04-13T14:48:37 | 2023-04-13T14:48:37 | 422,538,065 | 39 | 24 | Apache-2.0 | 2023-04-15T18:56:06 | 2021-10-29T10:46:18 | Python | UTF-8 | Python | false | false | 3,196 | py | """Shift Happens task: ImageNet-Drawing"""
import dataclasses
import os
import numpy as np
import torchvision.datasets as tv_datasets
import torchvision.transforms as tv_transforms
import shifthappens.data.base as sh_data
import shifthappens.data.torch as sh_data_torch
import shifthappens.utils as sh_utils
from shifthappens import benchmark as sh_benchmark
from shifthappens.data.base import DataLoader
from shifthappens.models import base as sh_models
from shifthappens.models.base import PredictionTargets
from shifthappens.tasks.base import Task
from shifthappens.tasks.metrics import Metric
from shifthappens.tasks.task_result import TaskResult
@sh_benchmark.register_task(
name="ImageNet-Drawing", relative_data_folder="imagenet_drawing", standalone=True
)
@dataclasses.dataclass
class ImageNetDrawing(Task):
"""ImageNet-Drawing Dataset.
This task evaluates a model on ImageNet-Drawing. This
dataset was formed by converting the images in the
ImageNet validation set into colored pencil drawings
using simple image processing. See the readme file for
more information about how the dataset was constructed.
The goal of this evaluation task is to measure the
model's robustness to distribution shifts.
"""
resources = [
(
"imagenet-drawing.tar.gz",
"https://zenodo.org/record/6801109/files/imagenet-drawing.tar.gz?download=1",
"3fb1206b6e3190d0159e5dc01c0f97ab",
)
]
def setup(self):
"""Setup ImageNet-Drawing"""
dataset_folder = os.path.join(self.data_root, "imagenet-drawing")
if not os.path.exists(dataset_folder):
# download data
for file_name, url, md5 in self.resources:
sh_utils.download_and_extract_archive(
url, dataset_folder, md5, file_name
)
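# ToTensor yields CHW tensors; the Lambda below permutes them back to
# HWC, which appears to be the layout the benchmark's loaders expect.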
test_transform = tv_transforms.Compose(
[
tv_transforms.ToTensor(),
tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),
]
)
self.ch_dataset = tv_datasets.ImageFolder(
root=dataset_folder, transform=test_transform
)
self.images_only_dataset = sh_data_torch.IndexedTorchDataset(
sh_data_torch.ImagesOnlyTorchDataset(self.ch_dataset)
)
def _prepare_dataloader(self) -> DataLoader:
"""Builds the DatasetLoader object."""
return sh_data.DataLoader(self.images_only_dataset, max_batch_size=None)
def _evaluate(self, model: sh_models.Model) -> TaskResult:
"""Evaluates the model on the ImageNet-Drawing dataset."""
dataloader = self._prepare_dataloader()
all_predicted_labels_list = []
for predictions in model.predict(
dataloader, PredictionTargets(class_labels=True)
):
all_predicted_labels_list.append(predictions.class_labels)
all_predicted_labels = np.concatenate(all_predicted_labels_list, 0)
accuracy = (all_predicted_labels == np.array(self.ch_dataset.targets)).mean()
return TaskResult(
accuracy=accuracy, summary_metrics={Metric.Robustness: "accuracy"}
)
| [
"[email protected]"
]
| |
6cd826a9cb3114468f52d75933e8022deea099e1 | 6a659f809005d928147b1d45ae0e9d33cfdb9d91 | /chapter9/example9.13.py | a8bc6597ef8b4883dcb2e5dc9d950e48718e84be | []
| no_license | uosphys/comphy2019 | c347cc2cef67ad82360dffa18b6414cb8aa57a64 | 9a9df9185ee5b4178b7dbc465386aab54a4417fa | refs/heads/master | 2022-02-17T05:51:53.853591 | 2019-09-11T02:46:38 | 2019-09-11T02:46:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | #!/usr/bin/python
## example9_13
import numpy as np
from lamRange import *
from inversePower3 import *
N = 10
n = 100
d = np.ones(n)*2.0
c = np.ones(n-1)*(-1.0)
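# d and c define a symmetric tridiagonal matrix (2.0 on the diagonal,
# -1.0 on the off-diagonals); lamRange brackets its eigenvalues so the
# inverse power method can be aimed at eigenvalue No. N.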
r = lamRange(d,c,N)
s = (r[N-1] + r[N])/2.0
lam,x = inversePower3(d,c,s) # Inverse power method
print("Eigenvalue No.",N," =",lam)
input("\nPress return to exit")
| [
"[email protected]"
]
| |
e492218b36b482d4a93daf0d1e6a28356f38bfff | 439386f9097632d44d31d1f599df76ec2820d072 | /常规项目/统一大厅常规checklist/1000/DFQP/src/cases/dfqp_broadcast_interface.py | db7b7a8e912baeac9d80fbdb92ce6c814f56b8d5 | []
| no_license | YiFeng0755/testcase | 33693f0940a6497aa40e2e51a0535c9eb6c12b29 | edc19480c3e94cbcbf004aa9d20099ec6d1b9304 | refs/heads/master | 2020-04-28T04:34:28.232022 | 2019-03-11T11:13:25 | 2019-03-11T11:13:25 | 146,287,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,250 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Broadcast tests for DFQP (the local board game client)
'''
import time
from runcenter.enums import EnumPriority,EnumStatus
from runcenter.testcase import debug_run_all,TestCase
from appiumcenter.luadriver import LuaDriver
from uilib.personinfo_page import Personinfo_Page
from uilib.hall_page import Hall_Page
from uilib.sign_page import Sign_Page
from uilib.game_page import Game_Page
from uilib.setting_page import Setting_Page
from uilib.broadcast_page import Broadcast_Page
from common.common import Common
from common import Interface as PHPInterface
import test_datas
from datacenter import dataprovider
testdata=test_datas.logindata4
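# The test classes below are data-driven: DataDrive feeds the login
# accounts from logindata4 into each case via self.casedata.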
@dataprovider.DataDrive(testdata)
class C222_DFQP_Broadcast(TestCase):
'''
A guest account taps broadcast, enters text and sends; check the result
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.start_step('Delete the login file')
self.common.deletefile(self.luadriver)
self.common.closedriver()
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入广播界面')
self.broadcast_page.wait_element('广播').click()
self.personinfo_page.screenshot('.png')
self.start_step('点击立即绑定')
self.broadcast_page.wait_element('发送').click()
try:
self.broadcast_page.wait_element('确定')
print 'Tapping "Bind now" can bind a phone successfully'
except:
print 'Tapping "Bind now" did not bring up the phone binding flow'
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C223_DFQP_Broadcast(TestCase):
'''
A registered player below level 15 taps broadcast, enters text and taps send; check the result
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 10
def pre_test(self):
UserID = self.casedata.get('mid')
print 'UserID:%s' % UserID
PHPInterface.set_level(UserID, 1) # set the player level to 1
dict = PHPInterface.get_user_info(UserID) # fetch the player info
coin = eval(dict).get('result', {'coin': None}).get('coin') # read the current silver coin balance
AddMoney = 30000 - coin
PHPInterface.add_money(UserID, AddMoney) # top the silver coins up to 30000
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while(i<3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
self.start_step('Open the broadcast screen')
self.broadcast_page.wait_element('广播').click()
self.broadcast_page.wait_element('输入文字').send_keys('11')
self.start_step('Tap send')
self.broadcast_page.wait_element('发送').click()
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C224_DFQP_Broadcast(TestCase):
'''
A registered level-15 player with fewer than 23000 silver coins taps broadcast, enters text and taps send; check the result
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 10
def pre_test(self):
UserID = self.casedata.get('mid')
print 'UserID:%s' % UserID
PHPInterface.set_level(UserID,15)
dict = PHPInterface.get_user_info(UserID) # fetch the player info
coin = eval(dict).get('result',{'coin':None}).get('coin') # read the current silver coin balance
AddMoney = 10000 - coin
PHPInterface.add_money(UserID,AddMoney) # set the silver coins to 10000
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')#登录过后也会出现弹框
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
self.start_step('Open the broadcast screen')
self.broadcast_page.wait_element('广播').click()
self.broadcast_page.wait_element('输入文字').send_keys('11')
self.start_step('Tap send')
self.broadcast_page.wait_element('发送').click()
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C225_DFQP_Broadcast(TestCase):
'''
A registered level-15 player with enough silver coins taps broadcast, enters text and taps send; check the result
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
UserID = self.casedata.get('mid')
print 'UserID:%s' % UserID
PHPInterface.set_level(UserID,16)
dict = PHPInterface.get_user_info(UserID) # fetch the player info
coin = eval(dict).get('result',{'coin':None}).get('coin') # read the current silver coin balance
AddMoney = 40000 - coin
PHPInterface.add_money(UserID,AddMoney) # set the silver coins to 40000
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
self.start_step('Open the broadcast screen')
self.broadcast_page.wait_element('广播').click()
self.broadcast_page.wait_element('输入文字').send_keys('11')
self.start_step('Tap send')
self.broadcast_page.wait_element('发送').click()
time.sleep(1)
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C226_DFQP_Broadcast(TestCase):
'''
With no broadcast messages, the message list on the broadcast screen shows blank
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 10
def pre_test(self):
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C227_DFQP_Broadcast(TestCase):
'''
Send two broadcasts and check how the broadcast message screen displays them
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
global UserID
UserID = self.casedata.get('mid')
print 'UserID:%s' % UserID
dict = PHPInterface.get_user_info(UserID) # fetch the player info
coin = eval(dict).get('result',{'coin':None}).get('coin') # read the current silver coin balance
AddMoney = 300000 - coin
PHPInterface.add_money(UserID,AddMoney) # set the silver coins to 300000
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
PHPInterface.broadcast(UserID, content='地方棋牌测试专用1')
time.sleep(1)
PHPInterface.broadcast(2188068, content='地方棋牌测试专用2')
time.sleep(1)
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C228_DFQP_Broadcast(TestCase):
'''
Receive a system message and check how the broadcast message screen displays it
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
self.start_step('Open the broadcast screen')
self.broadcast_page.wait_element('广播').click()
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
@dataprovider.DataDrive(testdata)
class C229_DFQP_Broadcast(TestCase):
'''
Receive a player broadcast message on the lobby screen
'''
owner = "YoungLiu"
status = EnumStatus.Design
priority = EnumPriority.High
timeout = 5
def pre_test(self):
self.common = Common()
# Initialize the LuaDriver
self.luadriver = self.common.setupdriver()
self.hall_page = Hall_Page()
self.sign_page = Sign_Page()
self.game_page = Game_Page()
self.setting_page = Setting_Page()
self.personinfo_page = Personinfo_Page()
self.broadcast_page = Broadcast_Page()
self.common.closeactivitytest(self.luadriver)
def run_test(self):
self.start_step("等待页面加载完成")
self.hall_page.wait_element("同步标志")
self.start_step('进入头像界面')
self.hall_page.wait_element('头像').click()
self.common.loginuser(self.casedata['user'], self.casedata['password'])
self.start_step('关闭弹框')
time.sleep(4)
i = 0
while (i < 3):
try:
self.sign_page.wait_element('关闭1').click()
i += 1
except:
i = 3
UserID = self.casedata.get('mid')
print 'UserID:%s' % UserID
PHPInterface.broadcast(UserID,content='地方棋牌测试专用')
self.personinfo_page.screenshot('.png')
def post_test(self):
'''
Clean up the test environment after the test case has finished
'''
self.common.closedriver()
if __name__ == '__main__':
dfqp_broadcast_case = C227_DFQP_Broadcast()
dfqp_broadcast_case.debug_run()
| [
"[email protected]"
]
| |
a8b593cd2c4e3a67ef96a8b426ea1bfd92635deb | 7b8ea847579f409227cacf4cd073c4073b04d355 | /opengl/oglc-env/Scripts/painter.py | d756d4b2b2682ea8749fa9f3f4a04af16ed3c350 | []
| no_license | itaditya/Python | dcea29225826aceb3645194adede1ef5b947ecc4 | 5365dddf67d7456800fa58f8ae74dbff22b8fed2 | refs/heads/master | 2021-09-23T01:58:11.171768 | 2017-10-13T13:25:03 | 2017-10-13T13:25:03 | 50,483,903 | 5 | 2 | null | 2018-09-19T19:32:59 | 2016-01-27T05:27:42 | Python | UTF-8 | Python | false | false | 2,140 | py | #!f:\git\python\opengl\oglc-env\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
if len(sys.argv) != 2:
print("Usage: painter file")
sys.exit(1)
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| [
"[email protected]"
]
| |
0b3dba9d9ee6f60e6df93d5f6116dd2d5dc156ca | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_154/ch37_2019_04_04_19_28_17_473765.py | 1a7f008bfb57c2c72d4a363abdbcf74e91a11b25 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | def eh_primo(n):
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
for test in range(3, n, 2):
if n % test == 0:
return False
return True
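# Example (assumed usage): imprime_primos(5) prints the first five primes 2, 3, 5, 7, 11.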
def imprime_primos(n):
contador = 0
x = 2
while contador != n:
if eh_primo(x):
print(x)
contador = contador + 1
x = x + 1
return
| [
"[email protected]"
]
| |
c20e5205d3fe2387f6f9ce003eea43dc26e4d3d7 | dd1e2ed53fec3dca0fa60042c04ad8cf6019ed89 | /python/scope/scope_testing/scope_testing.py | b33903d20d27126dfc72f0ab05cdb4d003793fcb | []
| no_license | cloudavail/snippets | 9be4ee285789ff3cff1a3a71e1f505a1b1697500 | 340f5c2735d6ec88b793f1eea91f2b026c24586e | refs/heads/main | 2023-08-03T10:30:13.976947 | 2023-05-15T04:46:32 | 2023-05-15T04:46:32 | 12,838,293 | 22 | 24 | null | 2023-09-07T03:33:17 | 2013-09-15T00:40:49 | JavaScript | UTF-8 | Python | false | false | 527 | py | #!/usr/bin/env python
# http://simeonfranklin.com/blog/2012/jul/1/python-decorators-in-12-steps/
def test_function():
print 'test_function called'
variable_in_local_scope = 'red'
print variable_in_local_scope
# the test_function() has access to variables in the global scope
print variable_in_global_scope
print locals()
variable_in_global_scope = 'orange'
# printing globals() returns only the variables in the global scope
print 'printing globals'
print globals()
print 'calling test_function'
test_function()
| [
"[email protected]"
]
| |
e0c121eaff7b28193f08ab0f7b1e5f48439594a0 | 13e7ed5e57d8706e234c8f6e8d880af977c61f77 | /MarksPredict/settings.py | a8ee0bcdf99c6c81d2e57b62f2d67aad589c8020 | []
| no_license | sitanshu-cse10/ML | 57545cc1bb3dad71f7b061b759fc164686c728fd | 21b58fee7700ec3fd9a4c52cac481be2ff1d9a27 | refs/heads/master | 2020-03-26T06:39:03.097462 | 2018-12-03T17:47:40 | 2018-12-03T17:47:40 | 144,614,770 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,175 | py | """
Django settings for MarksPredict project.
Generated by 'django-admin startproject' using Django 2.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'boym1(u7=^&lx^^n=v-j#$m*6=1u+0$5zv5y2*f40ax@!z3xwy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'predict',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MarksPredict.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'MarksPredict.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS=(os.path.join(BASE_DIR,'static'),)
| [
"[email protected]"
]
| |
d6389ebffd00aba72cd3fe58ca5a95e6459ecebd | 6d339503ad7a6602f5a0ca0a231f1f7d37b163b7 | /1010.py | d766217490184989bb6ba8a66796edd1abecf473 | []
| no_license | pcorrea2018/URI-Python-Answers | e1c64f5be747ce7acee01835ba6576b138592f9c | f7554891ffaefa3fc997e18bb1015dbd39aa74d5 | refs/heads/master | 2021-05-08T16:13:12.367528 | 2018-02-08T00:24:13 | 2018-02-08T00:24:13 | 120,146,787 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | c1, n1, v1 = input().split()
c1 = int(c1)
n1 = int(n1)
v1 = float(v1)
c2, n2, v2 = input().split()
c2 = int(c2)
n2 = int(n2)
v2 = float(v2)
valor = n1*v1 + n2*v2
print ("VALOR A PAGAR: R$ %0.2F" %valor)
| [
"[email protected]"
]
| |
b49a2ac7599f06a223c5f41eb449d65e472d212e | 7c43576414444e83afd7c98e6d7030c3a70f42de | /utils.py | 1f71c85602edc0da441f459177e6437aa5769636 | []
| no_license | M-Ashmitha/object_detection_ros | bea55fd5bb0559173cf77c8b9781189a114df7d1 | 7342e60b85f94ac64098450b24a9e736c40914e8 | refs/heads/master | 2022-12-04T01:49:25.016281 | 2020-08-25T01:47:27 | 2020-08-25T01:47:27 | 289,602,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,835 | py | #!/usr/bin/env python
"""
Helper functions and classes will be placed here.
"""
import os
import tarfile
import six.moves.urllib as urllib
import numpy as np
from cob_perception_msgs.msg import Detection, DetectionArray, Rect
def download_model(\
download_base='http://download.tensorflow.org/models/object_detection/', \
model_name='ssd_mobilenet_v1_coco_11_06_2017'\
):
"""
Downloads the detection model from tensorflow servers
Args:
download_base: base url where the object detection model is downloaded from
model_name: name of the object detection model
Returns:
"""
# add tar gz to the end of file name
model_file = model_name + '.tar.gz'
try:
opener = urllib.request.URLopener()
opener.retrieve(download_base + model_file, \
model_file)
tar_file = tarfile.open(model_file)
for f in tar_file.getmembers():
file_name = os.path.basename(f.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(f, os.getcwd())
except Exception as e:
raise
def create_detection_msg(im, output_dict, category_index, bridge):
"""
Creates the detection array message
Args:
im: (std_msgs_Image) incoming message
output_dict (dictionary) output of object detection model
category_index: dictionary of labels (like a lookup table)
bridge (cv_bridge) : cv bridge object for converting
Returns:
msg (cob_perception_msgs/DetectionArray) The message to be sent
"""
boxes = output_dict["detection_boxes"]
scores = output_dict["detection_scores"]
classes = output_dict["detection_classes"]
masks = None
if 'detection_masks' in output_dict:
masks = output_dict["detection_masks"]
msg = DetectionArray()
msg.header = im.header
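# keep only detections scoring above the hard-coded 0.5 confidence threshold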
scores_above_threshold = np.where(scores > 0.5)[0]
for s in scores_above_threshold:
# Get the properties
bb = boxes[s,:]
sc = scores[s]
cl = classes[s]
# Create the detection message
detection = Detection()
detection.header = im.header
detection.label = category_index[int(cl)]['name']
detection.id = cl
detection.score = sc
detection.detector = 'Tensorflow object detector'
detection.mask.roi.x = int((im.width-1) * bb[1])
detection.mask.roi.y = int((im.height-1) * bb[0])
detection.mask.roi.width = int((im.width-1) * (bb[3]-bb[1]))
detection.mask.roi.height = int((im.height-1) * (bb[2]-bb[0]))
if 'detection_masks' in output_dict:
detection.mask.mask = \
bridge.cv2_to_imgmsg(masks[s], "mono8")
print (detection.mask.mask.width)
msg.detections.append(detection)
return msg
| [
"[email protected]"
]
| |
914c0aef556843e38fecd5dd326b2ed54cfbe735 | 8fa162cddb2046cb47f3a06c72743ed67685d03a | /dvc/command/add.py | ba6c81801556bf43258b3bc210869f6db662c0ef | [
"Apache-2.0"
]
| permissive | franekp/dvc | be9c123f03b77daa39781bd7e62fa25b9fae449f | e380a4a8586da643bf4e0d2281b13aee0d5e5207 | refs/heads/master | 2020-03-19T18:35:47.416381 | 2018-06-10T14:35:49 | 2018-06-10T14:35:49 | 136,816,230 | 0 | 0 | Apache-2.0 | 2018-06-10T14:32:53 | 2018-06-10T14:32:52 | null | UTF-8 | Python | false | false | 374 | py | from dvc.exceptions import DvcException
from dvc.command.common.base import CmdBase
class CmdAdd(CmdBase):
def run(self):
for target in self.args.targets:
try:
self.project.add(target)
except DvcException as ex:
self.project.logger.error('Failed to add {}'.format(ex))
return 1
return 0
| [
"[email protected]"
]
| |
cb98d77006dd907b02aff546f824e4bbad97363d | 98bedb94c7e17653046a42379821709aec4c80f4 | /MCQApp/migrations/0001_initial.py | 25a4b866994edf066539202cdd37b44c02cc890a | []
| no_license | AndreiKud/StudentPlatform | e5d9f1998f9d32cf92b4300c52fcb095ce99ff6f | e24ce7231a37c12cffcf0d134c0a60f8e66a0b12 | refs/heads/master | 2023-07-29T06:39:59.882444 | 2021-09-08T10:24:44 | 2021-09-08T10:24:44 | 264,679,310 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,939 | py | # Generated by Django 2.2.12 on 2020-05-26 00:33
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('QuizApp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='MCQQuestion',
fields=[
('question_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='QuizApp.Question')),
('answer_order', models.CharField(blank=True, choices=[('content', 'Content'), ('none', 'None')], help_text='The order in which multichoice answer options are displayed to the user', max_length=30, null=True, verbose_name='Answer Order')),
],
options={
'verbose_name': 'Multiple Choice Question',
'verbose_name_plural': 'Multiple Choice Questions',
},
bases=('QuizApp.question',),
),
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.CharField(help_text='Enter the answer text that you want displayed', max_length=1000, verbose_name='Content')),
('correct', models.BooleanField(default=False, help_text='Is this a correct answer?', verbose_name='Correct')),
('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='MCQApp.MCQQuestion', verbose_name='Question')),
],
options={
'verbose_name': 'Answer',
'verbose_name_plural': 'Answers',
},
),
]
| [
"[email protected]"
]
| |
30d4234bc4160c7c323924a2a58fef4490aba037 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L67/67-27_MD_NVT_rerun/set_1ns_equi_1_m.py | 903799fb96fc0319e908362b60c2504832808f05 | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L67/MD_NVT_rerun/ti_one-step/67_27/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi_1_m.in'
temp_pbs = filesdir + 'temp_1ns_equi_1_m.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
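# lambda windows for the one-step TI run; one sub-directory is set up per window below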
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
#equiin
eqin = workdir + "%6.5f_equi_1_m.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
#PBS
pbs = workdir + "%6.5f_1ns_equi_1_m.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
#top
os.system("cp ../67-27_merged.prmtop .")
os.system("cp ../0.5_equi_0_3.rst .")
#submit pbs
os.system("qsub %s" %(pbs))
os.chdir(dir)
| [
"[email protected]"
]
| |
294c691170eaea3e20fc1c9b2b0ae1a41578fbf1 | 72ce57d187fb6a4730f1390e280b939ef8087f5d | /nuitka/codegen/OperationCodes.py | 2541fe799fc5a097d966b790405863df757cf43b | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | tommyli3318/Nuitka | c5b7681b73d96cb8859210ed1a78f09149a23825 | ae52b56024d53159a72a5acbfaac792ca207c418 | refs/heads/develop | 2020-05-02T17:02:10.578065 | 2019-10-27T15:53:32 | 2019-10-27T15:53:32 | 178,086,582 | 1 | 0 | Apache-2.0 | 2019-06-06T00:32:48 | 2019-03-27T22:53:31 | Python | UTF-8 | Python | false | false | 26,815 | py | # Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Codes for operations.
There are unary and binary operations. Many of them have specializations and
of course types could play into it. Then there is also the added difficulty of
in-place assignments, which have other operation variants.
"""
from nuitka.containers.oset import OrderedSet
from . import OperatorCodes
from .CodeHelpers import (
generateChildExpressionsCode,
pickCodeHelper,
withObjectCodeTemporaryAssignment,
)
from .ErrorCodes import getErrorExitBoolCode, getErrorExitCode
def generateOperationBinaryCode(to_name, expression, emit, context):
left_arg_name, right_arg_name = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
# TODO: Decide and use one single spelling, inplace or in_place
inplace = expression.isInplaceSuspect()
assert not inplace or not expression.getLeft().isCompileTimeConstant(), expression
_getBinaryOperationCode(
to_name=to_name,
expression=expression,
operator=expression.getOperator(),
arg_names=(left_arg_name, right_arg_name),
in_place=inplace,
emit=emit,
context=context,
)
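# Boolean "not" needs no helper table: CHECK_IF_TRUE computes the truth
# value and the assignment below simply inverts it.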
def generateOperationNotCode(to_name, expression, emit, context):
arg_name, = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
res_name = context.getIntResName()
emit("%s = CHECK_IF_TRUE( %s );" % (res_name, arg_name))
getErrorExitBoolCode(
condition="%s == -1" % res_name,
release_name=arg_name,
needs_check=expression.getOperand().mayRaiseExceptionBool(BaseException),
emit=emit,
context=context,
)
to_name.getCType().emitAssignmentCodeFromBoolCondition(
to_name=to_name, condition="%s == 0" % res_name, emit=emit
)
def generateOperationUnaryCode(to_name, expression, emit, context):
arg_name, = generateChildExpressionsCode(
expression=expression, emit=emit, context=context
)
_getUnaryOperationCode(
to_name=to_name,
expression=expression,
operator=expression.getOperator(),
arg_name=arg_name,
needs_check=expression.mayRaiseException(BaseException),
emit=emit,
context=context,
)
# Note: These are ordered, so we can define the order they are created in
# the code generation of specialized helpers, as this set is used for input
# there too.
specialized_add_helpers_set = OrderedSet(
(
"BINARY_OPERATION_ADD_OBJECT_INT",
"BINARY_OPERATION_ADD_INT_OBJECT",
"BINARY_OPERATION_ADD_INT_INT",
"BINARY_OPERATION_ADD_OBJECT_STR",
"BINARY_OPERATION_ADD_STR_OBJECT",
"BINARY_OPERATION_ADD_STR_STR",
"BINARY_OPERATION_ADD_OBJECT_UNICODE",
"BINARY_OPERATION_ADD_UNICODE_OBJECT",
"BINARY_OPERATION_ADD_UNICODE_UNICODE",
"BINARY_OPERATION_ADD_OBJECT_FLOAT",
"BINARY_OPERATION_ADD_FLOAT_OBJECT",
"BINARY_OPERATION_ADD_FLOAT_FLOAT",
"BINARY_OPERATION_ADD_OBJECT_TUPLE",
"BINARY_OPERATION_ADD_TUPLE_OBJECT",
"BINARY_OPERATION_ADD_TUPLE_TUPLE",
"BINARY_OPERATION_ADD_OBJECT_LIST",
"BINARY_OPERATION_ADD_LIST_OBJECT",
"BINARY_OPERATION_ADD_LIST_LIST",
"BINARY_OPERATION_ADD_OBJECT_BYTES",
"BINARY_OPERATION_ADD_BYTES_OBJECT",
"BINARY_OPERATION_ADD_BYTES_BYTES",
"BINARY_OPERATION_ADD_OBJECT_LONG",
"BINARY_OPERATION_ADD_LONG_OBJECT",
"BINARY_OPERATION_ADD_LONG_LONG",
# These are friends naturally, they all add with another
"BINARY_OPERATION_ADD_FLOAT_LONG",
"BINARY_OPERATION_ADD_LONG_FLOAT",
"BINARY_OPERATION_ADD_FLOAT_INT",
"BINARY_OPERATION_ADD_INT_FLOAT",
"BINARY_OPERATION_ADD_LONG_INT",
"BINARY_OPERATION_ADD_INT_LONG",
# These are friends too.
"BINARY_OPERATION_ADD_UNICODE_STR",
"BINARY_OPERATION_ADD_STR_UNICODE",
# Default implementation.
"BINARY_OPERATION_ADD_OBJECT_OBJECT",
)
)
nonspecialized_add_helpers_set = set()
specialized_sub_helpers_set = OrderedSet(
(
"BINARY_OPERATION_SUB_OBJECT_INT",
"BINARY_OPERATION_SUB_INT_OBJECT",
"BINARY_OPERATION_SUB_INT_INT",
"BINARY_OPERATION_SUB_OBJECT_FLOAT",
"BINARY_OPERATION_SUB_FLOAT_OBJECT",
"BINARY_OPERATION_SUB_FLOAT_FLOAT",
"BINARY_OPERATION_SUB_OBJECT_LONG",
"BINARY_OPERATION_SUB_LONG_OBJECT",
"BINARY_OPERATION_SUB_LONG_LONG",
# These are friends naturally, they all sub with another
"BINARY_OPERATION_SUB_FLOAT_LONG",
"BINARY_OPERATION_SUB_LONG_FLOAT",
"BINARY_OPERATION_SUB_FLOAT_INT",
"BINARY_OPERATION_SUB_INT_FLOAT",
"BINARY_OPERATION_SUB_LONG_INT",
"BINARY_OPERATION_SUB_INT_LONG",
# Default implementation.
"BINARY_OPERATION_SUB_OBJECT_OBJECT",
)
)
# These made no sense to specialize for, nothing to gain.
nonspecialized_sub_helpers_set = set(
("BINARY_OPERATION_SUB_OBJECT_LIST", "BINARY_OPERATION_SUB_OBJECT_TUPLE")
)
specialized_mul_helpers_set = OrderedSet(
(
"BINARY_OPERATION_MUL_OBJECT_INT",
"BINARY_OPERATION_MUL_INT_OBJECT",
"BINARY_OPERATION_MUL_INT_INT",
"BINARY_OPERATION_MUL_OBJECT_LONG",
"BINARY_OPERATION_MUL_CLONG_CLONG",
"BINARY_OPERATION_MUL_INT_CLONG",
"BINARY_OPERATION_MUL_CLONG_INT",
"BINARY_OPERATION_MUL_LONG_OBJECT",
"BINARY_OPERATION_MUL_LONG_LONG",
"BINARY_OPERATION_MUL_OBJECT_STR",
"BINARY_OPERATION_MUL_STR_OBJECT",
"BINARY_OPERATION_MUL_INT_STR",
"BINARY_OPERATION_MUL_STR_INT",
"BINARY_OPERATION_MUL_LONG_STR",
"BINARY_OPERATION_MUL_STR_LONG",
# Should not occur.
# "BINARY_OPERATION_MUL_STR_STR",
"BINARY_OPERATION_MUL_OBJECT_UNICODE",
"BINARY_OPERATION_MUL_UNICODE_OBJECT",
"BINARY_OPERATION_MUL_INT_UNICODE",
"BINARY_OPERATION_MUL_UNICODE_INT",
"BINARY_OPERATION_MUL_LONG_UNICODE",
"BINARY_OPERATION_MUL_UNICODE_LONG",
# Should not occur.
# "BINARY_OPERATION_MUL_UNICODE_UNICODE",
"BINARY_OPERATION_MUL_OBJECT_FLOAT",
"BINARY_OPERATION_MUL_FLOAT_OBJECT",
"BINARY_OPERATION_MUL_FLOAT_FLOAT",
"BINARY_OPERATION_MUL_OBJECT_TUPLE",
"BINARY_OPERATION_MUL_TUPLE_OBJECT",
"BINARY_OPERATION_MUL_INT_TUPLE",
"BINARY_OPERATION_MUL_TUPLE_INT",
"BINARY_OPERATION_MUL_LONG_TUPLE",
"BINARY_OPERATION_MUL_TUPLE_LONG",
# Should not occur.
# "BINARY_OPERATION_MUL_TUPLE_TUPLE",
"BINARY_OPERATION_MUL_OBJECT_LIST",
"BINARY_OPERATION_MUL_LIST_OBJECT",
"BINARY_OPERATION_MUL_INT_LIST",
"BINARY_OPERATION_MUL_LIST_INT",
"BINARY_OPERATION_MUL_LONG_LIST",
"BINARY_OPERATION_MUL_LIST_LONG",
# Should not occur.
# "BINARY_OPERATION_MUL_LIST_LIST",
"BINARY_OPERATION_MUL_OBJECT_BYTES",
"BINARY_OPERATION_MUL_BYTES_OBJECT",
"BINARY_OPERATION_MUL_LONG_BYTES",
"BINARY_OPERATION_MUL_BYTES_LONG",
# Should not occur.
# "BINARY_OPERATION_MUL_BYTES_BYTES",
# These are friends naturally, they all mul with another
"BINARY_OPERATION_MUL_FLOAT_LONG",
"BINARY_OPERATION_MUL_LONG_FLOAT",
"BINARY_OPERATION_MUL_FLOAT_INT",
"BINARY_OPERATION_MUL_INT_FLOAT",
"BINARY_OPERATION_MUL_LONG_INT",
"BINARY_OPERATION_MUL_INT_LONG",
# Default implementation.
"BINARY_OPERATION_MUL_OBJECT_OBJECT",
)
)
nonspecialized_mul_helpers_set = set()
specialized_truediv_helpers_set = OrderedSet(
(
"BINARY_OPERATION_TRUEDIV_OBJECT_INT",
"BINARY_OPERATION_TRUEDIV_INT_OBJECT",
"BINARY_OPERATION_TRUEDIV_INT_INT",
"BINARY_OPERATION_TRUEDIV_OBJECT_LONG",
"BINARY_OPERATION_TRUEDIV_LONG_OBJECT",
"BINARY_OPERATION_TRUEDIV_LONG_LONG",
"BINARY_OPERATION_TRUEDIV_OBJECT_FLOAT",
"BINARY_OPERATION_TRUEDIV_FLOAT_OBJECT",
"BINARY_OPERATION_TRUEDIV_FLOAT_FLOAT",
# These are friends naturally, they div mul with another
"BINARY_OPERATION_TRUEDIV_FLOAT_LONG",
"BINARY_OPERATION_TRUEDIV_LONG_FLOAT",
"BINARY_OPERATION_TRUEDIV_FLOAT_INT",
"BINARY_OPERATION_TRUEDIV_INT_FLOAT",
"BINARY_OPERATION_TRUEDIV_LONG_INT",
"BINARY_OPERATION_TRUEDIV_INT_LONG",
# Default implementation.
"BINARY_OPERATION_TRUEDIV_OBJECT_OBJECT",
)
)
nonspecialized_truediv_helpers_set = set(
(
# e.g. pathlib defines objects that do this.
"BINARY_OPERATION_TRUEDIV_OBJECT_UNICODE",
"BINARY_OPERATION_TRUEDIV_UNICODE_OBJECT",
)
)
specialized_olddiv_helpers_set = OrderedSet(
helper.replace("TRUEDIV", "OLDDIV") for helper in specialized_truediv_helpers_set
)
nonspecialized_olddiv_helpers_set = set()
specialized_floordiv_helpers_set = OrderedSet(
helper.replace("TRUEDIV", "FLOORDIV") for helper in specialized_truediv_helpers_set
)
nonspecialized_floordiv_helpers_set = set()
_iadd_helpers_set = OrderedSet(
(
"BINARY_OPERATION_ADD_OBJECT_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_LIST_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_TUPLE_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_UNICODE_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_STR_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_BYTES_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_INT_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_LONG_INPLACE",
"BINARY_OPERATION_ADD_OBJECT_FLOAT_INPLACE",
"BINARY_OPERATION_ADD_LIST_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_TUPLE_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_UNICODE_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_STR_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_BYTES_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_INT_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_LONG_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_FLOAT_OBJECT_INPLACE",
"BINARY_OPERATION_ADD_LIST_LIST_INPLACE",
"BINARY_OPERATION_ADD_TUPLE_TUPLE_INPLACE",
"BINARY_OPERATION_ADD_STR_STR_INPLACE",
"BINARY_OPERATION_ADD_UNICODE_UNICODE_INPLACE",
"BINARY_OPERATION_ADD_BYTES_BYTES_INPLACE",
"BINARY_OPERATION_ADD_INT_INT_INPLACE",
"BINARY_OPERATION_ADD_LONG_LONG_INPLACE",
"BINARY_OPERATION_ADD_FLOAT_FLOAT_INPLACE",
)
)
specialized_mod_helpers_set = OrderedSet(
(
"BINARY_OPERATION_MOD_OBJECT_INT",
"BINARY_OPERATION_MOD_INT_OBJECT",
"BINARY_OPERATION_MOD_INT_INT",
"BINARY_OPERATION_MOD_OBJECT_LONG",
"BINARY_OPERATION_MOD_LONG_OBJECT",
"BINARY_OPERATION_MOD_LONG_LONG",
"BINARY_OPERATION_MOD_OBJECT_FLOAT",
"BINARY_OPERATION_MOD_FLOAT_OBJECT",
"BINARY_OPERATION_MOD_FLOAT_FLOAT",
# These are friends naturally, they mod with another
"BINARY_OPERATION_MOD_FLOAT_LONG",
"BINARY_OPERATION_MOD_LONG_FLOAT",
"BINARY_OPERATION_MOD_FLOAT_INT",
"BINARY_OPERATION_MOD_INT_FLOAT",
"BINARY_OPERATION_MOD_LONG_INT",
"BINARY_OPERATION_MOD_INT_LONG",
# String interpolation with STR:
"BINARY_OPERATION_MOD_STR_INT",
"BINARY_OPERATION_MOD_STR_LONG",
"BINARY_OPERATION_MOD_STR_FLOAT",
"BINARY_OPERATION_MOD_STR_STR",
"BINARY_OPERATION_MOD_STR_BYTES",
"BINARY_OPERATION_MOD_STR_UNICODE",
"BINARY_OPERATION_MOD_STR_TUPLE",
"BINARY_OPERATION_MOD_STR_LIST",
"BINARY_OPERATION_MOD_STR_DICT",
"BINARY_OPERATION_MOD_STR_OBJECT",
# String formatting with UNICODE:
"BINARY_OPERATION_MOD_UNICODE_INT",
"BINARY_OPERATION_MOD_UNICODE_LONG",
"BINARY_OPERATION_MOD_UNICODE_FLOAT",
"BINARY_OPERATION_MOD_UNICODE_STR",
"BINARY_OPERATION_MOD_UNICODE_BYTES",
"BINARY_OPERATION_MOD_UNICODE_UNICODE",
"BINARY_OPERATION_MOD_UNICODE_TUPLE",
"BINARY_OPERATION_MOD_UNICODE_LIST",
"BINARY_OPERATION_MOD_UNICODE_DICT",
"BINARY_OPERATION_MOD_UNICODE_OBJECT",
# String formatting with BYTES:
"BINARY_OPERATION_MOD_BYTES_LONG",
"BINARY_OPERATION_MOD_BYTES_FLOAT",
"BINARY_OPERATION_MOD_BYTES_BYTES",
"BINARY_OPERATION_MOD_BYTES_UNICODE",
"BINARY_OPERATION_MOD_BYTES_TUPLE",
"BINARY_OPERATION_MOD_BYTES_LIST",
"BINARY_OPERATION_MOD_BYTES_DICT",
"BINARY_OPERATION_MOD_BYTES_OBJECT",
# String formatting with OBJECT:
"BINARY_OPERATION_MOD_OBJECT_STR",
"BINARY_OPERATION_MOD_OBJECT_BYTES",
"BINARY_OPERATION_MOD_OBJECT_UNICODE",
"BINARY_OPERATION_MOD_OBJECT_TUPLE",
"BINARY_OPERATION_MOD_OBJECT_LIST",
"BINARY_OPERATION_MOD_OBJECT_DICT",
# Default implementation.
"BINARY_OPERATION_MOD_OBJECT_OBJECT",
)
)
nonspecialized_mod_helpers_set = set(
("BINARY_OPERATION_MOD_TUPLE_OBJECT", "BINARY_OPERATION_MOD_LIST_OBJECT")
)
specialized_bitor_helpers_set = OrderedSet(
(
"BINARY_OPERATION_BITOR_OBJECT_INT",
"BINARY_OPERATION_BITOR_INT_OBJECT",
"BINARY_OPERATION_BITOR_INT_INT",
"BINARY_OPERATION_BITOR_OBJECT_LONG",
"BINARY_OPERATION_BITOR_LONG_OBJECT",
"BINARY_OPERATION_BITOR_LONG_LONG",
"BINARY_OPERATION_BITOR_LONG_INT",
"BINARY_OPERATION_BITOR_INT_LONG",
# Set containers can do this
"BINARY_OPERATION_BITOR_OBJECT_SET",
"BINARY_OPERATION_BITOR_SET_OBJECT",
"BINARY_OPERATION_BITOR_SET_SET",
"BINARY_OPERATION_BITOR_OBJECT_LIST",
"BINARY_OPERATION_BITOR_LIST_OBJECT",
"BINARY_OPERATION_BITOR_OBJECT_LIST",
"BINARY_OPERATION_BITOR_LIST_OBJECT",
"BINARY_OPERATION_BITOR_OBJECT_TUPLE",
"BINARY_OPERATION_BITOR_TUPLE_OBJECT",
# Default implementation.
"BINARY_OPERATION_BITOR_OBJECT_OBJECT",
)
)
nonspecialized_bitor_helpers_set = set()
specialized_bitand_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_BITAND_") for helper in specialized_bitor_helpers_set
)
nonspecialized_bitand_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_BITAND_") for helper in nonspecialized_bitor_helpers_set
)
specialized_bitxor_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_BITXOR_") for helper in specialized_bitor_helpers_set
)
nonspecialized_bitxor_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_BITXOR_") for helper in nonspecialized_bitor_helpers_set
)
specialized_lshift_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_LSHIFT_")
for helper in specialized_bitor_helpers_set
if "_SET" not in helper
if "_TUPLE" not in helper
)
nonspecialized_lshift_helpers_set = OrderedSet(
helper.replace("_BITOR_", "_LSHIFT_") for helper in nonspecialized_bitor_helpers_set
)
specialized_rshift_helpers_set = OrderedSet(
helper.replace("_LSHIFT_", "_RSHIFT_") for helper in specialized_lshift_helpers_set
)
nonspecialized_rshift_helpers_set = OrderedSet(
helper.replace("_LSHIFT_", "_RSHIFT_")
for helper in nonspecialized_lshift_helpers_set
)
specialized_pow_helpers_set = OrderedSet(
(
"BINARY_OPERATION_POW_INT_INT",
"BINARY_OPERATION_POW_OBJECT_INT",
"BINARY_OPERATION_POW_INT_OBJECT",
"BINARY_OPERATION_POW_OBJECT_LONG",
"BINARY_OPERATION_POW_LONG_OBJECT",
"BINARY_OPERATION_POW_LONG_LONG",
"BINARY_OPERATION_POW_LONG_INT",
"BINARY_OPERATION_POW_INT_LONG",
"BINARY_OPERATION_POW_OBJECT_FLOAT",
"BINARY_OPERATION_POW_FLOAT_OBJECT",
"BINARY_OPERATION_POW_FLOAT_FLOAT",
# Default implementation.
"BINARY_OPERATION_POW_OBJECT_OBJECT",
)
)
nonspecialized_pow_helpers_set = set()
specialized_matmult_helpers_set = OrderedSet(
(
# Default implementation.
"BINARY_OPERATION_MATMULT_LONG_OBJECT",
"BINARY_OPERATION_MATMULT_OBJECT_LONG",
"BINARY_OPERATION_MATMULT_OBJECT_OBJECT",
)
)
nonspecialized_matmult_helpers_set = set()
def _getBinaryOperationCode(
to_name, expression, operator, arg_names, in_place, emit, context
):
# This needs to have one case per operation of Python, and there are many
# of these, pylint: disable=too-many-branches,too-many-statements
left = expression.getLeft()
prefix_args = ()
ref_count = 1
needs_check = expression.mayRaiseExceptionOperation()
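# Each operator branch below resolves to a type-specialized C helper via
# pickCodeHelper when the operand type shapes are known at compile time.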
if operator == "IPow" and in_place:
helper = "POWER_OPERATION_INPLACE"
elif operator == "IPow":
helper = "POWER_OPERATION2"
elif operator == "Add":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_ADD",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_add_helpers_set,
nonhelpers=nonspecialized_add_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "Sub":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_SUB",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_sub_helpers_set,
nonhelpers=nonspecialized_sub_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "IAdd" and in_place:
helper = pickCodeHelper(
prefix="BINARY_OPERATION_ADD",
suffix="_INPLACE",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=_iadd_helpers_set,
# TODO: Add this once generated.
nonhelpers=(),
source_ref=False,
)
elif operator == "IMult" and in_place:
helper = "BINARY_OPERATION_MUL_INPLACE"
elif operator == "Div":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_OLDDIV",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_olddiv_helpers_set,
nonhelpers=nonspecialized_olddiv_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "FloorDiv":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_FLOORDIV",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_floordiv_helpers_set,
nonhelpers=nonspecialized_floordiv_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "TrueDiv":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_TRUEDIV",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_truediv_helpers_set,
nonhelpers=nonspecialized_truediv_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "Mult":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_MUL",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_mul_helpers_set,
nonhelpers=nonspecialized_mul_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "Mod":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_MOD",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_mod_helpers_set,
nonhelpers=nonspecialized_mod_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "LShift":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_LSHIFT",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_lshift_helpers_set,
nonhelpers=nonspecialized_lshift_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "RShift":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_RSHIFT",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_rshift_helpers_set,
nonhelpers=nonspecialized_rshift_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "BitOr":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_BITOR",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_bitor_helpers_set,
nonhelpers=nonspecialized_bitor_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "BitAnd":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_BITAND",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_bitand_helpers_set,
nonhelpers=nonspecialized_bitand_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "BitXor":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_BITXOR",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_bitxor_helpers_set,
nonhelpers=nonspecialized_bitxor_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "Pow":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_POW",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_pow_helpers_set,
nonhelpers=nonspecialized_pow_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "MatMult":
helper = pickCodeHelper(
prefix="BINARY_OPERATION_MATMULT",
suffix="",
left_shape=left.getTypeShape(),
right_shape=expression.getRight().getTypeShape(),
helpers=specialized_matmult_helpers_set,
nonhelpers=nonspecialized_matmult_helpers_set,
source_ref=expression.source_ref,
)
elif operator == "Divmod":
helper = "BUILTIN_DIVMOD"
elif len(arg_names) == 2:
helper = "BINARY_OPERATION"
prefix_args = (OperatorCodes.binary_operator_codes[operator],)
else:
assert False, operator
# We must assume to write to a variable is "in_place" is active, not e.g.
# a constant reference. That was asserted before calling us.
if in_place:
res_name = context.getBoolResName()
# For module variable C type to reference later.
if left.getVariable().isModuleVariable():
emit("%s = %s;" % (context.getInplaceLeftName(), arg_names[0]))
# We may have not specialized this one yet, so lets use generic in-place
# code, or the helper specified.
if helper == "BINARY_OPERATION":
emit(
"%s = BINARY_OPERATION_INPLACE( %s, &%s, %s );"
% (
res_name,
OperatorCodes.binary_operator_codes[operator],
arg_names[0],
arg_names[1],
)
)
else:
emit("%s = %s( &%s, %s );" % (res_name, helper, arg_names[0], arg_names[1]))
ref_count = 0
getErrorExitBoolCode(
condition="%s == false" % res_name,
release_names=arg_names,
needs_check=needs_check,
emit=emit,
context=context,
)
emit("%s = %s;" % (to_name, arg_names[0]))
if ref_count:
context.addCleanupTempName(to_name)
else:
with withObjectCodeTemporaryAssignment(
to_name, "op_%s_res" % operator.lower(), expression, emit, context
) as value_name:
emit(
"%s = %s( %s );"
% (
value_name,
helper,
", ".join(str(arg_name) for arg_name in prefix_args + arg_names),
)
)
getErrorExitCode(
check_name=value_name,
release_names=arg_names,
needs_check=needs_check,
emit=emit,
context=context,
)
if ref_count:
context.addCleanupTempName(value_name)
def _getUnaryOperationCode(
to_name, expression, operator, arg_name, needs_check, emit, context
):
impl_helper, ref_count = OperatorCodes.unary_operator_codes[operator]
helper = "UNARY_OPERATION"
prefix_args = (impl_helper,)
with withObjectCodeTemporaryAssignment(
to_name, "op_%s_res" % operator.lower(), expression, emit, context
) as value_name:
emit(
"%s = %s( %s );"
% (
value_name,
helper,
", ".join(str(arg_name) for arg_name in prefix_args + (arg_name,)),
)
)
getErrorExitCode(
check_name=value_name,
release_name=arg_name,
needs_check=needs_check,
emit=emit,
context=context,
)
if ref_count:
context.addCleanupTempName(value_name)
| [
"[email protected]"
]
| |
2c9578bb8c1413bfbb0ba22784dc571ba71e0c22 | 52b6f2b19fb501e48fbf6e078252c305c6477d73 | /_science/prod.py | 4e4d221bc38ba0a1950f6d732af1e64372aae2ba | []
| no_license | IanHung/_science | 60dfeb959161e392dfc5d153f13e16a69bbd0b39 | cff1b8ee3fa255e0761f9d0b98564af05feede66 | refs/heads/master | 2020-06-02T03:06:04.722352 | 2013-09-03T22:24:45 | 2013-09-03T22:24:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | '''
Created on 2013-08-27
@author: Ian
'''
import os
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
ALLOWED_HOSTS = [".underscorescience.com"]
MEDIA_ROOT = '/media/'
STATIC_ROOT = '/static/'
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME =os.environ['AWS_STORAGE_BUCKET_NAME']
DEFAULT_FILE_STORAGE = '_science.s3utils.MediaRootS3BotoStorage'
STATICFILES_STORAGE = '_science.s3utils.StaticRootS3BotoStorage'
s3_URL = 'http://%s.s3.amazonaws.com/' %AWS_STORAGE_BUCKET_NAME
STATIC_URL = s3_URL + STATIC_ROOT
MEDIA_URL = s3_URL + MEDIA_ROOT
AWS_QUERYSTRING_AUTH = False
# allows the session cookie to apply to all subdomains
SESSION_COOKIE_DOMAIN=".underscorescience.com"
# prepend www for SEO
PREPEND_WWW = True
SECRET_KEY = os.environ['SECRET_KEY']
| [
"[email protected]"
]
| |
f71aaa51a64a2e67ff1b9823093a2019c5a2b983 | d002ef03c46773b2c53c4ff724103281ad1d3652 | /0-setup/1-basics/3-decision/4-modulo-operator/bot.py | d2ac3903fb5f4f92c4d2781744f1e62b0a9e4aac | []
| no_license | 4courr72/com404 | 6b023dfeacea9bc50b418fb3b177a2dec451ff95 | 9018bfe1ee928a9ea65cadb9f1ba72b36dd24e91 | refs/heads/master | 2020-07-31T13:39:44.815221 | 2019-12-10T13:26:18 | 2019-12-10T13:26:18 | 210,621,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | #Asking for user input (an integer!)
print("Please enter a whole number.")
number = int( input() ) #To force saved input as an integer - think of it as gets input first (so inside brackets) then turns it into an integer
#Modulo 2 to see if odd or even
if (number % 2 != 0):
print("The number " + str(number) + " is an odd number")
else:
print("The number " + str(number) + " is an even number")
#[Comments 15-10-19: Solutions there now - I note Prins has used '== 0' and I used '!= 0' and have switched the print statements - this gives the same result
| [
"[email protected]"
]
| |
16ef81874494815258632712aecd282b3d3fe109 | 00fed3d8f6c4b73e62048ff910c1cbcecff422e3 | /sample_mini_set.py | 4aeed8c8a0217f1f84119df4f05ac36174dd93eb | []
| no_license | SeanZChen/SAR_PROJECT | 9ee0eca9693a8d2b624bb90b7abc1be74a9371d2 | c34d734ea6d1ba19d0d7a9a29297da2c1b5cf237 | refs/heads/master | 2021-05-25T23:51:01.882935 | 2020-06-05T11:56:40 | 2020-06-05T11:56:40 | 253,970,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,600 | py | ##################################################
#
# author: Zixuan Chen
# Date: 2020/04/07
# Description:
# Sample small training set
#
##################################################
import os
import random
import argparse
cats = ['2S1', 'BRDM_2', 'BTR_60', 'D7', 'BMP2', 'BTR70', 'T72', 'T62', 'ZIL131', 'ZSU_23_4']
def sample(cfg):
if not os.path.exists(cfg.save_root):
os.makedirs(cfg.save_root)
else:
os.system("rm -rf %s"%(cfg.save_root))
for cat in cats:
original_dir = os.path.join(cfg.original_root, 'TRAIN', cat)
save_dir = os.path.join(cfg.save_root, 'TRAIN', cat)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
imgs = os.listdir(original_dir)
sample_size = min(len(imgs), cfg.sample_size)
print('Sampling %s, sampled: %d'%(cat, sample_size))
selected = random.sample(imgs, sample_size)
for item in selected:
selected_dir = os.path.join(original_dir, item)
# new_dir = os.path.join(save_dir, item)
os.system("cp %s %s"%(selected_dir, save_dir))
os.system("cp -r %s %s"%(os.path.join(cfg.original_root, 'TEST'), cfg.save_root))
def main():
parser = argparse.ArgumentParser(description='Training efficient networks')
parser.add_argument('--sample-size', default=50, type=int)
parser.add_argument('--original_root', default='./data', type=str)
parser.add_argument('--save_root', default='./data_small_set', type=str)
cfg = parser.parse_args()
sample(cfg)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
f8942fcf58e92e580ebc988cc3035632d869faba | a561673adf29beb7939052b898dad5bf9167cefc | /pkg/codegen/internal/test/testdata/output-funcs/python/pulumi_mypkg/func_with_dict_param.py | 194d0e90d558b3e0a7ef6ea22b51830d4b8c97fb | [
"Apache-2.0"
]
| permissive | orionstudt/pulumi | 50fd75d4ec7bb48646cd3c83198afcf4a556a5fa | 7ef0b83c0cc7c4f9093e2a8fc0303e875d35c15c | refs/heads/master | 2023-08-12T13:57:32.605402 | 2021-10-18T12:24:46 | 2021-10-18T12:24:46 | 312,097,288 | 0 | 1 | Apache-2.0 | 2021-01-11T17:12:44 | 2020-11-11T21:43:03 | null | UTF-8 | Python | false | false | 2,134 | py | # coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'FuncWithDictParamResult',
'AwaitableFuncWithDictParamResult',
'func_with_dict_param',
'func_with_dict_param_output',
]
@pulumi.output_type
class FuncWithDictParamResult:
def __init__(__self__, r=None):
if r and not isinstance(r, str):
raise TypeError("Expected argument 'r' to be a str")
pulumi.set(__self__, "r", r)
@property
@pulumi.getter
def r(self) -> str:
return pulumi.get(self, "r")
class AwaitableFuncWithDictParamResult(FuncWithDictParamResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return FuncWithDictParamResult(
r=self.r)
def func_with_dict_param(a: Optional[Mapping[str, str]] = None,
b: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableFuncWithDictParamResult:
"""
Check codegen of functions with a Dict<str,str> parameter.
"""
__args__ = dict()
__args__['a'] = a
__args__['b'] = b
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('mypkg::funcWithDictParam', __args__, opts=opts, typ=FuncWithDictParamResult).value
return AwaitableFuncWithDictParamResult(
r=__ret__.r)
@_utilities.lift_output_func(func_with_dict_param)
def func_with_dict_param_output(a: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
b: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[FuncWithDictParamResult]:
"""
Check codegen of functions with a Dict<str,str> parameter.
"""
...
| [
"[email protected]"
]
| |
ee7f8abd5fe37016ea1dc506931e4aacbe80ac29 | 10316355e41f868d2ad6dcd29962a349bca082c9 | /venv/bin/django-admin | 5eff6d709d2c349377db07cfc4fb3d55809e03db | []
| no_license | lucas-rafa-94/bexigaprojeto | d35e098072d5a6a5f4e7a66221f6325c31b67aa8 | aa3e18856c0961678e5a6d5755b5878c67daea6f | refs/heads/master | 2022-03-15T05:23:38.612991 | 2019-07-11T05:35:44 | 2019-07-11T05:35:44 | 194,945,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | #!/Users/lucasdossantos/PycharmProjects/bexigaprojeto/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
]
| ||
54837d0322474f4a5bb87c841c454ed412440574 | 9b018e9eac8d97fbcc1e76a69b95c994f79a6ea3 | /randomtest/migrations/0054_auto_20170814_1657.py | 6212765bfa979c499d1d27436ef03f9a60646c2d | []
| no_license | hbgolze/contest-database | e39f269d7337652e7cdad03cc29827d4f729ec8f | 263385b438f7b7e1ab99062aad561ed0cec9c079 | refs/heads/master | 2023-06-26T21:30:08.246647 | 2023-06-11T02:34:41 | 2023-06-11T02:34:41 | 71,851,839 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 701 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-14 21:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('randomtest', '0053_problemgroup_is_shared'),
]
operations = [
migrations.AddField(
model_name='solution',
name='created_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AddField(
model_name='solution',
name='modified_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
| [
"[email protected]"
]
| |
e9175226a256f288a61fe9f8504a7a8c5b0780ac | 6ce6dccdc47f4b6a9cd5251fcd3f6ca2225931a5 | /sample/chapt02/chapt02-20.py | 7151b64aae909c6fcff57f56c0aa7ce7e3c89289 | []
| no_license | VsPun/learning | f411bd936b069b6b3a1b3f62dc8adc946f4ca978 | c517bc48e241eb1cbf1411894b9d4ab4b3ead22c | refs/heads/master | 2020-08-04T04:56:57.445164 | 2019-04-19T11:26:21 | 2019-04-19T11:26:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | def plusVal( val1, val2 ):
r = val1 + val2
return r
a = 10 + 20             # a becomes 30
b = plusVal( 10, 20 )   # b becomes 30
c = 10 + 20 + 30        # c becomes 60
d = plusVal( 10, 20 ) + 30  # d becomes 60
e = 10 + plusVal( 20, 30 )  # e becomes 60
| [
"[email protected]"
]
| |
8f8ee752f8f128767c6d850042d54e473e726596 | d5d12507f8e62abd6ad4ae143ed0c30d2ec70a34 | /chapter_18/chapter18.py | 146d239b75801d502cb20d34a582aeef597f4d22 | []
| no_license | JohnHowardRoark/thinkcspy3 | 5ad3add17c4b534f0a1d007c7def1ace72583223 | def8e6615f2bcef367cb747d13a9f3cf23ece83a | refs/heads/master | 2020-09-19T21:35:57.213913 | 2019-12-22T18:25:58 | 2019-12-22T18:25:58 | 224,303,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,190 | py | """ How to Think Like a Computer Scientist: Learning with Python 3
Wentworth et al 2012
Chapter 18: Exercise 02
"""
import turtle
def make_window(colr, ttle):
""" setup the window with given background color and title """
w = turtle.Screen()
w.bgcolor(colr)
w.title(ttle)
return w
def make_turtle(color, size):
""" setup the turtle with given color and pensize """
t = turtle.Turtle()
t.color(color)
t.pensize(size)
t.hideturtle() # do not show turtle
t.speed(0) # 0 - 10 scale, 0 is fastest
return t
""" setup the canvas and turtle """
wn = make_window("lightgreen", "Turtle")
tess = make_turtle("blue" , 2)
tess.penup()
tess.forward(-250)
tess.pendown()
def koch(t, order, size):
"""
Make turtle t draw a Koch fractal of 'order' and 'size'.
Leave the turtle facing the same direction.
"""
if order == 0: # The base case is just a straight line
t.forward(size)
else:
for angle in [85, -170, 85, 0]:
koch(t, order-1, size/3)
t.right(angle)
koch(tess, 3, 600)
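# Order 3 recurses three levels: 4**3 = 64 base segments, each 600/27 (about 22) units long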
""" useful to ensure return to prompt after window is closed """
wn.mainloop() | [
"[email protected]"
]
| |
b3ae8ca2d64aa838dee23fbf5110cc142cdd0a73 | 6e9476a1715c2b2ae2fbfea557699f63bc781e1f | /src/whispy_lispy/compiler.py | a467ea5a6125b0a538e356169777b1073d2fc3ef | [
"MIT"
]
| permissive | vladiibine/whispy_lispy | f92b872d91fa82abd19f31616760a68f68b5cc57 | 9e5d6196f24e5c3ed5fb8753620ace5994c24add | refs/heads/master | 2021-01-10T01:30:39.478372 | 2016-10-07T20:45:12 | 2016-10-07T20:45:12 | 36,161,333 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # -*- coding: utf-8 -*-
"""This module marks the intention to someday convert the AST emitted by the
parser into some intermediate representation (or assembler core?) to maybe
run on some virtual machines, like the python interpreter, JVM, etc.
""" | [
"[email protected]"
]
| |
ff36301ab905c9ebfa065c44cf6fb1f52644949a | a837c7d25f9d55c4e756652f0c597c835021820f | /test/testCancion.py | 0c9f89987547bcf85f8779bfbf20e0ed51f87bcf | []
| no_license | GisselleIb/BasedeDatos | 1b4457c4bd5d2cf074484b69bab286ef101b7977 | c8b264ac6641431912f1c21692487251d35fc0a8 | refs/heads/master | 2020-03-30T15:09:18.614282 | 2018-10-26T01:51:54 | 2018-10-26T01:51:54 | 151,350,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | import unittest
import sys
sys.path.append('../..')
from src.cancion import Cancion
class TestCancion(unittest.TestCase):
def setUp(self):
self.cancion=Cancion()
def test_setArtista(self):
self.assertEqual(self.cancion.artista,"")
self.cancion.setArtista("artist")
self.assertEqual(self.cancion.artista,"artist")
def test_setTitulo(self):
self.assertEqual(self.cancion.titulo,"")
self.cancion.setTitulo("title")
self.assertEqual(self.cancion.titulo,"title")
def test_setFecha(self):
self.assertEqual(self.cancion.fecha,"")
self.cancion.setFecha("11/11/11")
self.assertEqual(self.cancion.fecha,"11/11/11")
def test_setGenero(self):
self.assertEqual(self.cancion.genero,"")
self.cancion.setGenero("genre")
self.assertEqual(self.cancion.genero,"genre")
def test_setTrack(self):
self.assertEqual(self.cancion.track,"")
self.cancion.setTrack("1")
self.assertEqual(self.cancion.track,1)
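# Optional entry point so the suite can also be run directly
# (equivalent to e.g.: python -m unittest testCancion):
if __name__ == '__main__':
    unittest.main()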
| [
"[email protected]"
]
| |
05c2d99222f7470e4c829593f2fcbf0f3a3aa404 | ed6d11f0a506fd781fe0a307ea16ecf6880770bd | /ENV/bin/easy_install | 99e1f536dfa11ccd02fa5b6a8edb39b272d6cfea | []
| no_license | Xa1n/gilded_rose_python | 6dfe0a4536975f0c053d2fcf590972aecd187d3a | bea23b2d49708db9d44b75446d61451cf1504981 | refs/heads/master | 2020-06-01T09:03:44.961899 | 2019-06-07T10:18:07 | 2019-06-07T10:18:07 | 190,724,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | #!/Users/xainstorey/Desktop/Makers/challenges/gilded_rose/ENV/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
]
| ||
6bea42a14ebfcac2b02dee5c682277b4ca32f59f | 3017e7f0e8cd99469c7c98ec8a4b9b75d39c0c2f | /pythonkitabi/ingilizce/ch19/kalam/main.py | 674f344a308f3216fde766ace3ab1fc1334017cd | []
| no_license | Rmys/projects | de6cb9d5d3f027d98c812647369d1e487d902c4b | 60ce197bc1fb7ad3fa31f12559b74ee450b69df1 | refs/heads/master | 2020-03-19T15:36:11.603931 | 2011-09-16T00:15:34 | 2011-09-16T00:15:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | #!/usr/bin/env python
"""
main.py - application starter
copyright: (C) 2001, Boudewijn Rempt
email: [email protected]
"""
import sys, locale
sys.setappdefaultencoding("utf8")
from qt import *
from kalamapp import KalamApp
from kalamdoc import KalamDoc
from kalamview import KalamView
import kalamconfig
from resources import TRUE, FALSE
def main(args):
app=QApplication(args)
translator = QTranslator(app)
translator.load("kalam_" + locale.getlocale()[0] + ".qm",
kalamconfig.get("libdir","."))
app.installTranslator(translator)
kalam = KalamApp()
app.setMainWidget(kalam)
kalam.show()
if len(args) > 1:
for arg in args[1:]:
document=KalamDoc()
document.open(arg)
kalam.docManager.addDocument(document, KalamView)
app.exec_loop()
if __name__=="__main__":
main(sys.argv)
| [
"[email protected]"
]
| |
b9adc7331dee337e2a3ca0f2f3e0ea8179079032 | c54722779374bf15b0e1bfef9cbc5b98001bbea8 | /Reinforcement/R-1.5_squareSumComprehension.py | da748599de383ee5d89782ae7e35b8e0ddde5b7b | []
| no_license | fahrulardiannugroho/struktur-data-tugas-1 | f210e035045d3a685fb2e1972f2ec7bf9e017d1a | 500345debcd6ea4cd67d0010be83424edc06bd29 | refs/heads/master | 2023-05-03T12:22:02.062493 | 2021-05-31T12:44:27 | 2021-05-31T12:44:27 | 372,419,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | def squares_sum(n):
return sum(number**2 for number in range(0,n))
print(squares_sum(3))
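# range(0, n) covers 0..n-1, so squares_sum(3) = 0**2 + 1**2 + 2**2 = 5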
| [
"[email protected]"
]
| |
d9486a49cfb4da916da38d7215ba0be37fbd98f5 | 9ea53850ba6135ac6f5686c1a38a1029ebccb294 | /src/infer.py | d7e92fe3313444e734712fafe6c45921eb629952 | [
"MIT"
]
| permissive | yangbo/pytorch-vgg-cifar10 | 25f0a0c5b2e482587fbed1fd05342260456a8ab0 | 11870ecbe1620b2a8bd7a9163da8cb9c226c2fac | refs/heads/master | 2021-09-01T06:03:13.876308 | 2017-12-25T08:12:53 | 2017-12-25T08:12:53 | 113,654,176 | 0 | 0 | null | 2017-12-09T08:23:43 | 2017-12-09T08:23:42 | null | UTF-8 | Python | false | false | 570 | py | import torch
import sys
def infer():
    best_epoch = 273 # epoch numbering starts at 0
checkpoint_file = '../save_da_vgg19_bn/checkpoint_{}.tar'.format(best_epoch)
checkpoint = torch.load(checkpoint_file)
print('best epoch is', checkpoint['epoch'])
# import vgg
sys.path.insert(0, '../src')
import vgg
model = vgg.vgg19_bn()
model.features = torch.nn.DataParallel(model.features)
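    # Wrapping features in DataParallel before loading makes the model's key
    # names carry the 'features.module.*' prefix, presumably matching how the
    # checkpoint was saved; otherwise load_state_dict would fail on key names.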
state = checkpoint['state_dict']
model.load_state_dict(state)
print(model.parameters)
if __name__ == '__main__':
infer() | [
"[email protected]"
]
| |
bf1ec2799e38215a98eae3c52afaf3ee5ca4190d | 06d22b05c4069f24949c98a099694a806c0e237c | /Работа с данными в Pandas/Titanic.py | 66cd0cdddbdcb2985111cc6611e3fa604e3d812e | []
| no_license | DuwazSandbox/ds_introduce | b02c36a6c388e3870c645d2d58474b9065ae45ca | ada734804e53b063f3b7ecebbfe73298eb23ee43 | refs/heads/master | 2023-04-28T09:34:05.696199 | 2021-05-06T07:08:35 | 2021-05-06T07:08:35 | 352,138,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,408 | py | import bs4
import pandas as pd
import numpy as np
from beauty_print import beauty_print
titanic_full_df = pd.read_csv("https://github.com/agconti/kaggle-titanic/raw/master/data/train.csv", sep=",")
# General overview of the data
#beauty_print("Shape:", titanic_full_df.shape)
#print("Info:")
#titanic_full_df.info()
#beauty_print("Description:", titanic_full_df.describe())
#beauty_print("Column names:", titanic_full_df.columns)
#beauty_print("Data types per column:", titanic_full_df.dtypes)
#beauty_print("First rows:", titanic_full_df.head())
#beauty_print("Last rows:", titanic_full_df.tail(7))
#beauty_print("Number of NULL cells:", titanic_full_df.isnull().sum())
#beauty_print("5 random rows of the table:", titanic_full_df.sample(5))
# Indexing and selecting data
#beauty_print("A single column + first rows only:", titanic_full_df["Age"].head())
#beauty_print("Type of a DataFrame column:", type(titanic_full_df["Age"]))
#beauty_print("Two columns + first rows only:", titanic_full_df[["Age", "Sex"]].head())
#Working with a new column in the DataFrame
titanic_full_df["Relatives"] = titanic_full_df["SibSp"] + titanic_full_df["Parch"]
#beauty_print("The new column shown with the two old ones:", titanic_full_df[["SibSp", "Parch", "Relatives"]].head())
#beauty_print("Dropping the 3rd row (index 2):", titanic_full_df.drop(2, axis=0).head())
#beauty_print("Dropping the column named 'Relatives':", titanic_full_df.drop("Relatives", axis=1).head())
#beauty_print("First 10 indices of the DataFrame as a list:", titanic_full_df.index.tolist()[:10])
#beauty_print("Data at specific indices and columns:", titanic_full_df.loc[442 : 450 : 2, ["Age", "Sex"]])
#beauty_print("Setting a column as the index and selecting by index value:", titanic_full_df.set_index(["Embarked"]).loc["S"].head())
#beauty_print("Values of the first row:", titanic_full_df.iloc[0])
#beauty_print("Data at specific indices:", titanic_full_df.iloc[[564, 442]])
#beauty_print("Data at specific indices and columns (2):", titanic_full_df.loc[[564, 442], ["Name", "Sex"]])
#beauty_print("Checking every cell for equality to one:", titanic_full_df == 1)
#beauty_print("Checking every cell of the 'Survived' column:", (titanic_full_df.Survived == 1).head())
#beauty_print("Rows where the 'Survived' field equals 0:", titanic_full_df[titanic_full_df["Survived"] == 0].head())
#beauty_print("Value counts for 'Sex', given that 'Survived' equals 1:", titanic_full_df[titanic_full_df["Survived"] == 1]["Sex"].value_counts())
#beauty_print("Rows with 'Fare > 100' or 'Name' containing 'Master':", titanic_full_df[(titanic_full_df["Fare"] > 100) | (titanic_full_df["Name"].str.find("Master") != -1)].head())
# Methods
#beauty_print("List of all distinct values in the 'Embarked' column:", titanic_full_df["Embarked"].unique())
#beauty_print("Number of distinct values in the 'Embarked' column:", titanic_full_df["Embarked"].nunique())
#beauty_print("Value counts for 'Survived':", titanic_full_df["Survived"].value_counts())
#beauty_print("Value counts for 'Pclass':", titanic_full_df["Pclass"].value_counts())
# Replace the values [1,2,3] of the 'Pclass' column with text
titanic_full_df["Pclass"].replace({1: "Elite", 2: "Middle class", 3: "Workers"}, inplace=True)
#beauty_print("Value counts for 'Pclass' (2):", titanic_full_df["Pclass"].value_counts())
#beauty_print("Mapping 'Fare' to 'Cheap' or 'Expensive' depending on the value:", titanic_full_df["Fare"].apply(lambda x: "Cheap" if x < 20 else "Expensive"))
# Fill a new 'Fare_Bin' column depending on the value of the 'Fare' cell
titanic_full_df["Fare_Bin"] = titanic_full_df["Fare"].apply(lambda x: "Cheap" if x < 20 else "Expensive")
#beauty_print("Sort the table by the 'Name' field:", titanic_full_df.sort_values(by='Name'))
# Working with missing values
#beauty_print("Do the columns contain NULLs? :", titanic_full_df.isnull().any())
#beauty_print("The table without rows that contain missing values:", titanic_full_df.dropna())
#beauty_print("The table without rows that have missing values in the 'Age' and 'Sex' columns:", titanic_full_df.dropna(subset=["Age", "Sex"]))
#beauty_print("Rows with at least 14 filled columns:", titanic_full_df.dropna(thresh=14))
#beauty_print("Insert the word 'MISSING' into cells with missing values:", titanic_full_df.fillna("MISSING"))
#beauty_print("Fill empty cells of the 'Age' column with the table-wide mean:", titanic_full_df["Age"].fillna(value=titanic_full_df["Age"].mean()))
# Working with groupings
#beauty_print("Counts of surviving and deceased men and women:", titanic_full_df[["Sex", "Survived"]].pivot_table(index=["Sex"], columns=["Survived"], aggfunc=len))
#beauty_print("Mean age of surviving and deceased men and women:", titanic_full_df[["Sex", "Survived", "Age"]].pivot_table(values=["Age"], index=["Sex"], columns=["Survived"], aggfunc="mean"))
#beauty_print("Group by the Pclass column and pick the maximum value from each group:", titanic_full_df[titanic_full_df.groupby("Pclass")["PassengerId"].transform(max) == titanic_full_df["PassengerId"]])
#beauty_print("Mean age for each group of the Pclass column:", titanic_full_df.groupby("Pclass").mean()["Age"])
#beauty_print("Age statistics (count, mean, std, min/max, 25%, 50%, 75%) within each Pclass group:", titanic_full_df.groupby("Pclass").describe()["Age"])
#beauty_print("Swap columns and rows (transpose) (example using the previous query):", titanic_full_df.groupby("Pclass").describe()["Age"].transpose())
#beauty_print("min/max/std of age for each Pclass group:", titanic_full_df.groupby("Pclass")["Age"].agg(["min", "max", "std"]))
#beauty_print("Mean passenger age and passenger count for each Pclass group:", titanic_full_df.groupby("Pclass").agg({"Age": np.mean, "PassengerId": "count"}))
#beauty_print("Mean 'Fare' for each combination of the Pclass and Sex groups:", titanic_full_df.groupby(["Pclass", "Sex"]).mean()["Fare"])
# Looping over values
#print("Show the index and the value of the 'Name' column:")
#for index, row in titanic_full_df.iterrows():
#    print("Index: {}, Name: {}".format(index, row["Name"]))
#print("Show each 'Pclass' group name and its mean age:")
#for group_name, group in titanic_full_df.groupby("Pclass"):
#    print(group_name, group["Age"].mean())
"[email protected]"
]
| |
e9efdb9fc7477f2fd0f82b93c4d61b80333782c6 | 41040a12d8dd8f7fa6ca4591139c68f446190870 | /manage.py | f80a8bd3661075c7dcb46185f3b4a146b56aaacc | []
| no_license | laeeqkhan01/geoloctest_1 | c1db7fab420e650741dd9a68e44dfea29d28f4df | 99a6fc1dd2692310a342f2f2b3257ee58b7c025c | refs/heads/master | 2023-01-04T03:48:52.441389 | 2020-10-26T00:53:39 | 2020-10-26T00:53:39 | 307,166,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 632 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'geoloctest_1.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
77ff76fa06821167c172846bf37979d9ac39e693 | dc5ad69c775f7072248d9936d9773ba93ced4b27 | /fabtools/require/apache.py | c109401d3abf58f4f443f3885cb74022891e2186 | [
"BSD-2-Clause"
]
| permissive | MorphiX00/fabtools | b8469b686dddfc86682ef0e78d2e91cea55fa133 | 0cde09d10ce3b8b8df763605e53c7a06a24d588f | refs/heads/master | 2021-01-18T07:49:08.019949 | 2013-02-26T15:13:12 | 2013-02-26T15:13:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | import tempita
from fabtools import apache
from fabtools.require.deb import package
from fabtools.require.service import started
from fabtools.require.files import file as _file
def server():
package('apache2')
started('apache2')
def site(site_name, template_contents=None, template_source=None, enabled=True, check_config=True, **kwargs):
server()
config_filename = '/etc/apache2/sites-available/%s.conf' % site_name
if template_contents:
tmpl = tempita.Template(template_contents)
elif template_source:
f = open(template_source, 'r')
tmpl = tempita.Template(f.read())
f.close()
_file(
path=config_filename,
contents=tmpl.substitute(kwargs)
)
if enabled:
apache.enable_site(site_name)
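# Hypothetical usage from a fabfile (the Tempita placeholders and vhost
# fields below are illustrative, not part of fabtools itself; site() passes
# its extra kwargs straight into tmpl.substitute()):
#
#   site('example',
#        template_contents='<VirtualHost *:80>\n'
#                          '    ServerName {{server_name}}\n'
#                          '    DocumentRoot {{docroot}}\n'
#                          '</VirtualHost>\n',
#        server_name='www.example.com',
#        docroot='/var/www/example')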
| [
"[email protected]"
]
| |
a6d3e8adda1b8ff6dc2c78add86277e2aba7fabf | 0f155d5615d9e6aa73dd238ad8c144ac89066f91 | /tests/gold_tests/tls/tls_sni_host_policy.test.py | bd2ac5b2a37d6a820fa4f2038b7c8571e5338a22 | [
"BSD-3-Clause",
"OpenSSL",
"MIT",
"ISC",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-openssl",
"Apache-2.0",
"LicenseRef-scancode-ssleay-windows",
"BSD-2-Clause",
"HPND",
"TCL",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
]
| permissive | nozomi1773/trafficserver | 0491fb9a3e9bd6bfacc22e463186213e2e97b1ad | 2ee141137545a84584d8047eee70b171b5254c40 | refs/heads/master | 2021-07-10T04:51:58.601768 | 2020-08-04T03:21:25 | 2020-08-04T03:21:25 | 181,676,681 | 0 | 0 | Apache-2.0 | 2019-04-16T11:32:40 | 2019-04-16T11:32:40 | null | UTF-8 | Python | false | false | 7,572 | py | '''
Test exercising host and SNI mismatch controls
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test exercising host and SNI mismatch controls
'''
ts = Test.MakeATSProcess("ts", select_ports=True, enable_tls=True)
cafile = "{0}/signer.pem".format(Test.RunDirectory)
cafile2 = "{0}/signer2.pem".format(Test.RunDirectory)
server = Test.MakeOriginServer("server")
request_header = {"headers": "GET /case1 HTTP/1.1\r\nHost: example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: bar.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.addSSLfile("ssl/signer.pem")
ts.Disk.records_config.update({
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.url_remap.pristine_host_hdr' : 1,
'proxy.config.ssl.CA.cert.filename': '{0}/signer.pem'.format(ts.Variables.SSLDir),
'proxy.config.exec_thread.autoconfig.scale': 1.0,
'proxy.config.http.host_sni_policy': 2,
'proxy.config.ssl.TLSv1_3': 0
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
# Just map everything through to origin. This test is concentrating on the user-agent side
ts.Disk.remap_config.AddLine(
'map / http://127.0.0.1:{0}/'.format(server.Variables.Port)
)
# Scenario 1: Default no client cert required. cert required for bar.com
ts.Disk.sni_yaml.AddLines([
'sni:',
'- fqdn: boblite',
' verify_client: STRICT',
' host_sni_policy: PERMISSIVE',
'- fqdn: bob',
' verify_client: STRICT',
])
# case 1
# sni=bob and host=dave. Do not provide client cert. Should fail
tr = Test.AddTestRun("Connect to bob without cert")
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.StartBefore(server)
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k -H 'host:dave' --resolve 'bob:{0}:127.0.0.1' https://bob:{0}/case1".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 35
# case 2
# sni=bob and host=dave. Do provide client cert. Should succeed
tr = Test.AddTestRun("Connect to bob with good cert")
tr.Setup.Copy("ssl/signed-foo.pem")
tr.Setup.Copy("ssl/signed-foo.key")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k --cert ./signed-foo.pem --key ./signed-foo.key -H 'host:dave' --resolve 'bob:{0}:127.0.0.1' https://bob:{0}/case1".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
# case 3
# sni=dave and host=bob. Do not provide client cert. Should fail due to sni-host mismatch
tr = Test.AddTestRun("Connect to dave without cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k -H 'host:bob' --resolve 'dave:{0}:127.0.0.1' https://dave:{0}/case1".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("Access Denied", "Check response")
# case 4
# sni=dave and host=bob. Do provide client cert. Should fail due to sni-host mismatch
tr = Test.AddTestRun("Connect to dave with cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k --cert ./signed-foo.pem --key ./signed-foo.key -H 'host:bob' --resolve 'dave:{0}:127.0.0.1' https://dave:{0}/case1".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ContainsExpression("Access Denied", "Check response")
# case 5
# sni=ellen and host=boblite. Do not provide client cert. Should warn due to sni-host mismatch
tr = Test.AddTestRun("Connect to ellen without cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k -H 'host:boblite' --resolve 'ellen:{0}:127.0.0.1' https://ellen:{0}/warnonly".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("Access Denied", "Check response")
# case 6
# sni=ellen and host=boblite. Do provide client cert. Should warn due to sni-host mismatch
tr = Test.AddTestRun("Connect to ellen with cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k --cert ./signed-foo.pem --key ./signed-foo.key -H 'host:boblite' --resolve 'ellen:{0}:127.0.0.1' https://ellen:{0}/warnonly".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("Access Denied", "Check response")
# case 7
# sni=ellen and host=fran. Do not provide client cert. No warning since neither name is mentioned in sni.yaml
tr = Test.AddTestRun("Connect to ellen without cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k -H 'host:fran' --resolve 'ellen:{0}:127.0.0.1' https://ellen:{0}/warnonly".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("Access Denied", "Check response")
# case 8
# sni=ellen and host=fran. Do provide client cert. No warning since neither name is mentioned in sni.yaml
tr = Test.AddTestRun("Connect to ellen with cert")
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = "curl --tls-max 1.2 -k --cert ./signed-foo.pem --key ./signed-foo.key -H 'host:fran' --resolve 'ellen:{0}:127.0.0.1' https://ellen:{0}/warnonly".format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.Streams.All = Testers.ExcludesExpression("Access Denied", "Check response")
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI/hostname mismatch sni=dave host=bob action=terminate", "Should have warning on mismatch")
ts.Disk.diags_log.Content += Testers.ContainsExpression("WARNING: SNI/hostname mismatch sni=ellen host=boblite action=continue", "Should have warning on mismatch")
ts.Disk.diags_log.Content += Testers.ExcludesExpression("WARNING: SNI/hostname mismatch sni=ellen host=fran", "Should not have warning on mismatch with non-policy host")
| [
"[email protected]"
]
| |
af115fd69d7e52b2290a00ca9b39401000f50d5c | e48723e45c247e15d26144fa003c3ddf1c480929 | /templateFacadePractices.py | 8fed9024f35bd9ed95253c0a70cabd28851780f7 | []
| no_license | simonjj/ZenossScripts | 962654387ba3fc8f373ae0f9ffabf5c1fecaca46 | eea7c6756af965ff72b839c56c41a72c5f8eb1d5 | refs/heads/master | 2020-12-24T15:05:00.204961 | 2012-03-28T23:58:27 | 2012-03-28T23:58:27 | 2,534,367 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,047 | py | #get a hold of API
from Products.Zuul import getFacade
a = getFacade("template")
#listing all of the templates
for template in a.getTemplates("/zport/dmd/Devices"):
#printing the "raw" info object
print template
    #printing the ID of the object
print template.id
print template.getName()
#filtering out one particular template
for template in a.getTemplates("/zport/dmd/Devices"):
if template.id == "ZenJMX":
break
#accessing/retrieving one particular template
for template in a.getTemplates("/zport/dmd/Devices/rrdTemplates/copy_of_ZenJMX"):
print template
#copying a template
a.copyTemplate(TEMPLATE_UID_TO_COPY, TARGET_UID_FOR_THE_TARGET)
a.copyTemplate(template.uid, "/zport/dmd/Devices/Server/Tomcat")
#listing all the datasources for one template
for ds in a.getDataSources("/zport/dmd/Devices/rrdTemplates/ZenJMX_EAN_Hotels_7606"): print ds
#changing the jmxPort on a template
for ds in a.getDataSources("/zport/dmd/Devices/rrdTemplates/ZenJMX_EAN_Hotels_7606"):
ds.jmxPort = 7606
#list out and append 7606 to the graph definitions
for g in a.getGraphs(myt.uid):
print g
g.name += " 7606"
#add a new template to a certain device class
a.addTemplate("My New Template Name", "/zport/dmd/Devices/Server/SSH/Linux/ClickServers")
# SUPPORTED DATA SOURCE TYPES
SNMP
COMMAND
Built-In
PING
GangliaMonitor
ApacheMonitor
Cisco UCS XML API
DigMonitor
DnsMonitor
FtpMonitor
HttpMonitor
IRCDMonitor
JabberMonitor
LDAPMonitor
MySqlMonitor
NNTPMonitor
NtpMonitor
RPCMonitor
Splunk
CWMonitor
JMX
MAILTX
SQL
VMware
WebTx
WinPerf
vCloudStatus
vCloud
#create a new datasource specifying:
# 1. the uid of the template to attach the datasource to
# 2. the name/id of the new datasource
# 3. the type of the datasource (see above)
a.addDataSource(t.uid, "myfirstautomaticjmxdatasource", "JMX")
#creating a datapoint on the previous datasource
a.addDataPoint(ds.uid, "howdyyall")
#looping and adding 10 datasources of type jmx to one template
for i in range(10):
a.addDataSource(t.uid, "myfirstautomaticjmxdatasource%s" % i, "JMX")
#adding the same datapoint to the different datasources from above
for ds in a.getDataSources(t.uid):
a.addDataPoint(ds.uid, "thisisthesamedatapointforall")
# adding a graph definition to the template
a.addGraphDefinition(t.uid, "mygraph")
#retrieve a datapoint info object to further work with (e.g. add to graphs)
from Products.Zuul.interfaces import IDataPointInfo
# access all the datapoints from the datasource (using the detour via the real object)
ds._object.getRRDDataPoints()
#this returns all the datapoints for the template as an array
[<RRDDataPoint at /zport/dmd/Devices/rrdTemplates/SimonTest/datasources/
# accessing the first object in the array and turning it into an info object to use
dp = ds._object.getRRDDataPoints()[0]
dpi = IDataPointInfo(dp)
dpi.uid
# adding a datapoint to a graph using:
# 1. the uid of the datapoint
# 2. the uid of the graph
a.addDataPointToGraph(dpi.uid, g.uid)
def getGraphById(t, graphid):
for g in a.getGraphs(t.uid):
if graphid == g.id: return g
#this method combines all the previous things into one
def mystuff(t, dsname, dpname, gname, type="JMX"):
from Products.Zuul.interfaces import IDataPointInfo, IRRDDataSourceInfo, IGraphInfo
print "woring on %s" % dsname
nds = IRRDDataSourceInfo(a.addDataSource(t.uid, dsname, type))
print "created datasource"
ndp = IDataPointInfo(a.addDataPoint(nds.uid, dpname))
print "created data point"
a.addGraphDefinition(t.uid, gname)
ng = getGraphById(t, gname)
print "created graph def."
a.addDataPointToGraph(ndp.uid, ng.uid)
print "done"
# reading in a file
# open the file
myfile = open("/tmp/path/tofile/blah", "r")
# read in the lines and strip the newline char off the end
lines = [l.strip() for l in myfile.readlines()]
# create an array to hold the data
dsdata = []
#split all the lines and add them to data array
for l in lines:
dsdata.append(l.split())
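# hypothetical follow-up, assuming each line of the file holds
# "<datasource name> <datapoint name> <graph name>":
# for row in dsdata:
#     mystuff(t, row[0], row[1], row[2])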
| [
"[email protected]"
]
| |
773cae2cb5845ed68ef3e3c463bef91ccc809a89 | b2ad72ec39c7c174be23311f28822a4136cf634c | /thr_interaction_controller/scripts/interaction_controller_gestures.py | 4638619df3beece338d1add70f2b372546471f54 | []
| no_license | 3rdHand-project/thr_infrastructure | 0a2e866561f6022176e5329859de0650b6a54322 | d06fb3cb55ce303703a50bc8cf12017b7c95f926 | refs/heads/master | 2021-01-11T08:24:08.122093 | 2017-12-22T19:36:22 | 2017-12-22T19:36:22 | 51,926,129 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,027 | py | #!/usr/bin/env python
import os
import rospy
import rospkg
import actionlib
import json
from random import choice
from threading import Lock
from thr_infrastructure_msgs.msg import *
from thr_infrastructure_msgs.srv import *
from actionlib_msgs.msg import *
from collections import deque
from kinect2.client import Kinect2Client
class InteractionController(object):
def __init__(self):
self.lock = Lock()
self.running = True
self.waiting = False
self.last_predicition_confidence = None
self.last_predicted_decision_str = None
self.last_human_decision = None
self.current_scene = None
self.last_scene = None
self.logs = []
# Kinect controls
self.kinect = Kinect2Client('BAXTERFLOWERS.local')
self.last_skeleton = None
self.skeleton_id = ''
self.hand_state = {'filtered_state': ''}
self.speech = deque()
self.speech_lock = Lock()
self.last_sentence = ''
self.last_speak = rospy.Time(0)
# Parameters to be tweaked
self.interaction_loop_rate = rospy.Rate(1)
self.reward_service = '/thr/learner'
self.predictor_service = 'thr/predictor'
self.scene_state_service = '/thr/scene_state'
self.run_decision_name = '/thr/run_decision'
# Initiating topics ands links to services/actions
self.run_decision_client = actionlib.SimpleActionClient(self.run_decision_name, RunDecisionAction)
rospy.loginfo("Waiting action client {}...".format(self.run_decision_name))
self.run_decision_client.wait_for_server()
for service in [self.reward_service, self.predictor_service, self.scene_state_service]:
rospy.loginfo("Waiting service {}...".format(service))
rospy.wait_for_service(service)
self.rospack = rospkg.RosPack()
with open(self.rospack.get_path("thr_action_server")+"/config/decision_action_mapping.json") as config_file:
self.decision_action_mapping = json.load(config_file)
rospy.loginfo("Starting Kinect 2 services...")
self.start_kinect_services()
rospy.loginfo("IC for interaction via gestures ready!")
#################################################
# KINECT SERVICES ###############################
#################################################
def say(self, sentence, interval=15):
if self.last_sentence != sentence or rospy.get_time() - self.last_speak > interval:
self.kinect.tts.say(sentence)
self.last_sentence = sentence
self.last_speak = rospy.get_time()
def cb_skeleton(self, msg):
num_skeletons = len(msg.keys())
if self.skeleton_id == '':
if num_skeletons > 1:
rospy.logwarn("{} skeletons are visible, ignoring frame".format(num_skeletons))
elif num_skeletons == 0:
rospy.logwarn("No skeleton visible, ignoring frame")
else:
self.skeleton_id = msg.keys()[0]
rospy.loginfo("Selected skeleton {}".format(self.skeleton_id))
if num_skeletons > 0 and self.skeleton_id != '':
try:
self.last_skeleton = msg[self.skeleton_id]
except KeyError:
# Skeleton ID has changed
rospy.logwarn("Skeleton ID has changed")
self.say("I'm tracking someone else")
self.skeleton_id = ''
self.last_skeleton = None
####### Hand state filtering
# If you're looking for the end filtered state this is self.hand_state['filtered_state']
activating_duration = 3
stopping_duration = 2
timeout_duration = 3.5 # The gesture expires within 3.5 sec if it hasn't been activated, must be > other durations
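        # Debounce: a raw hand state must persist for activating_duration
        # seconds before it becomes the filtered state, and must stay gone
        # for stopping_duration seconds before the filtered state is cleared;
        # pending transitions expire after timeout_duration seconds.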
if self.last_skeleton is not None:
hand_state = self.last_skeleton['HandRight']['HandState']
if hand_state not in self.hand_state:
self.hand_state[hand_state] = {}
if 'filtered_state' not in self.hand_state:
self.hand_state['filtered_state'] = ''
if hand_state in ['NotTracked', 'Unknown'] or self.hand_state['filtered_state'] != '' and hand_state != self.hand_state['filtered_state']:
if self.hand_state['filtered_state'] != "":
if 'disappeared' not in self.hand_state[self.hand_state['filtered_state']]:
self.hand_state[self.hand_state['filtered_state']]['disappeared'] = rospy.get_time()
elif rospy.get_time() - self.hand_state[self.hand_state['filtered_state']]['disappeared'] > stopping_duration:
del self.hand_state[self.hand_state['filtered_state']]['disappeared']
self.hand_state['filtered_state'] = ""
rospy.loginfo("Switch to {}".format(self.hand_state['filtered_state']))
else:
if self.hand_state['filtered_state'] == "":
if 'appeared' not in self.hand_state[hand_state]:
self.hand_state[hand_state]['appeared'] = rospy.get_time()
elif rospy.get_time() - self.hand_state[hand_state]['appeared'] > activating_duration:
self.hand_state['filtered_state'] = hand_state
rospy.loginfo("Switch to {}".format(self.hand_state['filtered_state']))
del self.hand_state[hand_state]['appeared']
for gesture in self.hand_state:
if 'appeared' in self.hand_state[gesture] and rospy.get_time() - self.hand_state[gesture]['appeared'] > timeout_duration:
del self.hand_state[gesture]['appeared']
if 'disappeared' in self.hand_state[gesture] and rospy.get_time() - self.hand_state[gesture]['disappeared'] > timeout_duration:
del self.hand_state[gesture]['disappeared']
def cb_speech(self, msg):
with self.speech_lock:
try:
self.speech.append(msg['semantics'])
except KeyError as e:
rospy.logerr("Malformed speech message: no key {}".format(e.message))
def start_kinect_services(self):
self.kinect.skeleton.set_callback(self.cb_skeleton)
self.kinect.speech.set_callback(self.cb_speech)
with open('{}/config/{}/grammar_en.xml'.format(self.rospack.get_path('thr_scenes'), rospy.get_param('/thr/scene'))) as f:
self.kinect.speech.params.set_grammar(f, "Grammar {}".format(rospy.get_param('/thr/scene')))
self.kinect.tts.params.set_language('english')
self.kinect.speech.params.semantic_on()
self.kinect.tts.params.queue_on()
msg = self.kinect.skeleton.start()
msg += self.kinect.tts.start()
msg += self.kinect.speech.start()
if len(msg) > 0:
rospy.logerr(msg)
#################################################
# SERVICE CALLERS ###############################
#################################################
def set_new_training_example(self, scene, decision, prediction_confidence, predicted_action, corrected):
request = SetNewTrainingExampleRequest()
request.decision = decision
request.predicted_decision = predicted_action
request.scene_state = scene
request.prediction_confidence = prediction_confidence
request.corrected = corrected
try:
reward = rospy.ServiceProxy(self.reward_service, SetNewTrainingExample)
reward(request)
except rospy.ServiceException as e:
rospy.logerr("Cannot set training example: {}".format(e.message))
def update_scene(self):
request = GetSceneStateRequest()
try:
getscene = rospy.ServiceProxy(self.scene_state_service, GetSceneState)
self.last_scene = self.current_scene
self.current_scene = getscene(request).state
except rospy.ServiceException as e:
rospy.logerr("Cannot update scene {}:".format(e.message))
def predict(self):
request = GetNextDecisionRequest()
request.scene_state = self.current_scene
try:
predict = rospy.ServiceProxy(self.predictor_service, GetNextDecision)
return predict(request)
except rospy.ServiceException as e:
rospy.logerr("Cannot call predictor:".format(e.message))
decision = Decision(type='wait')
return GetNextDecisionResponse(decisions=[decision], probas=[1.])
def start_or_stop_episode(self, start=True):
for node in ['scene_state_manager', 'scene_state_updater', 'action_server', 'learner_predictor']:
url = '/thr/{}/start_stop'.format(node)
rospy.wait_for_service(url)
rospy.ServiceProxy(url, StartStopEpisode).call(StartStopEpisodeRequest(
command=StartStopEpisodeRequest.START if start else
StartStopEpisodeRequest.STOP))
###################################################################################################################
def process_speech(self):
with self.speech_lock:
decisions = [Decision(type=speech[0], parameters=speech[1:]) for speech in self.speech]
self.speech.clear()
return decisions
def run_decision(self, decision):
if decision.type == 'wait':
self.waiting = True
return
os.system('beep')
goal = RunDecisionGoal()
goal.decision = decision
self.run_decision_client.send_goal(goal)
while self.run_decision_client.get_state() in [GoalStatus.PENDING, GoalStatus.ACTIVE] and not rospy.is_shutdown():
self.interaction_loop_rate.sleep()
self.current_decision = decision
return False
def run(self):
def decision_to_tts(decision):
obj = decision.parameters[0].split('/')[-1].replace('_', ' ')
action = decision.type.split('_')[-1]
return action, obj
rospy.loginfo('Manual interaction starting from gestures!')
self.start_or_stop_episode(True)
def filter(actions, action):
return [decision for index, decision in enumerate(actions.decisions) if
action in decision.type and all_decisions.probas[index] > 0.]
if not rospy.is_shutdown():
try:
while self.running and not rospy.is_shutdown():
self.update_scene()
all_decisions = self.predict()
go_homes = filter(all_decisions, 'go_home')
if len(go_homes) > 0:
decision = choice(go_homes)
else:
gives = filter(all_decisions, 'give')
picks = filter(all_decisions, 'pick')
holds = filter(all_decisions, 'hold')
if len(gives) > 0:
decision = choice(gives)
decision.parameters = decision.parameters #+ map(str, flattened_gesture_pose) + [
else:
self.say("Please tell or show me what to do")
gesture = self.hand_state['filtered_state']
wait = False
speech_decisions = self.process_speech()
if len(speech_decisions) > 0:
if speech_decisions[0] in holds or speech_decisions[0] in picks:
decision = speech_decisions[0]
else:
action, obj = decision_to_tts(speech_decisions[0])
self.say("I cannot {} {} now".format(action, obj))
wait = True
else:
if gesture == 'Lasso':
if len(holds) > 0:
decision = choice(holds)
else:
self.say("I cannot hold now")
wait = True
elif gesture == 'Open':
if len(picks) > 0:
decision = choice(picks)
else:
rospy.logwarn("I cannot pick now")
wait = True
else:
wait = True
if wait:
self.interaction_loop_rate.sleep()
continue
rospy.logwarn("You showed a {} gesture corresponding to a {} action".format(gesture, decision.type))
action, obj = decision_to_tts(decision)
self.say("I'm {}ing {}".format(action, obj))
type, params = decision.type, decision.parameters
rospy.logwarn("Choosing decision {}{}".format(type, params))
self.logs.append({'timestamp': rospy.get_time(),
'type': type,
'parameters': params})
self.run_decision(decision)
self.interaction_loop_rate.sleep()
finally:
logs_name = rospy.get_param('/thr/logs_name')
if logs_name != "none":
with open('action_decisions_' + logs_name + '.json', 'w') as f:
json.dump(self.logs, f)
if __name__ == '__main__':
rospy.init_node("interaction_controller")
InteractionController().run()
| [
"[email protected]"
]
| |
571105821c9382af323e4a0cb4a3a7a127838e1f | 93c036c2c9a4175162391adfb33ff9211c0ca292 | /onenet/alignPrint.py | 1fde1cf55bf5cc0226fdafb1ef24ec85d5455c59 | []
| no_license | fiso0/MxPython | ff8f021e57279190068c0dbe484384b28c6409f7 | 2c1545cf0f8a8062b948346006f6dbd05a1c74e9 | refs/heads/master | 2023-03-12T00:40:18.960523 | 2023-02-27T06:51:32 | 2023-02-27T06:51:32 | 59,210,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Tested: found not to work as intended
# Rewrite of a format-alignment function. It tries to infer whether the string is a Chinese string;
# if so it pads with full-width spaces, otherwise with half-width spaces.
def myAlign(string, length=0):
if length == 0:
return string
slen = len(string)
re = string
if isinstance(string, str):
placeholder = ' '
else:
placeholder = u' '
while slen < length:
re += placeholder
slen += 1
return re
# Tested: aligns correctly on the command line, slightly misaligned in PyCharm (possibly a font issue)
# Append '.' after string so the total length reaches length
def myAlign2(string, length=0):
if length == 0:
return string
    # len(string.encode('GBK')): Chinese characters count as length 2, English characters as length 1
re = "{label:.<{width}}".format(label=string, width=length-len(string.encode('GBK'))+len(string))
return re
if __name__ == '__main__':
s1 = u'我是一个长句子,是的very long的句子。'
s2 =u'我是短句子'
print(myAlign(s1, 20) + myAlign(s2, 10))
print(myAlign(s2, 20) + myAlign(s1, 10))
print(myAlign2(s1, 40) + (s2))
print(myAlign2(s2, 40) + (s1))
input() | [
"[email protected]"
]
| |
89c9af3c363d991a525ae91f0b0774b14f7f2552 | 11e6e2dfc53073bc0dd3c74751eaa0c2e900dd94 | /REL_SERVIDORES_sqlconsulta.py | 5b6532f94554364987feccf44347782aae0e9c68 | [
"MIT"
]
| permissive | tiagotouso/TALENTOS_HUMANOS | 66c3cc013da8a12e797a277f541f3c74d913943e | c391f7d7a331d5f8b186b27af6a9b61448620cc6 | refs/heads/master | 2021-05-25T22:08:38.450403 | 2020-10-15T14:02:55 | 2020-10-15T14:02:55 | 253,940,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,044 | py | '''
TEMPLATE REPORT GENERATOR FOR QUERYING THE TB_SER_REL TABLE
'''
from SQL import sqlpandas
from AUXILIAR import salvarPandas
def sqlConsultaNova(narquivo, colunas, tipo, carreira, grupo, renomear=True):
'''
    TEMPLATE FUNCTION FOR QUERYING THE TB_SER_REL TABLE
    BUILDS THE SQL QUERY AND SAVES THE FILE TO THE REPORTS FOLDER - STANDARD WAY TO GENERATE QUERY SQL FOR THE REPORTING SYSTEM
    IN
        NAME OF THE QUERY, USED TO SAVE THE GENERATED FILE
        COLUMNS, PARAMETERS ([ACTIVE, RETIRED, DISMISSED, ALL],
        [TÉCN, TÉCN-ESP, PROF 3º, PROF 2º, OUTROS]) FOR THE SQL QUERY
    OUT
        CSV TABLE OF THE EXECUTED QUERY
'''
    # GET THE FIELD NAMES FROM THE TS_SIS__CONFIG_TABELAS TABLE
sql = '''SELECT CAMPO, AS_NOME FROM talentoshumanos.ts_sis__config_tabelas;'''
dados = sqlpandas(sql)
oldcampo = list(dados['CAMPO'])
newcampo = list(dados['AS_NOME'])
    #BUILD A DICTIONARY WITH THE TABLE FIELDS (PLUS THE NAMES FOR DISPLAY)
diccampos = {}
for n, v in enumerate(newcampo):
if str(v).strip() != '-':
diccampos[str(v).strip()] = str(oldcampo[n]).strip()
titulo = colunas
    #BUILD THE SQL QUERY
sql = '''select {0} from tb_ser_rel'''
    if renomear == False: #SQL WITHOUT RENAMING THE FIELDS
ax = []
for coluna in colunas:
ax.append(diccampos[coluna])
titulo = ax
    else: #SQL RENAMING THE FIELDS
ax = []
for coluna in colunas:
ax.append(diccampos[coluna] + ' AS \'' + coluna + '\'')
axtx = ', '.join(ax)
sql = sql.format(axtx)
    #FILTER ON ACTIVITY STATUS
where = ''''''
if tipo == 'ATI':
where = '''\nwhere IT_DA_OCOR_INGR_ORGAO_SERV is not null
and IT_DA_OCOR_INATIVIDADE_SERV is null
and IT_DA_OCOR_EXCLUSAO_SERV is null'''
elif tipo == 'APO':
where = '''\nwhere IT_DA_OCOR_INGR_ORGAO_SERV is not null
and IT_DA_OCOR_INATIVIDADE_SERV is not null
and IT_DA_OCOR_EXCLUSAO_SERV is null'''
elif tipo == 'DES':
where = '''\nwhere IT_DA_OCOR_INGR_ORGAO_SERV is not null
and IT_DA_OCOR_EXCLUSAO_SERV is not null'''
elif tipo == 'TODOS':
where = '''\nwhere IT_DA_OCOR_INGR_ORGAO_SERV is not null'''
sql = sql + where
    #FILTER ON CAREER
if len(carreira) != 0:
wherecarreira = ''
axcarreira = ''
if len(carreira) > 1:
axcarreira = '\', \''.join(carreira)
axcarreira = '\'' + axcarreira + '\''
else:
axcarreira = '\'' + carreira[0] + '\''
wherecarreira = '''\nand DES_CARREIRA in ({0})'''.format(axcarreira)
sql = sql + wherecarreira
    #FILTER ON GROUP
if len(grupo) > 0:
axgrupo = '\', \''.join(grupo)
axgrupo = '\'' + axgrupo + '\''
sqlgrupo = '''\nand DES_GRUPO in ({0})'''.format(axgrupo)
sql = sql + sqlgrupo
    #SAVE THE GENERATED TABLE
dados = sqlpandas(sql)
if len(dados) > 0:
salvarPandas(dados, narquivo) | [
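# Hypothetical usage (the display names must match AS_NOME values stored in
# TS_SIS__CONFIG_TABELAS; 'NOME' and 'CARGO' below are illustrative only):
# sqlConsultaNova('active_staff_report', ['NOME', 'CARGO'], 'ATI', [], [])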
"[email protected]"
]
| |
62bf5a35b3734cfe82f533f2b605f1b22012c38d | 8c52d18ecf23734d5457f7fde9f2e3a66e7d73e7 | /backend/wallet/migrations/0001_initial.py | 45781799168796d908f498ba7d93fa311743d71e | []
| no_license | crowdbotics-apps/klapr-19351 | 9dd3b8a0d2fa48fdec9d70086057677b0c13f90b | e0f3a98c18bb43e2b314206e1c480d6d54edff8c | refs/heads/master | 2022-11-24T14:44:00.997468 | 2020-08-04T00:36:18 | 2020-08-04T00:36:18 | 284,837,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,875 | py | # Generated by Django 2.2.15 on 2020-08-04 00:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('taxi_profile', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='UserWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField()),
('expiration_date', models.DateTimeField()),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='userwallet_user', to='taxi_profile.UserProfile')),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_token', models.CharField(max_length=255)),
('payment_account', models.CharField(max_length=10)),
('timestamp_created', models.DateTimeField(auto_now_add=True)),
('wallet', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='paymentmethod_wallet', to='wallet.UserWallet')),
],
),
migrations.CreateModel(
name='DriverWallet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('balance', models.FloatField()),
('expiration_date', models.DateTimeField()),
('driver', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='driverwallet_driver', to='taxi_profile.DriverProfile')),
],
),
]
| [
"[email protected]"
]
|